X-Git-Url: http://plrg.eecs.uci.edu/git/?p=c11tester.git;a=blobdiff_plain;f=model.cc;h=8c88a05469515e92c9f324ef3afb619ee90c0344;hp=31e0694e459b018da371ae4ff8c484369582f49d;hb=0b3c8355b0f1fdf7e4b983bef11472192405a9d9;hpb=7da7d6e22d9e7dcdc65711c4459a6e9af30c1458 diff --git a/model.cc b/model.cc index 31e0694e..ae378894 100644 --- a/model.cc +++ b/model.cc @@ -1,5 +1,7 @@ #include #include +#include +#include #include "model.h" #include "action.h" @@ -11,43 +13,88 @@ #include "cyclegraph.h" #include "promise.h" #include "datarace.h" -#include "mutex.h" -#include "threads.h" +#include "threads-model.h" +#include "output.h" #define INITIAL_THREAD_ID 0 ModelChecker *model; +struct bug_message { + bug_message(const char *str) { + const char *fmt = " [BUG] %s\n"; + msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str)); + sprintf(msg, fmt, str); + } + ~bug_message() { if (msg) snapshot_free(msg); } + + char *msg; + void print() { model_print("%s", msg); } + + SNAPSHOTALLOC +}; + +/** + * Structure for holding small ModelChecker members that should be snapshotted + */ +struct model_snapshot_members { + model_snapshot_members() : + /* First thread created will have id INITIAL_THREAD_ID */ + next_thread_id(INITIAL_THREAD_ID), + used_sequence_numbers(0), + next_backtrack(NULL), + bugs(), + stats(), + failed_promise(false), + too_many_reads(false), + no_valid_reads(false), + bad_synchronization(false), + asserted(false) + { } + + ~model_snapshot_members() { + for (unsigned int i = 0; i < bugs.size(); i++) + delete bugs[i]; + bugs.clear(); + } + + unsigned int next_thread_id; + modelclock_t used_sequence_numbers; + ModelAction *next_backtrack; + std::vector< bug_message *, SnapshotAlloc > bugs; + struct execution_stats stats; + bool failed_promise; + bool too_many_reads; + bool no_valid_reads; + /** @brief Incorrectly-ordered synchronization was made */ + bool bad_synchronization; + bool asserted; + + SNAPSHOTALLOC +}; + /** @brief Constructor */ ModelChecker::ModelChecker(struct model_params params) : /* Initialize default scheduler */ params(params), scheduler(new Scheduler()), - num_executions(0), - num_feasible_executions(0), diverge(NULL), earliest_diverge(NULL), action_trace(new action_list_t()), thread_map(new HashTable()), - obj_map(new HashTable()), - lock_waiters_map(new HashTable()), - obj_thrd_map(new HashTable, uintptr_t, 4 >()), - promises(new std::vector()), - futurevalues(new std::vector()), - pending_rel_seqs(new std::vector()), - thrd_last_action(new std::vector(1)), + obj_map(new HashTable()), + lock_waiters_map(new HashTable()), + condvar_waiters_map(new HashTable()), + obj_thrd_map(new HashTable *, uintptr_t, 4 >()), + promises(new std::vector< Promise *, SnapshotAlloc >()), + futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc >()), + pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc >()), + thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc >(1)), + thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc >()), node_stack(new NodeStack()), - mo_graph(new CycleGraph()), - failed_promise(false), - too_many_reads(false), - asserted(false), - bad_synchronization(false) + priv(new struct model_snapshot_members()), + mo_graph(new CycleGraph()) { - /* Allocate this "size" on the snapshotting heap */ - priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv)); - /* First thread created will have id INITIAL_THREAD_ID */ - priv->next_thread_id = INITIAL_THREAD_ID; - /* Initialize a model-checker thread, for special 
ModelActions */ model_thread = new Thread(get_next_id()); thread_map->put(id_to_int(model_thread->get_id()), model_thread); @@ -63,6 +110,7 @@ ModelChecker::~ModelChecker() delete obj_thrd_map; delete obj_map; delete lock_waiters_map; + delete condvar_waiters_map; delete action_trace; for (unsigned int i = 0; i < promises->size(); i++) @@ -72,9 +120,31 @@ ModelChecker::~ModelChecker() delete pending_rel_seqs; delete thrd_last_action; + delete thrd_last_fence_release; delete node_stack; delete scheduler; delete mo_graph; + delete priv; +} + +static action_list_t * get_safe_ptr_action(HashTable * hash, void * ptr) +{ + action_list_t *tmp = hash->get(ptr); + if (tmp == NULL) { + tmp = new action_list_t(); + hash->put(ptr, tmp); + } + return tmp; +} + +static std::vector * get_safe_ptr_vect_action(HashTable *, uintptr_t, 4> * hash, void * ptr) +{ + std::vector *tmp = hash->get(ptr); + if (tmp == NULL) { + tmp = new std::vector(); + hash->put(ptr, tmp); + } + return tmp; } /** @@ -85,11 +155,19 @@ void ModelChecker::reset_to_initial_state() { DEBUG("+++ Resetting to initial state +++\n"); node_stack->reset_execution(); - failed_promise = false; - too_many_reads = false; - bad_synchronization = false; - reset_asserted(); - snapshotObject->backTrackBeforeStep(0); + + /* Print all model-checker output before rollback */ + fflush(model_out); + + /** + * FIXME: if we utilize partial rollback, we will need to free only + * those pending actions which were NOT pending before the rollback + * point + */ + for (unsigned int i = 0; i < get_num_threads(); i++) + delete get_thread(int_to_id(i))->get_pending(); + + snapshot_backtrack_before(0); } /** @return a thread ID for a new Thread */ @@ -99,13 +177,18 @@ thread_id_t ModelChecker::get_next_id() } /** @return the number of user threads created during this execution */ -unsigned int ModelChecker::get_num_threads() +unsigned int ModelChecker::get_num_threads() const { return priv->next_thread_id; } -/** @return The currently executing Thread. */ -Thread * ModelChecker::get_current_thread() +/** + * Must be called from user-thread context (e.g., through the global + * thread_current() interface) + * + * @return The currently executing Thread. + */ +Thread * ModelChecker::get_current_thread() const { return scheduler->get_current_thread(); } @@ -116,6 +199,11 @@ modelclock_t ModelChecker::get_next_seq_num() return ++priv->used_sequence_numbers; } +Node * ModelChecker::get_curr_node() const +{ + return node_stack->get_head(); +} + /** * @brief Choose the next thread to execute. * @@ -123,42 +211,51 @@ modelclock_t ModelChecker::get_next_seq_num() * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be * followed by a THREAD_START, or it can enforce execution replay/backtracking. * The model-checker may have no preference regarding the next thread (i.e., - * when exploring a new execution ordering), in which case this will return - * NULL. - * @param curr The current ModelAction. This action might guide the choice of - * next thread. - * @return The next thread to run. If the model-checker has no preference, NULL. + * when exploring a new execution ordering), in which case we defer to the + * scheduler. + * + * @param curr Optional: The current ModelAction. Only used if non-NULL and it + * might guide the choice of next thread (i.e., THREAD_CREATE should be + * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC}) + * @return The next chosen thread to run, if any exist. 
Or else if no threads + * remain to be executed, return NULL. */ Thread * ModelChecker::get_next_thread(ModelAction *curr) { thread_id_t tid; - if (curr!=NULL) { + if (curr != NULL) { /* Do not split atomic actions. */ if (curr->is_rmwr()) - return thread_current(); - /* The THREAD_CREATE action points to the created Thread */ + return get_thread(curr); else if (curr->get_type() == THREAD_CREATE) - return (Thread *)curr->get_location(); + return curr->get_thread_operand(); } - /* Have we completed exploring the preselected path? */ + /* + * Have we completed exploring the preselected path? Then let the + * scheduler decide + */ if (diverge == NULL) - return NULL; + return scheduler->select_next_thread(); /* Else, we are trying to replay an execution */ ModelAction *next = node_stack->get_next()->get_action(); if (next == diverge) { if (earliest_diverge == NULL || *diverge < *earliest_diverge) - earliest_diverge=diverge; + earliest_diverge = diverge; Node *nextnode = next->get_node(); Node *prevnode = nextnode->get_parent(); scheduler->update_sleep_set(prevnode); /* Reached divergence point */ - if (nextnode->increment_promise()) { + if (nextnode->increment_misc()) { + /* The next node will try to satisfy a different misc_index values. */ + tid = next->get_tid(); + node_stack->pop_restofstack(2); + } else if (nextnode->increment_promise()) { /* The next node will try to satisfy a different set of promises. */ tid = next->get_tid(); node_stack->pop_restofstack(2); @@ -175,14 +272,14 @@ Thread * ModelChecker::get_next_thread(ModelAction *curr) tid = next->get_tid(); node_stack->pop_restofstack(2); } else { + ASSERT(prevnode); /* Make a different thread execute for next step */ - scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid()))); + scheduler->add_sleep(get_thread(next->get_tid())); tid = prevnode->get_next_backtrack(); /* Make sure the backtracked thread isn't sleeping. */ - scheduler->remove_sleep(thread_map->get(id_to_int(tid))); node_stack->pop_restofstack(1); - if (diverge==earliest_diverge) { - earliest_diverge=prevnode->get_action(); + if (diverge == earliest_diverge) { + earliest_diverge = prevnode->get_action(); } } /* The correct sleep set is in the parent node. */ @@ -199,38 +296,235 @@ Thread * ModelChecker::get_next_thread(ModelAction *curr) return thread_map->get(id_to_int(tid)); } -/** +/** * We need to know what the next actions of all threads in the sleep * set will be. This method computes them and stores the actions at * the corresponding thread object's pending action. */ -void ModelChecker::execute_sleep_set() { - for(unsigned int i=0;iget_enabled(thr) == THREAD_SLEEP_SET ) { - thr->set_state(THREAD_RUNNING); - scheduler->next_thread(thr); - Thread::swap(&system_context, thr); - thr->set_pending(priv->current_action); +void ModelChecker::execute_sleep_set() +{ + for (unsigned int i = 0; i < get_num_threads(); i++) { + thread_id_t tid = int_to_id(i); + Thread *thr = get_thread(tid); + if (scheduler->is_sleep_set(thr) && thr->get_pending()) { + thr->get_pending()->set_sleep_flag(); } } - priv->current_action = NULL; } -void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) { - for(unsigned int i=0;iget_enabled(thr) == THREAD_SLEEP_SET ) { - ModelAction *pending_act=thr->get_pending(); - if (pending_act->could_synchronize_with(curr)) { - //Remove this thread from sleep set +/** + * @brief Should the current action wake up a given thread? 
+ * + * @param curr The current action + * @param thread The thread that we might wake up + * @return True, if we should wake up the sleeping thread; false otherwise + */ +bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const +{ + const ModelAction *asleep = thread->get_pending(); + /* Don't allow partial RMW to wake anyone up */ + if (curr->is_rmwr()) + return false; + /* Synchronizing actions may have been backtracked */ + if (asleep->could_synchronize_with(curr)) + return true; + /* All acquire/release fences and fence-acquire/store-release */ + if (asleep->is_fence() && asleep->is_acquire() && curr->is_release()) + return true; + /* Fence-release + store can awake load-acquire on the same location */ + if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) { + ModelAction *fence_release = get_last_fence_release(curr->get_tid()); + if (fence_release && *(get_last_action(thread->get_id())) < *fence_release) + return true; + } + return false; +} + +void ModelChecker::wake_up_sleeping_actions(ModelAction *curr) +{ + for (unsigned int i = 0; i < get_num_threads(); i++) { + Thread *thr = get_thread(int_to_id(i)); + if (scheduler->is_sleep_set(thr)) { + if (should_wake_up(curr, thr)) + /* Remove this thread from sleep set */ scheduler->remove_sleep(thr); - } } - } + } +} + +/** @brief Alert the model-checker that an incorrectly-ordered + * synchronization was made */ +void ModelChecker::set_bad_synchronization() +{ + priv->bad_synchronization = true; +} + +/** + * Check whether the current trace has triggered an assertion which should halt + * its execution. + * + * @return True, if the execution should be aborted; false otherwise + */ +bool ModelChecker::has_asserted() const +{ + return priv->asserted; +} + +/** + * Trigger a trace assertion which should cause this execution to be halted. + * This can be due to a detected bug or due to an infeasibility that should + * halt ASAP. + */ +void ModelChecker::set_assert() +{ + priv->asserted = true; +} + +/** + * Check if we are in a deadlock. Should only be called at the end of an + * execution, although it should not give false positives in the middle of an + * execution (there should be some ENABLED thread). + * + * @return True if program is in a deadlock; false otherwise + */ +bool ModelChecker::is_deadlocked() const +{ + bool blocking_threads = false; + for (unsigned int i = 0; i < get_num_threads(); i++) { + thread_id_t tid = int_to_id(i); + if (is_enabled(tid)) + return false; + Thread *t = get_thread(tid); + if (!t->is_model_thread() && t->get_pending()) + blocking_threads = true; + } + return blocking_threads; +} + +/** + * Check if this is a complete execution. That is, have all thread completed + * execution (rather than exiting because sleep sets have forced a redundant + * execution). + * + * @return True if the execution is complete. + */ +bool ModelChecker::is_complete_execution() const +{ + for (unsigned int i = 0; i < get_num_threads(); i++) + if (is_enabled(int_to_id(i))) + return false; + return true; +} + +/** + * @brief Assert a bug in the executing program. + * + * Use this function to assert any sort of bug in the user program. If the + * current trace is feasible (actually, a prefix of some feasible execution), + * then this execution will be aborted, printing the appropriate message. If + * the current trace is not yet feasible, the error message will be stashed and + * printed if the execution ever becomes feasible. 
+ * + * @param msg Descriptive message for the bug (do not include newline char) + * @return True if bug is immediately-feasible + */ +bool ModelChecker::assert_bug(const char *msg) +{ + priv->bugs.push_back(new bug_message(msg)); + + if (isfeasibleprefix()) { + set_assert(); + return true; + } + return false; +} + +/** + * @brief Assert a bug in the executing program, asserted by a user thread + * @see ModelChecker::assert_bug + * @param msg Descriptive message for the bug (do not include newline char) + */ +void ModelChecker::assert_user_bug(const char *msg) +{ + /* If feasible bug, bail out now */ + if (assert_bug(msg)) + switch_to_master(NULL); +} + +/** @return True, if any bugs have been reported for this execution */ +bool ModelChecker::have_bug_reports() const +{ + return priv->bugs.size() != 0; +} + +/** @brief Print bug report listing for this execution (if any bugs exist) */ +void ModelChecker::print_bugs() const +{ + if (have_bug_reports()) { + model_print("Bug report: %zu bug%s detected\n", + priv->bugs.size(), + priv->bugs.size() > 1 ? "s" : ""); + for (unsigned int i = 0; i < priv->bugs.size(); i++) + priv->bugs[i]->print(); + } +} + +/** + * @brief Record end-of-execution stats + * + * Must be run when exiting an execution. Records various stats. + * @see struct execution_stats + */ +void ModelChecker::record_stats() +{ + stats.num_total++; + if (!isfeasibleprefix()) + stats.num_infeasible++; + else if (have_bug_reports()) + stats.num_buggy_executions++; + else if (is_complete_execution()) + stats.num_complete++; + else + stats.num_redundant++; +} + +/** @brief Print execution stats */ +void ModelChecker::print_stats() const +{ + model_print("Number of complete, bug-free executions: %d\n", stats.num_complete); + model_print("Number of redundant executions: %d\n", stats.num_redundant); + model_print("Number of buggy executions: %d\n", stats.num_buggy_executions); + model_print("Number of infeasible executions: %d\n", stats.num_infeasible); + model_print("Total executions: %d\n", stats.num_total); + model_print("Total nodes created: %d\n", node_stack->get_total_nodes()); +} + +/** + * @brief End-of-exeuction print + * @param printbugs Should any existing bugs be printed? + */ +void ModelChecker::print_execution(bool printbugs) const +{ + print_program_output(); + + if (DBG_ENABLED() || params.verbose) { + model_print("Earliest divergence point since last feasible execution:\n"); + if (earliest_diverge) + earliest_diverge->print(); + else + model_print("(Not set)\n"); + + model_print("\n"); + print_stats(); + } + + /* Don't print invalid bugs */ + if (printbugs) + print_bugs(); + + model_print("\n"); + print_summary(); } /** @@ -243,31 +537,34 @@ void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) { bool ModelChecker::next_execution() { DBG(); + /* Is this execution a feasible execution that's worth bug-checking? 
*/ + bool complete = isfeasibleprefix() && (is_complete_execution() || + have_bug_reports()); - num_executions++; - - if (isfinalfeasible()) { - printf("Earliest divergence point since last feasible execution:\n"); - if (earliest_diverge) - earliest_diverge->print(); - else - printf("(Not set)\n"); + /* End-of-execution bug checks */ + if (complete) { + if (is_deadlocked()) + assert_bug("Deadlock detected"); - earliest_diverge = NULL; - num_feasible_executions++; + checkDataRaces(); } - DEBUG("Number of acquires waiting on pending release sequences: %zu\n", - pending_rel_seqs->size()); + record_stats(); - if (isfinalfeasible() || DBG_ENABLED()) - print_summary(); + /* Output */ + if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports())) + print_execution(complete); + else + clear_program_output(); + + if (complete) + earliest_diverge = NULL; if ((diverge = get_next_backtrack()) == NULL) return false; if (DBG_ENABLED()) { - printf("Next execution will diverge at:\n"); + model_print("Next execution will diverge at:\n"); diverge->print(); } @@ -275,26 +572,123 @@ bool ModelChecker::next_execution() return true; } -ModelAction * ModelChecker::get_last_conflict(ModelAction *act) +/** + * @brief Find the last fence-related backtracking conflict for a ModelAction + * + * This function performs the search for the most recent conflicting action + * against which we should perform backtracking, as affected by fence + * operations. This includes pairs of potentially-synchronizing actions which + * occur due to fence-acquire or fence-release, and hence should be explored in + * the opposite execution order. + * + * @param act The current action + * @return The most recent action which conflicts with act due to fences + */ +ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const +{ + /* Only perform release/acquire fence backtracking for stores */ + if (!act->is_write()) + return NULL; + + /* Find a fence-release (or, act is a release) */ + ModelAction *last_release; + if (act->is_release()) + last_release = act; + else + last_release = get_last_fence_release(act->get_tid()); + if (!last_release) + return NULL; + + /* Skip past the release */ + action_list_t *list = action_trace; + action_list_t::reverse_iterator rit; + for (rit = list->rbegin(); rit != list->rend(); rit++) + if (*rit == last_release) + break; + ASSERT(rit != list->rend()); + + /* Find a prior: + * load-acquire + * or + * load --sb-> fence-acquire */ + std::vector< ModelAction *, ModelAlloc > acquire_fences(get_num_threads(), NULL); + std::vector< ModelAction *, ModelAlloc > prior_loads(get_num_threads(), NULL); + bool found_acquire_fences = false; + for ( ; rit != list->rend(); rit++) { + ModelAction *prev = *rit; + if (act->same_thread(prev)) + continue; + + int tid = id_to_int(prev->get_tid()); + + if (prev->is_read() && act->same_var(prev)) { + if (prev->is_acquire()) { + /* Found most recent load-acquire, don't need + * to search for more fences */ + if (!found_acquire_fences) + return NULL; + } else { + prior_loads[tid] = prev; + } + } + if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) { + found_acquire_fences = true; + acquire_fences[tid] = prev; + } + } + + ModelAction *latest_backtrack = NULL; + for (unsigned int i = 0; i < acquire_fences.size(); i++) + if (acquire_fences[i] && prior_loads[i]) + if (!latest_backtrack || *latest_backtrack < *acquire_fences[i]) + latest_backtrack = acquire_fences[i]; + return latest_backtrack; +} + +/** + * @brief Find the last backtracking 
conflict for a ModelAction + * + * This function performs the search for the most recent conflicting action + * against which we should perform backtracking. This primary includes pairs of + * synchronizing actions which should be explored in the opposite execution + * order. + * + * @param act The current action + * @return The most recent action which conflicts with act + */ +ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const { switch (act->get_type()) { + /* case ATOMIC_FENCE: fences don't directly cause backtracking */ case ATOMIC_READ: case ATOMIC_WRITE: case ATOMIC_RMW: { + ModelAction *ret = NULL; + /* linear search: from most recent to oldest */ - action_list_t *list = obj_map->get_safe_ptr(act->get_location()); + action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); action_list_t::reverse_iterator rit; for (rit = list->rbegin(); rit != list->rend(); rit++) { ModelAction *prev = *rit; - if (prev->could_synchronize_with(act)) - return prev; + if (prev->could_synchronize_with(act)) { + ret = prev; + break; + } } - break; + + ModelAction *ret2 = get_last_fence_conflict(act); + if (!ret2) + return ret; + if (!ret) + return ret2; + if (*ret < *ret2) + return ret2; + return ret; } case ATOMIC_LOCK: case ATOMIC_TRYLOCK: { /* linear search: from most recent to oldest */ - action_list_t *list = obj_map->get_safe_ptr(act->get_location()); + action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); action_list_t::reverse_iterator rit; for (rit = list->rbegin(); rit != list->rend(); rit++) { ModelAction *prev = *rit; @@ -305,11 +699,37 @@ ModelAction * ModelChecker::get_last_conflict(ModelAction *act) } case ATOMIC_UNLOCK: { /* linear search: from most recent to oldest */ - action_list_t *list = obj_map->get_safe_ptr(act->get_location()); + action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); + action_list_t::reverse_iterator rit; + for (rit = list->rbegin(); rit != list->rend(); rit++) { + ModelAction *prev = *rit; + if (!act->same_thread(prev) && prev->is_failed_trylock()) + return prev; + } + break; + } + case ATOMIC_WAIT: { + /* linear search: from most recent to oldest */ + action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); + action_list_t::reverse_iterator rit; + for (rit = list->rbegin(); rit != list->rend(); rit++) { + ModelAction *prev = *rit; + if (!act->same_thread(prev) && prev->is_failed_trylock()) + return prev; + if (!act->same_thread(prev) && prev->is_notify()) + return prev; + } + break; + } + + case ATOMIC_NOTIFY_ALL: + case ATOMIC_NOTIFY_ONE: { + /* linear search: from most recent to oldest */ + action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); action_list_t::reverse_iterator rit; for (rit = list->rbegin(); rit != list->rend(); rit++) { ModelAction *prev = *rit; - if (!act->same_thread(prev)&&prev->is_failed_trylock()) + if (!act->same_thread(prev) && prev->is_wait()) return prev; } break; @@ -328,38 +748,43 @@ ModelAction * ModelChecker::get_last_conflict(ModelAction *act) void ModelChecker::set_backtracking(ModelAction *act) { Thread *t = get_thread(act); - ModelAction * prev = get_last_conflict(act); + ModelAction *prev = get_last_conflict(act); if (prev == NULL) return; - Node * node = prev->get_node()->get_parent(); + Node *node = prev->get_node()->get_parent(); int low_tid, high_tid; - if (node->is_enabled(t)) { + if (node->enabled_status(t->get_id()) == THREAD_ENABLED) { low_tid = id_to_int(act->get_tid()); - high_tid = low_tid+1; + high_tid = low_tid 
+ 1; } else { low_tid = 0; high_tid = get_num_threads(); } - for(int i = low_tid; i < high_tid; i++) { + for (int i = low_tid; i < high_tid; i++) { thread_id_t tid = int_to_id(i); - if (!node->is_enabled(tid)) + /* Make sure this thread can be enabled here. */ + if (i >= node->get_num_threads()) + break; + + /* Don't backtrack into a point where the thread is disabled or sleeping. */ + if (node->enabled_status(tid) != THREAD_ENABLED) continue; - + /* Check if this has been explored already */ if (node->has_been_explored(tid)) continue; /* See if fairness allows */ if (model->params.fairwindow != 0 && !node->has_priority(tid)) { - bool unfair=false; - for(int t=0;tget_num_threads();t++) { - thread_id_t tother=int_to_id(t); + bool unfair = false; + for (int t = 0; t < node->get_num_threads(); t++) { + thread_id_t tother = int_to_id(t); if (node->is_enabled(tother) && node->has_priority(tother)) { - unfair=true; + unfair = true; break; } } @@ -367,8 +792,7 @@ void ModelChecker::set_backtracking(ModelAction *act) continue; } /* Cache the latest backtracking point */ - if (!priv->next_backtrack || *prev > *priv->next_backtrack) - priv->next_backtrack = prev; + set_latest_backtrack(prev); /* If this is a new backtracking point, mark the tree */ if (!node->set_backtrack(tid)) @@ -383,6 +807,26 @@ void ModelChecker::set_backtracking(ModelAction *act) } } +/** + * @brief Cache the a backtracking point as the "most recent", if eligible + * + * Note that this does not prepare the NodeStack for this backtracking + * operation, it only caches the action on a per-execution basis + * + * @param act The operation at which we should explore a different next action + * (i.e., backtracking point) + * @return True, if this action is now the most recent backtracking point; + * false otherwise + */ +bool ModelChecker::set_latest_backtrack(ModelAction *act) +{ + if (!priv->next_backtrack || *act > *priv->next_backtrack) { + priv->next_backtrack = act; + return true; + } + return false; +} + /** * Returns last backtracking point. The model checker will explore a different * path for this point in the next execution. 
@@ -403,7 +847,7 @@ ModelAction * ModelChecker::get_next_backtrack() */ bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw) { - uint64_t value; + uint64_t value = VALUE_NONE; bool updated = false; while (true) { const ModelAction *reads_from = curr->get_node()->get_read_from(); @@ -418,25 +862,27 @@ bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw) r_status = r_modification_order(curr, reads_from); } - - if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) { + if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) { mo_graph->rollbackChanges(); - too_many_reads = false; + priv->too_many_reads = false; continue; } - curr->read_from(reads_from); + read_from(curr, reads_from); mo_graph->commitChanges(); - mo_check_promises(curr->get_tid(), reads_from); + mo_check_promises(curr, true); updated |= r_status; } else if (!second_part_of_rmw) { /* Read from future value */ - value = curr->get_node()->get_future_value(); - modelclock_t expiration = curr->get_node()->get_future_value_expiration(); - curr->read_from(NULL); - Promise *valuepromise = new Promise(curr, value, expiration); - promises->push_back(valuepromise); + struct future_value fv = curr->get_node()->get_future_value(); + Promise *promise = new Promise(curr, fv); + value = fv.value; + curr->set_read_from_promise(promise); + promises->push_back(promise); + mo_graph->startChanges(); + updated = r_modification_order(curr, promise); + mo_graph->commitChanges(); } get_thread(curr)->set_return_value(value); return updated; @@ -459,9 +905,19 @@ bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw) * * @return True if synchronization was updated; false otherwise */ -bool ModelChecker::process_mutex(ModelAction *curr) { - std::mutex *mutex = (std::mutex *)curr->get_location(); - struct std::mutex_state *state = mutex->get_state(); +bool ModelChecker::process_mutex(ModelAction *curr) +{ + std::mutex *mutex = NULL; + struct std::mutex_state *state = NULL; + + if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) { + mutex = (std::mutex *)curr->get_location(); + state = mutex->get_state(); + } else if (curr->is_wait()) { + mutex = (std::mutex *)curr->get_value(); + state = mutex->get_state(); + } + switch (curr->get_type()) { case ATOMIC_TRYLOCK: { bool success = !state->islocked; @@ -474,10 +930,8 @@ bool ModelChecker::process_mutex(ModelAction *curr) { } //otherwise fall into the lock case case ATOMIC_LOCK: { - if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) { - printf("Lock access before initialization\n"); - set_assert(); - } + if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) + assert_bug("Lock access before initialization"); state->islocked = true; ModelAction *unlock = get_last_unlock(curr); //synchronize with the previous unlock statement @@ -491,7 +945,34 @@ bool ModelChecker::process_mutex(ModelAction *curr) { //unlock the lock state->islocked = false; //wake up the other threads - action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location()); + action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location()); + //activate all the waiting threads + for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) { + scheduler->wake(get_thread(*rit)); + } + waiters->clear(); + break; + } + case ATOMIC_WAIT: { + 
//unlock the lock + state->islocked = false; + //wake up the other threads + action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value()); + //activate all the waiting threads + for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) { + scheduler->wake(get_thread(*rit)); + } + waiters->clear(); + //check whether we should go to sleep or not...simulate spurious failures + if (curr->get_node()->get_misc() == 0) { + get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr); + //disable us + scheduler->sleep(get_thread(curr)); + } + break; + } + case ATOMIC_NOTIFY_ALL: { + action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location()); //activate all the waiting threads for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) { scheduler->wake(get_thread(*rit)); @@ -499,12 +980,43 @@ bool ModelChecker::process_mutex(ModelAction *curr) { waiters->clear(); break; } + case ATOMIC_NOTIFY_ONE: { + action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location()); + int wakeupthread = curr->get_node()->get_misc(); + action_list_t::iterator it = waiters->begin(); + advance(it, wakeupthread); + scheduler->wake(get_thread(*it)); + waiters->erase(it); + break; + } + default: ASSERT(0); } return false; } +void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader) +{ + /* Do more ambitious checks now that mo is more complete */ + if (mo_may_allow(writer, reader)) { + Node *node = reader->get_node(); + + /* Find an ancestor thread which exists at the time of the reader */ + Thread *write_thread = get_thread(writer); + while (id_to_int(write_thread->get_id()) >= node->get_num_threads()) + write_thread = write_thread->get_parent(); + + struct future_value fv = { + writer->get_value(), + writer->get_seq_number() + params.maxfuturedelay, + write_thread->get_id(), + }; + if (node->add_future_value(fv)) + set_latest_backtrack(reader); + } +} + /** * Process a write ModelAction * @param curr The ModelAction to process @@ -518,20 +1030,68 @@ bool ModelChecker::process_write(ModelAction *curr) if (promises->size() == 0) { for (unsigned int i = 0; i < futurevalues->size(); i++) { struct PendingFutureValue pfv = (*futurevalues)[i]; - if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) && - (!priv->next_backtrack || *pfv.act > *priv->next_backtrack)) - priv->next_backtrack = pfv.act; + add_future_value(pfv.writer, pfv.act); } - futurevalues->resize(0); + futurevalues->clear(); } mo_graph->commitChanges(); - mo_check_promises(curr->get_tid(), curr); + mo_check_promises(curr, false); get_thread(curr)->set_return_value(VALUE_NONE); return updated_mod_order || updated_promises; } +/** + * Process a fence ModelAction + * @param curr The ModelAction to process + * @return True if synchronization was updated + */ +bool ModelChecker::process_fence(ModelAction *curr) +{ + /* + * fence-relaxed: no-op + * fence-release: only log the occurence (not in this function), for + * use in later synchronization + * fence-acquire (this function): search for hypothetical release + * sequences + */ + bool updated = false; + if (curr->is_acquire()) { + action_list_t *list = action_trace; + action_list_t::reverse_iterator rit; + /* Find X : is_read(X) && X --sb-> curr */ + for (rit = list->rbegin(); rit != list->rend(); rit++) { + ModelAction *act = *rit; + if (act == curr) + continue; + if (act->get_tid() != curr->get_tid()) + continue; + /* Stop at 
the beginning of the thread */ + if (act->is_thread_start()) + break; + /* Stop once we reach a prior fence-acquire */ + if (act->is_fence() && act->is_acquire()) + break; + if (!act->is_read()) + continue; + /* read-acquire will find its own release sequences */ + if (act->is_acquire()) + continue; + + /* Establish hypothetical release sequences */ + rel_heads_list_t release_heads; + get_release_seq_heads(curr, act, &release_heads); + for (unsigned int i = 0; i < release_heads.size(); i++) + if (!curr->synchronize_with(release_heads[i])) + set_bad_synchronization(); + if (release_heads.size() != 0) + updated = true; + } + } + return updated; +} + /** * @brief Process the current action for thread-related activity * @@ -549,33 +1109,40 @@ bool ModelChecker::process_thread_action(ModelAction *curr) switch (curr->get_type()) { case THREAD_CREATE: { - Thread *th = (Thread *)curr->get_location(); + thrd_t *thrd = (thrd_t *)curr->get_location(); + struct thread_params *params = (struct thread_params *)curr->get_value(); + Thread *th = new Thread(thrd, params->func, params->arg); + add_thread(th); th->set_creation(curr); + /* Promises can be satisfied by children */ + for (unsigned int i = 0; i < promises->size(); i++) { + Promise *promise = (*promises)[i]; + if (promise->thread_is_available(curr->get_tid())) + promise->add_thread(th->get_id()); + } break; } case THREAD_JOIN: { - Thread *waiting, *blocking; - waiting = get_thread(curr); - blocking = (Thread *)curr->get_location(); - if (!blocking->is_complete()) { - blocking->push_wait_list(curr); - scheduler->sleep(waiting); - } else { - do_complete_join(curr); - updated = true; /* trigger rel-seq checks */ - } + Thread *blocking = curr->get_thread_operand(); + ModelAction *act = get_last_action(blocking->get_id()); + curr->synchronize_with(act); + updated = true; /* trigger rel-seq checks */ break; } case THREAD_FINISH: { Thread *th = get_thread(curr); while (!th->wait_list_empty()) { ModelAction *act = th->pop_wait_list(); - Thread *wake = get_thread(act); - scheduler->wake(wake); - do_complete_join(act); - updated = true; /* trigger rel-seq checks */ + scheduler->wake(get_thread(act)); } th->complete(); + /* Completed thread can't satisfy promises */ + for (unsigned int i = 0; i < promises->size(); i++) { + Promise *promise = (*promises)[i]; + if (promise->thread_is_available(th->get_id())) + if (promise->eliminate_thread(th->get_id())) + priv->failed_promise = true; + } updated = true; /* trigger rel-seq checks */ break; } @@ -656,8 +1223,7 @@ void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_qu } /* See if we have realized a data race */ - if (checkDataRaces()) - set_assert(); + checkDataRaces(); } /** @@ -667,44 +1233,53 @@ void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_qu * initializing clock vectors, and computing the promises to fulfill. * * @param curr The current action, as passed from the user context; may be - * freed/invalidated after the execution of this function - * @return The current action, as processed by the ModelChecker. Is only the - * same as the parameter @a curr if this is a newly-explored action. 
+ * freed/invalidated after the execution of this function, with a different + * action "returned" its place (pass-by-reference) + * @return True if curr is a newly-explored action; false otherwise */ -ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr) +bool ModelChecker::initialize_curr_action(ModelAction **curr) { ModelAction *newcurr; - if (curr->is_rmwc() || curr->is_rmw()) { - newcurr = process_rmw(curr); - delete curr; + if ((*curr)->is_rmwc() || (*curr)->is_rmw()) { + newcurr = process_rmw(*curr); + delete *curr; if (newcurr->is_rmw()) compute_promises(newcurr); - return newcurr; + + *curr = newcurr; + return false; } - curr->set_seq_number(get_next_seq_num()); + (*curr)->set_seq_number(get_next_seq_num()); - newcurr = node_stack->explore_action(curr, scheduler->get_enabled()); + newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array()); if (newcurr) { /* First restore type and order in case of RMW operation */ - if (curr->is_rmwr()) - newcurr->copy_typeandorder(curr); + if ((*curr)->is_rmwr()) + newcurr->copy_typeandorder(*curr); - ASSERT(curr->get_location() == newcurr->get_location()); - newcurr->copy_from_new(curr); + ASSERT((*curr)->get_location() == newcurr->get_location()); + newcurr->copy_from_new(*curr); /* Discard duplicate ModelAction; use action from NodeStack */ - delete curr; + delete *curr; /* Always compute new clock vector */ newcurr->create_cv(get_parent_action(newcurr->get_tid())); + + *curr = newcurr; + return false; /* Action was explored previously */ } else { - newcurr = curr; + newcurr = *curr; /* Always compute new clock vector */ newcurr->create_cv(get_parent_action(newcurr->get_tid())); + + /* Assign most recent release fence */ + newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid())); + /* * Perform one-time actions when pushing new ModelAction onto * NodeStack @@ -713,24 +1288,66 @@ ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr) compute_promises(newcurr); else if (newcurr->is_relseq_fixup()) compute_relseq_breakwrites(newcurr); + else if (newcurr->is_wait()) + newcurr->get_node()->set_misc_max(2); + else if (newcurr->is_notify_one()) { + newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size()); + } + return true; /* This was a new ModelAction */ + } +} + +/** + * @brief Establish reads-from relation between two actions + * + * Perform basic operations involved with establishing a concrete rf relation, + * including setting the ModelAction data and checking for release sequences. + * + * @param act The action that is reading (must be a read) + * @param rf The action from which we are reading (must be a write) + * + * @return True if this read established synchronization + */ +bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf) +{ + act->set_read_from(rf); + if (rf != NULL && act->is_acquire()) { + rel_heads_list_t release_heads; + get_release_seq_heads(act, act, &release_heads); + int num_heads = release_heads.size(); + for (unsigned int i = 0; i < release_heads.size(); i++) + if (!act->synchronize_with(release_heads[i])) { + set_bad_synchronization(); + num_heads--; + } + return num_heads > 0; } - return newcurr; + return false; } /** - * This method checks whether a model action is enabled at the given point. - * At this point, it checks whether a lock operation would be successful at this point. - * If not, it puts the thread in a waiter list. + * @brief Check whether a model action is enabled. 
+ * + * Checks whether a lock or join operation would be successful (i.e., is the + * lock already locked, or is the joined thread already complete). If not, put + * the action in a waiter list. + * * @param curr is the ModelAction to check whether it is enabled. * @return a bool that indicates whether the action is enabled. */ bool ModelChecker::check_action_enabled(ModelAction *curr) { if (curr->is_lock()) { - std::mutex * lock = (std::mutex *)curr->get_location(); - struct std::mutex_state * state = lock->get_state(); + std::mutex *lock = (std::mutex *)curr->get_location(); + struct std::mutex_state *state = lock->get_state(); if (state->islocked) { //Stick the action in the appropriate waiting queue - lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr); + get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr); + return false; + } + } else if (curr->get_type() == THREAD_JOIN) { + Thread *blocking = (Thread *)curr->get_location(); + if (!blocking->is_complete()) { + blocking->push_wait_list(curr); return false; } } @@ -746,40 +1363,41 @@ bool ModelChecker::check_action_enabled(ModelAction *curr) { * execution when running permutations of previously-observed executions. * * @param curr The current action to process - * @return The next Thread that must be executed. May be NULL if ModelChecker - * makes no choice (e.g., according to replay execution, combining RMW actions, - * etc.) + * @return The ModelAction that is actually executed; may be different than + * curr; may be NULL, if the current action is not enabled to run */ -Thread * ModelChecker::check_current_action(ModelAction *curr) +ModelAction * ModelChecker::check_current_action(ModelAction *curr) { ASSERT(curr); bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw(); if (!check_action_enabled(curr)) { /* Make the execution look like we chose to run this action - * much later, when a lock is actually available to release */ - get_current_thread()->set_pending(curr); - scheduler->sleep(get_current_thread()); - return get_next_thread(NULL); + * much later, when a lock/join can succeed */ + get_thread(curr)->set_pending(curr); + scheduler->sleep(get_thread(curr)); + return NULL; } - wake_up_sleeping_actions(curr); + bool newly_explored = initialize_curr_action(&curr); - ModelAction *newcurr = initialize_curr_action(curr); + DBG(); + if (DBG_ENABLED()) + curr->print(); + wake_up_sleeping_actions(curr); /* Add the action to lists before any other model-checking tasks */ if (!second_part_of_rmw) - add_action_to_lists(newcurr); + add_action_to_lists(curr); /* Build may_read_from set for newly-created actions */ - if (curr == newcurr && curr->is_read()) - build_reads_from_past(curr); - curr = newcurr; + if (newly_explored && curr->is_read()) + build_may_read_from(curr); /* Initialize work_queue with the "current action" work */ work_queue_t work_queue(1, CheckCurrWorkEntry(curr)); - while (!work_queue.empty()) { + while (!work_queue.empty() && !has_asserted()) { WorkQueueEntry work = work_queue.front(); work_queue.pop_front(); @@ -798,6 +1416,9 @@ Thread * ModelChecker::check_current_action(ModelAction *curr) if (act->is_write() && process_write(act)) update = true; + if (act->is_fence() && process_fence(act)) + update_all = true; + if (act->is_mutex_op() && process_mutex(act)) update_all = true; @@ -820,8 +1441,14 @@ Thread * ModelChecker::check_current_action(ModelAction *curr) if (act->is_read()) { const ModelAction *rf = act->get_reads_from(); - if (rf != NULL && r_modification_order(act, 
rf)) - updated = true; + const Promise *promise = act->get_reads_from_promise(); + if (rf) { + if (r_modification_order(act, rf)) + updated = true; + } else if (promise) { + if (r_modification_order(act, promise)) + updated = true; + } } if (act->is_write()) { if (w_modification_order(act)) @@ -841,93 +1468,105 @@ Thread * ModelChecker::check_current_action(ModelAction *curr) check_curr_backtracking(curr); set_backtracking(curr); - return get_next_thread(curr); + return curr; } -/** - * Complete a THREAD_JOIN operation, by synchronizing with the THREAD_FINISH - * operation from the Thread it is joining with. Must be called after the - * completion of the Thread in question. - * @param join The THREAD_JOIN action - */ -void ModelChecker::do_complete_join(ModelAction *join) +void ModelChecker::check_curr_backtracking(ModelAction *curr) { - Thread *blocking = (Thread *)join->get_location(); - ModelAction *act = get_last_action(blocking->get_id()); - join->synchronize_with(act); -} - -void ModelChecker::check_curr_backtracking(ModelAction * curr) { Node *currnode = curr->get_node(); Node *parnode = currnode->get_parent(); - if ((!parnode->backtrack_empty() || + if ((parnode && !parnode->backtrack_empty()) || + !currnode->misc_empty() || !currnode->read_from_empty() || !currnode->future_value_empty() || !currnode->promise_empty() || - !currnode->relseq_break_empty()) - && (!priv->next_backtrack || - *curr > *priv->next_backtrack)) { - priv->next_backtrack = curr; + !currnode->relseq_break_empty()) { + set_latest_backtrack(curr); } } -bool ModelChecker::promises_expired() { - for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) { - Promise *promise = (*promises)[promise_index]; - if (promise->get_expiration()used_sequence_numbers) { +bool ModelChecker::promises_expired() const +{ + for (unsigned int i = 0; i < promises->size(); i++) { + Promise *promise = (*promises)[i]; + if (promise->get_expiration() < priv->used_sequence_numbers) return true; - } } return false; } -/** @return whether the current partial trace must be a prefix of a - * feasible trace. */ -bool ModelChecker::isfeasibleprefix() { - return promises->size() == 0 && pending_rel_seqs->size() == 0; +/** + * This is the strongest feasibility check available. + * @return whether the current trace (partial or complete) must be a prefix of + * a feasible trace. + */ +bool ModelChecker::isfeasibleprefix() const +{ + return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq(); } -/** @return whether the current partial trace is feasible. 
*/ -bool ModelChecker::isfeasible() { - if (DBG_ENABLED() && mo_graph->checkForRMWViolation()) - DEBUG("Infeasible: RMW violation\n"); - - return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW(); +/** + * Print disagnostic information about an infeasible execution + * @param prefix A string to prefix the output with; if NULL, then a default + * message prefix will be provided + */ +void ModelChecker::print_infeasibility(const char *prefix) const +{ + char buf[100]; + char *ptr = buf; + if (mo_graph->checkForCycles()) + ptr += sprintf(ptr, "[mo cycle]"); + if (priv->failed_promise) + ptr += sprintf(ptr, "[failed promise]"); + if (priv->too_many_reads) + ptr += sprintf(ptr, "[too many reads]"); + if (priv->no_valid_reads) + ptr += sprintf(ptr, "[no valid reads-from]"); + if (priv->bad_synchronization) + ptr += sprintf(ptr, "[bad sw ordering]"); + if (promises_expired()) + ptr += sprintf(ptr, "[promise expired]"); + if (promises->size() != 0) + ptr += sprintf(ptr, "[unresolved promise]"); + if (ptr != buf) + model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf); } -/** @return whether the current partial trace is feasible other than - * multiple RMW reading from the same store. */ -bool ModelChecker::isfeasibleotherthanRMW() { - if (DBG_ENABLED()) { - if (mo_graph->checkForCycles()) - DEBUG("Infeasible: modification order cycles\n"); - if (failed_promise) - DEBUG("Infeasible: failed promise\n"); - if (too_many_reads) - DEBUG("Infeasible: too many reads\n"); - if (bad_synchronization) - DEBUG("Infeasible: bad synchronization ordering\n"); - if (promises_expired()) - DEBUG("Infeasible: promises expired\n"); - } - return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired(); +/** + * Returns whether the current completed trace is feasible, except for pending + * release sequences. + */ +bool ModelChecker::is_feasible_prefix_ignore_relseq() const +{ + return !is_infeasible() && promises->size() == 0; } -/** Returns whether the current completed trace is feasible. */ -bool ModelChecker::isfinalfeasible() { - if (DBG_ENABLED() && promises->size() != 0) - DEBUG("Infeasible: unrevolved promises\n"); - - return isfeasible() && promises->size() == 0; +/** + * Check if the current partial trace is infeasible. Does not check any + * end-of-execution flags, which might rule out the execution. Thus, this is + * useful only for ruling an execution as infeasible. + * @return whether the current partial trace is infeasible. + */ +bool ModelChecker::is_infeasible() const +{ + return mo_graph->checkForCycles() || + priv->no_valid_reads || + priv->failed_promise || + priv->too_many_reads || + priv->bad_synchronization || + promises_expired(); } /** Close out a RMWR by converting previous RMWR into a RMW or READ. */ ModelAction * ModelChecker::process_rmw(ModelAction *act) { ModelAction *lastread = get_last_action(act->get_tid()); lastread->process_rmw(act); - if (act->is_rmw() && lastread->get_reads_from()!=NULL) { - mo_graph->addRMWEdge(lastread->get_reads_from(), lastread); + if (act->is_rmw()) { + if (lastread->get_reads_from()) + mo_graph->addRMWEdge(lastread->get_reads_from(), lastread); + else + mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread); mo_graph->commitChanges(); } return lastread; @@ -944,16 +1583,16 @@ ModelAction * ModelChecker::process_rmw(ModelAction *act) { * * If so, we decide that the execution is no longer feasible. 
*/ -void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) { +void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) +{ if (params.maxreads != 0) { - if (curr->get_node()->get_read_from_size() <= 1) return; //Must make sure that execution is currently feasible... We could //accidentally clear by rolling back - if (!isfeasible()) + if (is_infeasible()) return; - std::vector *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location()); + std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); int tid = id_to_int(curr->get_tid()); /* Skip checks */ @@ -971,44 +1610,40 @@ void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) { action_list_t::reverse_iterator ritcopy = rit; //See if we have enough reads from the same value int count = 0; - for (; count < params.maxreads; rit++,count++) { - if (rit==list->rend()) + for (; count < params.maxreads; rit++, count++) { + if (rit == list->rend()) return; ModelAction *act = *rit; if (!act->is_read()) return; - + if (act->get_reads_from() != rf) return; if (act->get_node()->get_read_from_size() <= 1) return; } - for (int i = 0; iget_node()->get_read_from_size(); i++) { - //Get write - const ModelAction * write = curr->get_node()->get_read_from_at(i); + for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) { + /* Get write */ + const ModelAction *write = curr->get_node()->get_read_from_at(i); - //Need a different write - if (write==rf) + /* Need a different write */ + if (write == rf) continue; - /* Test to see whether this is a feasible write to read from*/ - mo_graph->startChanges(); - r_modification_order(curr, write); - bool feasiblereadfrom = isfeasible(); - mo_graph->rollbackChanges(); + /* Test to see whether this is a feasible write to read from */ + /** NOTE: all members of read-from set should be + * feasible, so we no longer check it here **/ - if (!feasiblereadfrom) - continue; rit = ritcopy; bool feasiblewrite = true; //new we need to see if this write works for everyone - for (int loop = count; loop>0; loop--,rit++) { - ModelAction *act=*rit; + for (int loop = count; loop > 0; loop--, rit++) { + ModelAction *act = *rit; bool foundvalue = false; - for (int j = 0; jget_node()->get_read_from_size(); j++) { - if (act->get_node()->get_read_from_at(i)==write) { + for (int j = 0; j < act->get_node()->get_read_from_size(); j++) { + if (act->get_node()->get_read_from_at(j) == write) { foundvalue = true; break; } @@ -1019,7 +1654,7 @@ void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) { } } if (feasiblewrite) { - too_many_reads = true; + priv->too_many_reads = true; return; } } @@ -1031,7 +1666,7 @@ void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) { * read. * * Basic idea is the following: Go through each other thread and find - * the lastest action that happened before our read. Two cases: + * the last action that happened before our read. Two cases: * * (1) The action is a write => that write must either occur before * the write we read from or be the write we read from. @@ -1040,33 +1675,67 @@ void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) { * must occur before the write we read from or be the same write. * * @param curr The current action. Must be a read. - * @param rf The action that curr reads from. Must be a write. + * @param rf The ModelAction or Promise that curr reads from. Must be a write. 
* @return True if modification order edges were added; false otherwise */ -bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf) +template +bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf) { - std::vector *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location()); + std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); unsigned int i; bool added = false; ASSERT(curr->is_read()); + /* Last SC fence in the current thread */ + ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL); + /* Iterate over all threads */ for (i = 0; i < thrd_lists->size(); i++) { + /* Last SC fence in thread i */ + ModelAction *last_sc_fence_thread_local = NULL; + if (int_to_id((int)i) != curr->get_tid()) + last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL); + + /* Last SC fence in thread i, before last SC fence in current thread */ + ModelAction *last_sc_fence_thread_before = NULL; + if (last_sc_fence_local) + last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local); + /* Iterate over actions in thread, starting from most recent */ action_list_t *list = &(*thrd_lists)[i]; action_list_t::reverse_iterator rit; for (rit = list->rbegin(); rit != list->rend(); rit++) { ModelAction *act = *rit; + if (act->is_write() && !act->equals(rf) && act != curr) { + /* C++, Section 29.3 statement 5 */ + if (curr->is_seqcst() && last_sc_fence_thread_local && + *act < *last_sc_fence_thread_local) { + added = mo_graph->addEdge(act, rf) || added; + break; + } + /* C++, Section 29.3 statement 4 */ + else if (act->is_seqcst() && last_sc_fence_local && + *act < *last_sc_fence_local) { + added = mo_graph->addEdge(act, rf) || added; + break; + } + /* C++, Section 29.3 statement 6 */ + else if (last_sc_fence_thread_before && + *act < *last_sc_fence_thread_before) { + added = mo_graph->addEdge(act, rf) || added; + break; + } + } + /* * Include at most one act per-thread that "happens * before" curr. Don't consider reflexively. */ if (act->happens_before(curr) && act != curr) { if (act->is_write()) { - if (rf != act) { - mo_graph->addEdge(act, rf); - added = true; + if (!act->equals(rf)) { + added = mo_graph->addEdge(act, rf) || added; } } else { const ModelAction *prevreadfrom = act->get_reads_from(); @@ -1074,9 +1743,8 @@ bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf if (prevreadfrom == NULL) continue; - if (rf != prevreadfrom) { - mo_graph->addEdge(prevreadfrom, rf); - added = true; + if (!prevreadfrom->equals(rf)) { + added = mo_graph->addEdge(prevreadfrom, rf) || added; } } break; @@ -1084,71 +1752,15 @@ bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf } } - return added; -} - -/** This method fixes up the modification order when we resolve a - * promises. The basic problem is that actions that occur after the - * read curr could not property add items to the modification order - * for our read. - * - * So for each thread, we find the earliest item that happens after - * the read curr. This is the item we have to fix up with additional - * constraints. If that action is write, we add a MO edge between - * the Action rf and that action. If the action is a read, we add a - * MO edge between the Action rf, and whatever the read accessed. - * - * @param curr is the read ModelAction that we are fixing up MO edges for. - * @param rf is the write ModelAction that curr reads from. 
- * - */ -void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf) -{ - std::vector *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location()); - unsigned int i; - ASSERT(curr->is_read()); - - /* Iterate over all threads */ - for (i = 0; i < thrd_lists->size(); i++) { - /* Iterate over actions in thread, starting from most recent */ - action_list_t *list = &(*thrd_lists)[i]; - action_list_t::reverse_iterator rit; - ModelAction *lastact = NULL; - - /* Find last action that happens after curr that is either not curr or a rmw */ - for (rit = list->rbegin(); rit != list->rend(); rit++) { - ModelAction *act = *rit; - if (curr->happens_before(act) && (curr != act || curr->is_rmw())) { - lastact = act; - } else - break; - } + /* + * All compatible, thread-exclusive promises must be ordered after any + * concrete loads from the same thread + */ + for (unsigned int i = 0; i < promises->size(); i++) + if ((*promises)[i]->is_compatible_exclusive(curr)) + added = mo_graph->addEdge(rf, (*promises)[i]) || added; - /* Include at most one act per-thread that "happens before" curr */ - if (lastact != NULL) { - if (lastact==curr) { - //Case 1: The resolved read is a RMW, and we need to make sure - //that the write portion of the RMW mod order after rf - - mo_graph->addEdge(rf, lastact); - } else if (lastact->is_read()) { - //Case 2: The resolved read is a normal read and the next - //operation is a read, and we need to make sure the value read - //is mod ordered after rf - - const ModelAction *postreadfrom = lastact->get_reads_from(); - if (postreadfrom != NULL&&rf != postreadfrom) - mo_graph->addEdge(rf, postreadfrom); - } else { - //Case 3: The resolved read is a normal read and the next - //operation is a write, and we need to make sure that the - //write is mod ordered after rf - if (lastact!=rf) - mo_graph->addEdge(rf, lastact); - } - break; - } - } + return added; } /** @@ -1175,7 +1787,7 @@ void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelActio */ bool ModelChecker::w_modification_order(ModelAction *curr) { - std::vector *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location()); + std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); unsigned int i; bool added = false; ASSERT(curr->is_write()); @@ -1183,15 +1795,22 @@ bool ModelChecker::w_modification_order(ModelAction *curr) if (curr->is_seqcst()) { /* We have to at least see the last sequentially consistent write, so we are initialized. 
*/ - ModelAction *last_seq_cst = get_last_seq_cst(curr); + ModelAction *last_seq_cst = get_last_seq_cst_write(curr); if (last_seq_cst != NULL) { - mo_graph->addEdge(last_seq_cst, curr); - added = true; + added = mo_graph->addEdge(last_seq_cst, curr) || added; } } + /* Last SC fence in the current thread */ + ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL); + /* Iterate over all threads */ for (i = 0; i < thrd_lists->size(); i++) { + /* Last SC fence in thread i, before last SC fence in current thread */ + ModelAction *last_sc_fence_thread_before = NULL; + if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid()) + last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local); + /* Iterate over actions in thread, starting from most recent */ action_list_t *list = &(*thrd_lists)[i]; action_list_t::reverse_iterator rit; @@ -1202,7 +1821,7 @@ bool ModelChecker::w_modification_order(ModelAction *curr) * 1) If RMW and it actually read from something, then we * already have all relevant edges, so just skip to next * thread. - * + * * 2) If RMW and it didn't read from anything, we should add * whatever edge we can get to speed up convergence. * @@ -1210,14 +1829,21 @@ bool ModelChecker::w_modification_order(ModelAction *curr) * continue processing list. */ if (curr->is_rmw()) { - if (curr->get_reads_from()!=NULL) + if (curr->get_reads_from() != NULL) break; - else + else continue; } else continue; } + /* C++, Section 29.3 statement 7 */ + if (last_sc_fence_thread_before && act->is_write() && + *act < *last_sc_fence_thread_before) { + added = mo_graph->addEdge(act, curr) || added; + break; + } + /* * Include at most one act per-thread that "happens * before" curr */ if (act->happens_before(curr)) { /* * Note: if act is RMW, just add edge: * act --mo--> curr * The following edge should be handled elsewhere: * readfrom(act) --mo--> act */ if (act->is_write()) - mo_graph->addEdge(act, curr); - else if (act->is_read()) { + added = mo_graph->addEdge(act, curr) || added; + else if (act->is_read()) { //if previous read accessed a null, just keep going if (act->get_reads_from() == NULL) continue; - mo_graph->addEdge(act->get_reads_from(), curr); + added = mo_graph->addEdge(act->get_reads_from(), curr) || added; } - added = true; break; } else if (act->is_read() && !act->could_synchronize_with(curr) && !act->same_thread(curr)) { /* We have an action that: (1) did not happen before us (2) is a read and we are a write (3) cannot synchronize with us (4) is in a different thread => - that read could potentially read from our write. + that read could potentially read from our write. Note that + these checks are overly conservative at this point; we'll + do more checks before actually removing the + pendingfuturevalue.
+ */ if (thin_air_constraint_may_allow(curr, act)) { - if (isfeasible() || - (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) { - struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act}; - futurevalues->push_back(pfv); - } + if (!is_infeasible()) + futurevalues->push_back(PendingFutureValue(curr, act)); + else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from()) + add_future_value(curr, act); } } } } + /* + * All compatible, thread-exclusive promises must be ordered after any + * concrete stores to the same thread, or else they can be merged with + * this store later + */ + for (unsigned int i = 0; i < promises->size(); i++) + if ((*promises)[i]->is_compatible_exclusive(curr)) + added = mo_graph->addEdge(curr, (*promises)[i]) || added; + return added; } /** Arbitrary reads from the future are not allowed. Section 29.3 * part 9 places some constraints. This method checks one result of the * constraint. Others require compiler support. */ -bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) { +bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) +{ if (!writer->is_rmw()) return true; @@ -1284,6 +1922,42 @@ bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, con return true; } +/** + * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places + * some constraints. This method checks the following constraint (others + * require compiler support): + * + * If X --hb-> Y --mo-> Z, then X should not read from Z. + */ +bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader) +{ + std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location()); + unsigned int i; + /* Iterate over all threads */ + for (i = 0; i < thrd_lists->size(); i++) { + const ModelAction *write_after_read = NULL; + + /* Iterate over actions in thread, starting from most recent */ + action_list_t *list = &(*thrd_lists)[i]; + action_list_t::reverse_iterator rit; + for (rit = list->rbegin(); rit != list->rend(); rit++) { + ModelAction *act = *rit; + + /* Don't disallow due to act == reader */ + if (!reader->happens_before(act) || reader == act) + break; + else if (act->is_write()) + write_after_read = act; + else if (act->is_read() && act->get_reads_from() != NULL) + write_after_read = act->get_reads_from(); + } + + if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer)) + return false; + } + return true; +} + /** * Finds the head(s) of the release sequence(s) containing a given ModelAction.
* The ModelAction under consideration is expected to be taking part in @@ -1316,11 +1990,13 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, if (mo_graph->checkForCycles()) return false; - while (rf) { + for ( ; rf != NULL; rf = rf->get_reads_from()) { ASSERT(rf->is_write()); if (rf->is_release()) release_heads->push_back(rf); + else if (rf->get_last_fence_release()) + release_heads->push_back(rf->get_last_fence_release()); if (!rf->is_rmw()) break; /* End of RMW chain */ @@ -1334,8 +2010,6 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, /* acq_rel RMW is a sufficient stopping condition */ if (rf->is_acquire() && rf->is_release()) return true; /* complete */ - - rf = rf->get_reads_from(); }; if (!rf) { /* read from future: need to settle this later */ @@ -1346,10 +2020,19 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, if (rf->is_release()) return true; /* complete */ - /* else relaxed write; check modification order for contiguous subsequence - * -> rf must be same thread as release */ + /* else relaxed write + * - check for fence-release in the same thread (29.8, stmt. 3) + * - check modification order for contiguous subsequence + * -> rf must be same thread as release */ + + const ModelAction *fence_release = rf->get_last_fence_release(); + /* Synchronize with a fence-release unconditionally; we don't need to + * find any more "contiguous subsequence..." for it */ + if (fence_release) + release_heads->push_back(fence_release); + int tid = id_to_int(rf->get_tid()); - std::vector *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location()); + std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location()); action_list_t *list = &(*thrd_lists)[tid]; action_list_t::const_reverse_iterator rit; @@ -1357,14 +2040,21 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, rit = std::find(list->rbegin(), list->rend(), rf); ASSERT(rit != list->rend()); - /* Find the last write/release */ - for (; rit != list->rend(); rit++) + /* Find the last {write,fence}-release */ + for (; rit != list->rend(); rit++) { + if (fence_release && *(*rit) < *fence_release) + break; if ((*rit)->is_release()) break; + } if (rit == list->rend()) { /* No write-release in this thread */ return true; /* complete */ - } + } else if (fence_release && *(*rit) < *fence_release) { + /* The fence-release is more recent (and so, "stronger") than + * the most recent write-release */ + return true; /* complete */ + } /* else, need to establish contiguous release sequence */ ModelAction *release = *rit; ASSERT(rf->same_thread(release)); @@ -1384,7 +2074,7 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, ModelAction *last = get_last_action(int_to_id(i)); Thread *th = get_thread(int_to_id(i)); if ((last && rf->happens_before(last)) || - !scheduler->is_enabled(th) || + !is_enabled(th) || th->is_complete()) future_ordered = true; @@ -1400,8 +2090,8 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, continue; } - /* Only writes can break release sequences */ - if (!act->is_write()) + /* Only non-RMW writes can break release sequences */ + if (!act->is_write() || act->is_rmw()) continue; /* Check modification order */ @@ -1437,22 +2127,29 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, } /** - * A public interface for getting the release sequence head(s) with which a + * An interface for getting the release sequence head(s) with which a * given ModelAction must synchronize. 
This function only returns a non-empty * result when it can locate a release sequence head with certainty. Otherwise, * it may mark the internal state of the ModelChecker so that it will handle - * the release sequence at a later time, causing @a act to update its + * the release sequence at a later time, causing @a acquire to update its * synchronization at some later point in execution. - * @param act The 'acquire' action that may read from a release sequence + * + * @param acquire The 'acquire' action that may synchronize with a release + * sequence + * @param read The read action that may read from a release sequence; this may + * be the same as acquire, or else an earlier action in the same thread (i.e., + * when 'acquire' is a fence-acquire) * @param release_heads A pass-by-reference return parameter. Will be filled * with the head(s) of the release sequence(s), if they exists with certainty. * @see ModelChecker::release_seq_heads */ -void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads) +void ModelChecker::get_release_seq_heads(ModelAction *acquire, + ModelAction *read, rel_heads_list_t *release_heads) { - const ModelAction *rf = act->get_reads_from(); + const ModelAction *rf = read->get_reads_from(); struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq)); - sequence->acquire = act; + sequence->acquire = acquire; + sequence->read = read; if (!release_seq_heads(rf, release_heads, sequence)) { /* add act to 'lazy checking' list */ @@ -1478,24 +2175,25 @@ void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *rel bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue) { bool updated = false; - std::vector::iterator it = pending_rel_seqs->begin(); + std::vector< struct release_seq *, SnapshotAlloc >::iterator it = pending_rel_seqs->begin(); while (it != pending_rel_seqs->end()) { struct release_seq *pending = *it; - ModelAction *act = pending->acquire; + ModelAction *acquire = pending->acquire; + const ModelAction *read = pending->read; /* Only resolve sequences on the given location, if provided */ - if (location && act->get_location() != location) { + if (location && read->get_location() != location) { it++; continue; } - const ModelAction *rf = act->get_reads_from(); + const ModelAction *rf = read->get_reads_from(); rel_heads_list_t release_heads; bool complete; complete = release_seq_heads(rf, &release_heads, pending); for (unsigned int i = 0; i < release_heads.size(); i++) { - if (!act->has_synchronized_with(release_heads[i])) { - if (act->synchronize_with(release_heads[i])) + if (!acquire->has_synchronized_with(release_heads[i])) { + if (acquire->synchronize_with(release_heads[i])) updated = true; else set_bad_synchronization(); @@ -1505,15 +2203,16 @@ bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_ if (updated) { /* Re-check all pending release sequences */ work_queue->push_back(CheckRelSeqWorkEntry(NULL)); - /* Re-check act for mo_graph edges */ - work_queue->push_back(MOEdgeWorkEntry(act)); + /* Re-check read-acquire for mo_graph edges */ + if (acquire->is_read()) + work_queue->push_back(MOEdgeWorkEntry(acquire)); /* propagate synchronization to later actions */ action_list_t::reverse_iterator rit = action_trace->rbegin(); - for (; (*rit) != act; rit++) { + for (; (*rit) != acquire; rit++) { ModelAction *propagate = *rit; - if (act->happens_before(propagate)) { - propagate->synchronize_with(act); + if 
(acquire->happens_before(propagate)) { + propagate->synchronize_with(acquire); /* Re-check 'propagate' for mo_graph edges */ work_queue->push_back(MOEdgeWorkEntry(propagate)); } @@ -1528,9 +2227,7 @@ bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_ } // If we resolved promises or data races, see if we have realized a data race. - if (checkDataRaces()) { - set_assert(); - } + checkDataRaces(); return updated; } @@ -1545,18 +2242,48 @@ bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_ void ModelChecker::add_action_to_lists(ModelAction *act) { int tid = id_to_int(act->get_tid()); - action_trace->push_back(act); + ModelAction *uninit = NULL; + int uninit_id = -1; + action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); + if (list->empty() && act->is_atomic_var()) { + uninit = new_uninitialized_action(act->get_location()); + uninit_id = id_to_int(uninit->get_tid()); + list->push_back(uninit); + } + list->push_back(act); - obj_map->get_safe_ptr(act->get_location())->push_back(act); + action_trace->push_back(act); + if (uninit) + action_trace->push_front(uninit); - std::vector *vec = obj_thrd_map->get_safe_ptr(act->get_location()); + std::vector *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location()); if (tid >= (int)vec->size()) vec->resize(priv->next_thread_id); (*vec)[tid].push_back(act); + if (uninit) + (*vec)[uninit_id].push_front(uninit); if ((int)thrd_last_action->size() <= tid) thrd_last_action->resize(get_num_threads()); (*thrd_last_action)[tid] = act; + if (uninit) + (*thrd_last_action)[uninit_id] = uninit; + + if (act->is_fence() && act->is_release()) { + if ((int)thrd_last_fence_release->size() <= tid) + thrd_last_fence_release->resize(get_num_threads()); + (*thrd_last_fence_release)[tid] = act; + } + + if (act->is_wait()) { + void *mutex_loc = (void *) act->get_value(); + get_safe_ptr_action(obj_map, mutex_loc)->push_back(act); + + std::vector *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc); + if (tid >= (int)vec->size()) + vec->resize(priv->next_thread_id); + (*vec)[tid].push_back(act); + } } /** @@ -1573,6 +2300,20 @@ ModelAction * ModelChecker::get_last_action(thread_id_t tid) const return NULL; } +/** + * @brief Get the last fence release performed by a particular Thread + * @param tid The thread ID of the Thread in question + * @return The last fence release in the thread, if one exists; NULL otherwise + */ +ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const +{ + int threadid = id_to_int(tid); + if (threadid < (int)thrd_last_fence_release->size()) + return (*thrd_last_fence_release)[id_to_int(tid)]; + else + return NULL; +} + /** * Gets the last memory_order_seq_cst write (in the total global sequence) * performed on a particular object (i.e., memory location), not including the @@ -1581,10 +2322,10 @@ ModelAction * ModelChecker::get_last_action(thread_id_t tid) const * check * @return The last seq_cst write */ -ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const +ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const { void *location = curr->get_location(); - action_list_t *list = obj_map->get_safe_ptr(location); + action_list_t *list = get_safe_ptr_action(obj_map, location); /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */ action_list_t::reverse_iterator rit; for (rit = list->rbegin(); rit != list->rend(); rit++) @@ -1593,6 +2334,35 @@ ModelAction * 
ModelChecker::get_last_seq_cst(ModelAction *curr) const return NULL; } +/** + * Gets the last memory_order_seq_cst fence (in the total global sequence) + * performed in a particular thread, prior to a particular fence. + * @param tid The ID of the thread to check + * @param before_fence The fence from which to begin the search; if NULL, then + * search for the most recent fence in the thread. + * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL + */ +ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const +{ + /* All fences should have NULL location */ + action_list_t *list = get_safe_ptr_action(obj_map, NULL); + action_list_t::reverse_iterator rit = list->rbegin(); + + if (before_fence) { + for (; rit != list->rend(); rit++) + if (*rit == before_fence) + break; + + ASSERT(*rit == before_fence); + rit++; + } + + for (; rit != list->rend(); rit++) + if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst()) + return *rit; + return NULL; +} + /** * Gets the last unlock operation performed on a particular mutex (i.e., memory * location). This function identifies the mutex according to the current @@ -1604,16 +2374,16 @@ ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const { void *location = curr->get_location(); - action_list_t *list = obj_map->get_safe_ptr(location); + action_list_t *list = get_safe_ptr_action(obj_map, location); /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */ action_list_t::reverse_iterator rit; for (rit = list->rbegin(); rit != list->rend(); rit++) - if ((*rit)->is_unlock()) + if ((*rit)->is_unlock() || (*rit)->is_wait()) return *rit; return NULL; } -ModelAction * ModelChecker::get_parent_action(thread_id_t tid) +ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const { ModelAction *parent = get_last_action(tid); if (!parent) @@ -1626,7 +2396,7 @@ ModelAction * ModelChecker::get_parent_action(thread_id_t tid) * @param tid The thread whose clock vector we want * @return Desired clock vector */ -ClockVector * ModelChecker::get_cv(thread_id_t tid) +ClockVector * ModelChecker::get_cv(thread_id_t tid) const { return get_parent_action(tid)->get_cv(); } @@ -1639,42 +2409,44 @@ ClockVector * ModelChecker::get_cv(thread_id_t tid) */ bool ModelChecker::resolve_promises(ModelAction *write) { - bool resolved = false; - std::vector threads_to_check; + bool haveResolved = false; + std::vector< ModelAction *, ModelAlloc > actions_to_check; + promise_list_t mustResolve, resolved; for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) { Promise *promise = (*promises)[promise_index]; if (write->get_node()->get_promise(i)) { ModelAction *read = promise->get_action(); - if (read->is_rmw()) { - mo_graph->addRMWEdge(write, read); - } - read->read_from(write); - //First fix up the modification order for actions that happened - //before the read - r_modification_order(read, write); - //Next fix up the modification order for actions that happened - //after the read. 
- post_r_modification_order(read, write); + read_from(read, write); //Make sure the promise's value matches the write's value - ASSERT(promise->get_value() == write->get_value()); - delete(promise); - + ASSERT(promise->is_compatible(write)); + mo_graph->resolvePromise(read, write, &mustResolve); + + resolved.push_back(promise); promises->erase(promises->begin() + promise_index); - threads_to_check.push_back(read->get_tid()); + actions_to_check.push_back(read); - resolved = true; + haveResolved = true; } else promise_index++; } + for (unsigned int i = 0; i < mustResolve.size(); i++) { + if (std::find(resolved.begin(), resolved.end(), mustResolve[i]) + == resolved.end()) + priv->failed_promise = true; + } + for (unsigned int i = 0; i < resolved.size(); i++) + delete resolved[i]; //Check whether reading these writes has made threads unable to //resolve promises - for(unsigned int i=0;iis_read() && !act->could_synchronize_with(curr) && !act->same_thread(curr) && + act->get_location() == curr->get_location() && promise->get_value() == curr->get_value()) { - curr->get_node()->set_promise(i); + curr->get_node()->set_promise(i, act->is_rmw()); } } } @@ -1706,72 +2479,64 @@ void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVec const ModelAction *act = promise->get_action(); if ((old_cv == NULL || !old_cv->synchronized_since(act)) && merge_cv->synchronized_since(act)) { - if (promise->increment_threads(tid)) { + if (promise->eliminate_thread(tid)) { //Promise has failed - failed_promise = true; + priv->failed_promise = true; return; } } } } -/** Checks promises in response to addition to modification order for threads. - * Definitions: - * pthread is the thread that performed the read that created the promise - * - * pread is the read that created the promise - * - * pwrite is either the first write to same location as pread by - * pthread that is sequenced after pread or the value read by the - * first read to the same lcoation as pread by pthread that is - * sequenced after pread.. - * - * 1. If tid=pthread, then we check what other threads are reachable - * through the mode order starting with pwrite. Those threads cannot - * perform a write that will resolve the promise due to modification - * order constraints. - * - * 2. If the tid is not pthread, we check whether pwrite can reach the - * action write through the modification order. If so, that thread - * cannot perform a future write that will resolve the promise due to - * modificatin order constraints. +void ModelChecker::check_promises_thread_disabled() +{ + for (unsigned int i = 0; i < promises->size(); i++) { + Promise *promise = (*promises)[i]; + if (promise->has_failed()) { + priv->failed_promise = true; + return; + } + } +} + +/** + * @brief Checks promises in response to addition to modification order for + * threads. * - * @parem tid The thread that either read from the model action - * write, or actually did the model action write. + * We test whether threads are still available for satisfying promises after an + * addition to our modification order constraints. Those that are unavailable + * are "eliminated". Once all threads are eliminated from satisfying a promise, + * that promise has failed. * - * @parem write The ModelAction representing the relevant write. 
+ * @param act The ModelAction which updated the modification order + * @param is_read_check Should be true if act is a read and we must check for + * updates to the store from which it read (there is a distinction here for + * RMW's, which are both a load and a store) */ +void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check) +{ + const ModelAction *write = is_read_check ? act->get_reads_from() : act; -void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) { - void * location = write->get_location(); for (unsigned int i = 0; i < promises->size(); i++) { Promise *promise = (*promises)[i]; - const ModelAction *act = promise->get_action(); - - //Is this promise on the same location? - if ( act->get_location() != location ) - continue; + const ModelAction *pread = promise->get_action(); - //same thread as the promise - if ( act->get_tid()==tid ) { + // Is this promise on the same location? + if (!pread->same_var(write)) + continue; - //do we have a pwrite for the promise, if not, set it - if (promise->get_write() == NULL ) { - promise->set_write(write); - } - if (mo_graph->checkPromise(write, promise)) { - failed_promise = true; - return; - } + if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) { + priv->failed_promise = true; + return; } - - //Don't do any lookups twice for the same thread - if (promise->has_sync_thread(tid)) + + // Don't do any lookups twice for the same thread + if (!promise->thread_is_available(act->get_tid())) continue; - - if (mo_graph->checkReachable(promise->get_write(), write)) { - if (promise->increment_threads(tid)) { - failed_promise = true; + + if (mo_graph->checkReachable(promise, write)) { + if (mo_graph->checkPromise(write, promise)) { + priv->failed_promise = true; return; } } @@ -1803,29 +2568,21 @@ void ModelChecker::compute_relseq_breakwrites(ModelAction *curr) /** * Build up an initial set of all past writes that this 'read' action may read - * from. This set is determined by the clock vector's "happens before" - * relationship. + * from, as well as any previously-observed future values that must still be valid. + * * @param curr is the current ModelAction that we are exploring; it must be a * 'read' operation. */ -void ModelChecker::build_reads_from_past(ModelAction *curr) +void ModelChecker::build_may_read_from(ModelAction *curr) { - std::vector *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location()); + std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); unsigned int i; ASSERT(curr->is_read()); - ModelAction *last_seq_cst = NULL; + ModelAction *last_sc_write = NULL; - /* Track whether this object has been initialized */ - bool initialized = false; - - if (curr->is_seqcst()) { - last_seq_cst = get_last_seq_cst(curr); - /* We have to at least see the last sequentially consistent write, - so we are initialized. */ - if (last_seq_cst != NULL) - initialized = true; - } + if (curr->is_seqcst()) + last_sc_write = get_last_seq_cst_write(curr); /* Iterate over all threads */ for (i = 0; i < thrd_lists->size(); i++) { @@ -1840,100 +2597,152 @@ void ModelChecker::build_reads_from_past(ModelAction *curr) continue; /* Don't consider more than one seq_cst write if we are a seq_cst read. 
*/ - if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) { - DEBUG("Adding action to may_read_from:\n"); - if (DBG_ENABLED()) { - act->print(); - curr->print(); - } - curr->get_node()->add_read_from(act); + bool allow_read = true; + + if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write) + allow_read = false; + else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act)) + allow_read = false; + + if (allow_read) { + /* Only add feasible reads */ + mo_graph->startChanges(); + r_modification_order(curr, act); + if (!is_infeasible()) + curr->get_node()->add_read_from(act); + mo_graph->rollbackChanges(); } /* Include at most one act per-thread that "happens before" curr */ - if (act->happens_before(curr)) { - initialized = true; + if (act->happens_before(curr)) break; + } + } + + /* Inherit existing, promised future values */ + for (i = 0; i < promises->size(); i++) { + const Promise *promise = (*promises)[i]; + const ModelAction *promise_read = promise->get_action(); + if (promise_read->same_var(curr)) { + /* Only add feasible future-values */ + mo_graph->startChanges(); + r_modification_order(curr, promise); + if (!is_infeasible()) { + const struct future_value fv = promise->get_fv(); + curr->get_node()->add_future_value(fv); } + mo_graph->rollbackChanges(); } } - if (!initialized) { - /** @todo Need a more informative way of reporting errors. */ - printf("ERROR: may read from uninitialized atomic\n"); + /* We may find no valid may-read-from only if the execution is doomed */ + if (!curr->get_node()->get_read_from_size() && curr->get_node()->future_value_empty()) { + priv->no_valid_reads = true; + set_assert(); } - if (DBG_ENABLED() || !initialized) { - printf("Reached read action:\n"); + if (DBG_ENABLED()) { + model_print("Reached read action:\n"); curr->print(); - printf("Printing may_read_from\n"); + model_print("Printing may_read_from\n"); curr->get_node()->print_may_read_from(); - printf("End printing may_read_from\n"); + model_print("End printing may_read_from\n"); + } +} + +bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write) +{ + for ( ; write != NULL; write = write->get_reads_from()) { + /* UNINIT actions don't have a Node, and they never sleep */ + if (write->is_uninitialized()) + return true; + Node *prevnode = write->get_node()->get_parent(); + + bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET; + if (write->is_release() && thread_sleep) + return true; + if (!write->is_rmw()) + return false; } + return true; +} - ASSERT(initialized); +/** + * @brief Create a new action representing an uninitialized atomic + * @param location The memory location of the atomic object + * @return A pointer to a new ModelAction + */ +ModelAction * ModelChecker::new_uninitialized_action(void *location) const +{ + ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction)); + act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread); + act->create_cv(NULL); + return act; } static void print_list(action_list_t *list) { action_list_t::iterator it; - printf("---------------------------------------------------------------------\n"); - printf("Trace:\n"); + model_print("---------------------------------------------------------------------\n"); + + unsigned int hash = 0; for (it = list->begin(); it != 
list->end(); it++) { (*it)->print(); + hash = hash^(hash<<3)^((*it)->hash()); } - printf("---------------------------------------------------------------------\n"); + model_print("HASH %u\n", hash); + model_print("---------------------------------------------------------------------\n"); } #if SUPPORT_MOD_ORDER_DUMP -void ModelChecker::dumpGraph(char *filename) { +void ModelChecker::dumpGraph(char *filename) const +{ char buffer[200]; - sprintf(buffer, "%s.dot",filename); - FILE *file=fopen(buffer, "w"); - fprintf(file, "digraph %s {\n",filename); + sprintf(buffer, "%s.dot", filename); + FILE *file = fopen(buffer, "w"); + fprintf(file, "digraph %s {\n", filename); mo_graph->dumpNodes(file); - ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads()); - + ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads()); + for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) { - ModelAction *action=*it; + ModelAction *action = *it; if (action->is_read()) { - fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid()); - if (action->get_reads_from()!=NULL) + fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid()); + if (action->get_reads_from() != NULL) fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number()); } if (thread_array[action->get_tid()] != NULL) { fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number()); } - - thread_array[action->get_tid()]=action; + + thread_array[action->get_tid()] = action; } - fprintf(file,"}\n"); + fprintf(file, "}\n"); model_free(thread_array); - fclose(file); + fclose(file); } #endif -void ModelChecker::print_summary() +/** @brief Prints an execution trace summary. */ +void ModelChecker::print_summary() const { - printf("\n"); - printf("Number of executions: %d\n", num_executions); - printf("Number of feasible executions: %d\n", num_feasible_executions); - printf("Total nodes created: %d\n", node_stack->get_total_nodes()); - #if SUPPORT_MOD_ORDER_DUMP - scheduler->print(); char buffername[100]; - sprintf(buffername, "exec%04u", num_executions); + sprintf(buffername, "exec%04u", stats.num_total); mo_graph->dumpGraphToFile(buffername); - sprintf(buffername, "graph%04u", num_executions); - dumpGraph(buffername); + sprintf(buffername, "graph%04u", stats.num_total); + dumpGraph(buffername); #endif - if (!isfinalfeasible()) - printf("INFEASIBLE EXECUTION!\n"); + model_print("Execution %d:", stats.num_total); + if (isfeasibleprefix()) + model_print("\n"); + else + print_infeasibility(" INFEASIBLE"); print_list(action_trace); - printf("\n"); + model_print("\n"); } /** @@ -1948,7 +2757,7 @@ void ModelChecker::add_thread(Thread *t) } /** - * Removes a thread from the scheduler. + * Removes a thread from the scheduler. * @param the thread to remove. 
*/ void ModelChecker::remove_thread(Thread *t) @@ -1971,11 +2780,44 @@ Thread * ModelChecker::get_thread(thread_id_t tid) const * @param act The ModelAction * @return A Thread reference */ -Thread * ModelChecker::get_thread(ModelAction *act) const +Thread * ModelChecker::get_thread(const ModelAction *act) const { return get_thread(act->get_tid()); } +/** + * @brief Check if a Thread is currently enabled + * @param t The Thread to check + * @return True if the Thread is currently enabled + */ +bool ModelChecker::is_enabled(Thread *t) const +{ + return scheduler->is_enabled(t); +} + +/** + * @brief Check if a Thread is currently enabled + * @param tid The ID of the Thread to check + * @return True if the Thread is currently enabled + */ +bool ModelChecker::is_enabled(thread_id_t tid) const +{ + return scheduler->is_enabled(tid); +} + +/** + * Switch from a model-checker context to a user-thread context. This is the + * complement of ModelChecker::switch_to_master and must be called from the + * model-checker context + * + * @param thread The user-thread to switch to + */ +void ModelChecker::switch_from_master(Thread *thread) +{ + scheduler->set_current_thread(thread); + Thread::swap(&system_context, thread); +} + /** * Switch from a user-context to the "master thread" context (a.k.a. system * context). This switch is made with the intention of exploring a particular @@ -1985,89 +2827,118 @@ Thread * ModelChecker::get_thread(ModelAction *act) const * @param act The current action that will be explored. May be NULL only if * trace is exiting via an assertion (see ModelChecker::set_assert and * ModelChecker::has_asserted). - * @return Return status from the 'swap' call (i.e., success/fail, 0/-1) + * @return Return the value returned by the current action */ -int ModelChecker::switch_to_master(ModelAction *act) +uint64_t ModelChecker::switch_to_master(ModelAction *act) { DBG(); Thread *old = thread_current(); - set_current_action(act); - old->set_state(THREAD_READY); - return Thread::swap(old, &system_context); + ASSERT(!old->get_pending()); + old->set_pending(act); + if (Thread::swap(old, &system_context) < 0) { + perror("swap threads"); + exit(EXIT_FAILURE); + } + return old->get_return_value(); } /** * Takes the next step in the execution, if possible. - * @return Returns true (success) if a step was taken and false otherwise. + * @param curr The current step to take + * @return Returns the next Thread to run, if any; NULL if this execution + * should terminate */ -bool ModelChecker::take_step() { - if (has_asserted()) - return false; - - Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL; - if (curr) { - if (curr->get_state() == THREAD_READY) { - ASSERT(priv->current_action); - - priv->nextThread = check_current_action(priv->current_action); - priv->current_action = NULL; +Thread * ModelChecker::take_step(ModelAction *curr) +{ + Thread *curr_thrd = get_thread(curr); + ASSERT(curr_thrd->get_state() == THREAD_READY); - if (curr->is_blocked() || curr->is_complete()) - scheduler->remove_thread(curr); - } else { - ASSERT(false); - } - } - Thread *next = scheduler->next_thread(priv->nextThread); + curr = check_current_action(curr); /* Infeasible -> don't take any more steps */ - if (!isfeasible()) - return false; + if (is_infeasible()) + return NULL; + else if (isfeasibleprefix() && have_bug_reports()) { + set_assert(); + return NULL; + } - DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1, - next ? 
id_to_int(next->get_id()) : -1); + if (params.bound != 0 && priv->used_sequence_numbers > params.bound) + return NULL; - /* - * Launch end-of-execution release sequence fixups only when there are: - * - * (1) no more user threads to run (or when execution replay chooses - * the 'model_thread') - * (2) pending release sequences - * (3) pending assertions (i.e., data races) - * (4) no pending promises - */ - if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) && - isfinalfeasible() && !unrealizedraces.empty()) { - printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n", - pending_rel_seqs->size()); - ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ, - std::memory_order_seq_cst, NULL, VALUE_NONE, - model_thread); - set_current_action(fixup); - return true; - } + if (curr_thrd->is_blocked() || curr_thrd->is_complete()) + scheduler->remove_thread(curr_thrd); - /* next == NULL -> don't take any more steps */ - if (!next) - return false; + Thread *next_thrd = get_next_thread(curr); - next->set_state(THREAD_RUNNING); + DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1, + next_thrd ? id_to_int(next_thrd->get_id()) : -1); - if (next->get_pending() != NULL) { - /* restart a pending action */ - set_current_action(next->get_pending()); - next->set_pending(NULL); - next->set_state(THREAD_READY); - return true; - } + return next_thrd; +} - /* Return false only if swap fails with an error */ - return (Thread::swap(&system_context, next) == 0); +/** Wrapper to run the user's main function, with appropriate arguments */ +void user_main_wrapper(void *) +{ + user_main(model->params.argc, model->params.argv); } -/** Runs the current execution until threre are no more steps to take. */ -void ModelChecker::finish_execution() { - DBG(); +/** @brief Run ModelChecker for the user program */ +void ModelChecker::run() +{ + do { + thrd_t user_thread; + Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL); + add_thread(t); + + do { + /* + * Stash next pending action(s) for thread(s). 
There + * should only need to stash one thread's action--the + * thread which just took a step--plus the first step + * for any newly-created thread + */ + for (unsigned int i = 0; i < get_num_threads(); i++) { + thread_id_t tid = int_to_id(i); + Thread *thr = get_thread(tid); + if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) { + switch_from_master(thr); + } + } + + /* Catch assertions from prior take_step or from + * between-ModelAction bugs (e.g., data races) */ + if (has_asserted()) + break; + + /* Consume the next action for a Thread */ + ModelAction *curr = t->get_pending(); + t->set_pending(NULL); + t = take_step(curr); + } while (t && !t->is_model_thread()); - while (take_step()); + /* + * Launch end-of-execution release sequence fixups only when + * the execution is otherwise feasible AND there are: + * + * (1) pending release sequences + * (2) pending assertions that could be invalidated by a change + * in clock vectors (i.e., data races) + * (3) no pending promises + */ + while (!pending_rel_seqs->empty() && + is_feasible_prefix_ignore_relseq() && + !unrealizedraces.empty()) { + model_print("*** WARNING: release sequence fixup action " + "(%zu pending release seuqence(s)) ***\n", + pending_rel_seqs->size()); + ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ, + std::memory_order_seq_cst, NULL, VALUE_NONE, + model_thread); + take_step(fixup); + }; + } while (next_execution()); + + model_print("******* Model-checking complete: *******\n"); + print_stats(); }
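For illustration only: the seq_cst-fence edges added above in r_modification_order() and w_modification_order() (the "C++, Section 29.3 statement 4-7" cases) are exactly what forces the classic store-buffering litmus test below to behave sequentially consistently. This sketch is not part of the patch; it uses plain std::thread, assert(), and invented function names (thread_a, thread_b), whereas a real regression test for this checker would be written against its own user_main()/thrd_create harness.

// Hypothetical store-buffering (SB) litmus test: relaxed accesses separated
// by seq_cst fences. Under the SC-fence rules of C++11 29.3 (statements 4-7),
// the outcome r1 == 0 && r2 == 0 is forbidden, so a checker that enforces
// those rules should never report the final assertion failing.
#include <atomic>
#include <cassert>
#include <thread>

static std::atomic<int> x(0), y(0);
static int r1, r2;

static void thread_a()
{
	x.store(1, std::memory_order_relaxed);
	std::atomic_thread_fence(std::memory_order_seq_cst);
	r1 = y.load(std::memory_order_relaxed);
}

static void thread_b()
{
	y.store(1, std::memory_order_relaxed);
	std::atomic_thread_fence(std::memory_order_seq_cst);
	r2 = x.load(std::memory_order_relaxed);
}

int main()
{
	std::thread a(thread_a), b(thread_b);
	a.join();
	b.join();
	/* The total order over the two seq_cst fences guarantees that at
	 * least one load observes the other thread's store. */
	assert(r1 == 1 || r2 == 1);
	return 0;
}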