#include <stdio.h>
#include <algorithm>
-#include <mutex>
#include <new>
#include <stdarg.h>
#include "common.h"
#include "clockvector.h"
#include "cyclegraph.h"
-#include "promise.h"
#include "datarace.h"
#include "threads-model.h"
#include "bugmessage.h"
+#include "fuzzer.h"
-#define INITIAL_THREAD_ID 0
+#define INITIAL_THREAD_ID 0
/**
* Structure for holding small ModelChecker members that should be snapshotted
/* First thread created will have id INITIAL_THREAD_ID */
next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
- next_backtrack(NULL),
bugs(),
- failed_promise(false),
- too_many_reads(false),
- no_valid_reads(false),
bad_synchronization(false),
asserted(false)
{ }
~model_snapshot_members() {
- for (unsigned int i = 0; i < bugs.size(); i++)
+ for (unsigned int i = 0;i < bugs.size();i++)
delete bugs[i];
bugs.clear();
}
unsigned int next_thread_id;
modelclock_t used_sequence_numbers;
- ModelAction *next_backtrack;
SnapVector<bug_message *> bugs;
- bool failed_promise;
- bool too_many_reads;
- bool no_valid_reads;
/** @brief Incorrectly-ordered synchronization was made */
bool bad_synchronization;
bool asserted;
};
/** @brief Constructor */
-ModelExecution::ModelExecution(ModelChecker *m,
- struct model_params *params,
- Scheduler *scheduler,
- NodeStack *node_stack) :
+ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler, NodeStack *node_stack) :
model(m),
- params(params),
+ params(NULL),
scheduler(scheduler),
action_trace(),
- thread_map(2), /* We'll always need at least 2 threads */
+ thread_map(2), /* We'll always need at least 2 threads */
+ pthread_map(0),
+ pthread_counter(0),
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
- promises(),
- futurevalues(),
- pending_rel_seqs(),
+ mutex_map(),
thrd_last_action(1),
thrd_last_fence_release(),
node_stack(node_stack),
- priv(new struct model_snapshot_members()),
- mo_graph(new CycleGraph())
+ priv(new struct model_snapshot_members ()),
+ mo_graph(new CycleGraph()),
+ fuzzer(new Fuzzer())
{
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
/** @brief Destructor */
ModelExecution::~ModelExecution()
{
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
delete get_thread(int_to_id(i));
- for (unsigned int i = 0; i < promises.size(); i++)
- delete promises[i];
-
delete mo_graph;
delete priv;
}
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
{
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
if (scheduler->is_sleep_set(thr)) {
if (should_wake_up(curr, thr))
bool ModelExecution::is_deadlocked() const
{
bool blocking_threads = false;
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
if (is_enabled(tid))
return false;
*/
bool ModelExecution::is_complete_execution() const
{
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
if (is_enabled(int_to_id(i)))
return false;
return true;
}
-/**
- * @brief Find the last fence-related backtracking conflict for a ModelAction
- *
- * This function performs the search for the most recent conflicting action
- * against which we should perform backtracking, as affected by fence
- * operations. This includes pairs of potentially-synchronizing actions which
- * occur due to fence-acquire or fence-release, and hence should be explored in
- * the opposite execution order.
- *
- * @param act The current action
- * @return The most recent action which conflicts with act due to fences
- */
-ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
-{
- /* Only perform release/acquire fence backtracking for stores */
- if (!act->is_write())
- return NULL;
-
- /* Find a fence-release (or, act is a release) */
- ModelAction *last_release;
- if (act->is_release())
- last_release = act;
- else
- last_release = get_last_fence_release(act->get_tid());
- if (!last_release)
- return NULL;
-
- /* Skip past the release */
- const action_list_t *list = &action_trace;
- action_list_t::const_reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++)
- if (*rit == last_release)
- break;
- ASSERT(rit != list->rend());
-
- /* Find a prior:
- * load-acquire
- * or
- * load --sb-> fence-acquire */
- ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
- ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
- bool found_acquire_fences = false;
- for ( ; rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->same_thread(prev))
- continue;
-
- int tid = id_to_int(prev->get_tid());
-
- if (prev->is_read() && act->same_var(prev)) {
- if (prev->is_acquire()) {
- /* Found most recent load-acquire, don't need
- * to search for more fences */
- if (!found_acquire_fences)
- return NULL;
- } else {
- prior_loads[tid] = prev;
- }
- }
- if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
- found_acquire_fences = true;
- acquire_fences[tid] = prev;
- }
- }
-
- ModelAction *latest_backtrack = NULL;
- for (unsigned int i = 0; i < acquire_fences.size(); i++)
- if (acquire_fences[i] && prior_loads[i])
- if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
- latest_backtrack = acquire_fences[i];
- return latest_backtrack;
-}
/**
- * @brief Find the last backtracking conflict for a ModelAction
- *
- * This function performs the search for the most recent conflicting action
- * against which we should perform backtracking. This primary includes pairs of
- * synchronizing actions which should be explored in the opposite execution
- * order.
- *
- * @param act The current action
- * @return The most recent action which conflicts with act
- */
-ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
-{
- switch (act->get_type()) {
- case ATOMIC_FENCE:
- /* Only seq-cst fences can (directly) cause backtracking */
- if (!act->is_seqcst())
- break;
- case ATOMIC_READ:
- case ATOMIC_WRITE:
- case ATOMIC_RMW: {
- ModelAction *ret = NULL;
-
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (prev->could_synchronize_with(act)) {
- ret = prev;
- break;
- }
- }
-
- ModelAction *ret2 = get_last_fence_conflict(act);
- if (!ret2)
- return ret;
- if (!ret)
- return ret2;
- if (*ret < *ret2)
- return ret2;
- return ret;
- }
- case ATOMIC_LOCK:
- case ATOMIC_TRYLOCK: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->is_conflicting_lock(prev))
- return prev;
- }
- break;
- }
- case ATOMIC_UNLOCK: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_failed_trylock())
- return prev;
- }
- break;
- }
- case ATOMIC_WAIT: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_failed_trylock())
- return prev;
- if (!act->same_thread(prev) && prev->is_notify())
- return prev;
- }
- break;
- }
-
- case ATOMIC_NOTIFY_ALL:
- case ATOMIC_NOTIFY_ONE: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_wait())
- return prev;
- }
- break;
- }
- default:
- break;
- }
- return NULL;
-}
-
-/** This method finds backtracking points where we should try to
- * reorder the parameter ModelAction against.
- *
- * @param the ModelAction to find backtracking points for.
+ * Processes a read model action.
+ * @param curr is the read model action to process.
+ * @param rf_set is the set of model actions we can possibly read from
+ * @return True if processing this read updates the mo_graph.
*/
-void ModelExecution::set_backtracking(ModelAction *act)
+void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
- Thread *t = get_thread(act);
- ModelAction *prev = get_last_conflict(act);
- if (prev == NULL)
- return;
-
- Node *node = prev->get_node()->get_parent();
-
- /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
- int low_tid, high_tid;
- if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
- low_tid = id_to_int(act->get_tid());
- high_tid = low_tid + 1;
- } else {
- low_tid = 0;
- high_tid = get_num_threads();
- }
-
- for (int i = low_tid; i < high_tid; i++) {
- thread_id_t tid = int_to_id(i);
-
- /* Make sure this thread can be enabled here. */
- if (i >= node->get_num_threads())
- break;
-
- /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
- /* Don't backtrack into a point where the thread is disabled or sleeping. */
- if (node->enabled_status(tid) != THREAD_ENABLED)
- continue;
-
- /* Check if this has been explored already */
- if (node->has_been_explored(tid))
- continue;
-
- /* See if fairness allows */
- if (params->fairwindow != 0 && !node->has_priority(tid)) {
- bool unfair = false;
- for (int t = 0; t < node->get_num_threads(); t++) {
- thread_id_t tother = int_to_id(t);
- if (node->is_enabled(tother) && node->has_priority(tother)) {
- unfair = true;
- break;
- }
- }
- if (unfair)
- continue;
- }
-
- /* See if CHESS-like yield fairness allows */
- if (params->yieldon) {
- bool unfair = false;
- for (int t = 0; t < node->get_num_threads(); t++) {
- thread_id_t tother = int_to_id(t);
- if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
- unfair = true;
- break;
- }
- }
- if (unfair)
- continue;
- }
+ SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
+ while(true) {
- /* Cache the latest backtracking point */
- set_latest_backtrack(prev);
-
- /* If this is a new backtracking point, mark the tree */
- if (!node->set_backtrack(tid))
- continue;
- DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
- id_to_int(prev->get_tid()),
- id_to_int(t->get_id()));
- if (DBG_ENABLED()) {
- prev->print();
- act->print();
- }
- }
-}
+ int index = fuzzer->selectWrite(curr, rf_set);
+ const ModelAction *rf = (*rf_set)[index];
-/**
- * @brief Cache the a backtracking point as the "most recent", if eligible
- *
- * Note that this does not prepare the NodeStack for this backtracking
- * operation, it only caches the action on a per-execution basis
- *
- * @param act The operation at which we should explore a different next action
- * (i.e., backtracking point)
- * @return True, if this action is now the most recent backtracking point;
- * false otherwise
- */
-bool ModelExecution::set_latest_backtrack(ModelAction *act)
-{
- if (!priv->next_backtrack || *act > *priv->next_backtrack) {
- priv->next_backtrack = act;
- return true;
- }
- return false;
-}
-/**
- * Returns last backtracking point. The model checker will explore a different
- * path for this point in the next execution.
- * @return The ModelAction at which the next execution should diverge.
- */
-ModelAction * ModelExecution::get_next_backtrack()
-{
- ModelAction *next = priv->next_backtrack;
- priv->next_backtrack = NULL;
- return next;
-}
+ ASSERT(rf);
-/**
- * Processes a read model action.
- * @param curr is the read model action to process.
- * @return True if processing this read updates the mo_graph.
- */
-bool ModelExecution::process_read(ModelAction *curr)
-{
- Node *node = curr->get_node();
- while (true) {
- bool updated = false;
- switch (node->get_read_from_status()) {
- case READ_FROM_PAST: {
- const ModelAction *rf = node->get_read_from_past();
- ASSERT(rf);
-
- mo_graph->startChanges();
-
- ASSERT(!is_infeasible());
- if (!check_recency(curr, rf)) {
- if (node->increment_read_from()) {
- mo_graph->rollbackChanges();
- continue;
- } else {
- priv->too_many_reads = true;
- }
+ if (r_modification_order(curr, rf, priorset)) {
+ for(unsigned int i=0;i<priorset->size();i++) {
+ mo_graph->addEdge((*priorset)[i], rf);
}
-
- updated = r_modification_order(curr, rf);
read_from(curr, rf);
- mo_graph->commitChanges();
- mo_check_promises(curr, true);
- break;
- }
- case READ_FROM_PROMISE: {
- Promise *promise = curr->get_node()->get_read_from_promise();
- if (promise->add_reader(curr))
- priv->failed_promise = true;
- curr->set_read_from_promise(promise);
- mo_graph->startChanges();
- if (!check_recency(curr, promise))
- priv->too_many_reads = true;
- updated = r_modification_order(curr, promise);
- mo_graph->commitChanges();
- break;
- }
- case READ_FROM_FUTURE: {
- /* Read from future value */
- struct future_value fv = node->get_future_value();
- Promise *promise = new Promise(this, curr, fv);
- curr->set_read_from_promise(promise);
- promises.push_back(promise);
- mo_graph->startChanges();
- updated = r_modification_order(curr, promise);
- mo_graph->commitChanges();
- break;
- }
- default:
- ASSERT(false);
+ get_thread(curr)->set_return_value(curr->get_return_value());
+ delete priorset;
+ return;
}
- get_thread(curr)->set_return_value(curr->get_return_value());
- return updated;
+ priorset->clear();
+ (*rf_set)[index] = rf_set->back();
+ rf_set->pop_back();
}
}
*/
bool ModelExecution::process_mutex(ModelAction *curr)
{
- std::mutex *mutex = curr->get_mutex();
- struct std::mutex_state *state = NULL;
+ cdsc::mutex *mutex = curr->get_mutex();
+ struct cdsc::mutex_state *state = NULL;
if (mutex)
state = mutex->get_state();
}
get_thread(curr)->set_return_value(1);
}
- //otherwise fall into the lock case
+ //otherwise fall into the lock case
case ATOMIC_LOCK: {
if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
assert_bug("Lock access before initialization");
case ATOMIC_WAIT:
case ATOMIC_UNLOCK: {
/* wake up the other threads */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
state->locked = NULL;
if (!curr->is_wait())
- break; /* The rest is only for ATOMIC_WAIT */
+ break;/* The rest is only for ATOMIC_WAIT */
- /* Should we go to sleep? (simulate spurious failures) */
- if (curr->get_node()->get_misc() == 0) {
- get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
- /* disable us */
- scheduler->sleep(get_thread(curr));
- }
break;
}
case ATOMIC_NOTIFY_ALL: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
- for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
+ for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
scheduler->wake(get_thread(*rit));
}
waiters->clear();
}
case ATOMIC_NOTIFY_ONE: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
- int wakeupthread = curr->get_node()->get_misc();
- action_list_t::iterator it = waiters->begin();
- advance(it, wakeupthread);
- scheduler->wake(get_thread(*it));
- waiters->erase(it);
+ Thread * thread = fuzzer->selectNotify(waiters);
+ scheduler->wake(thread);
break;
}
return false;
}
-/**
- * @brief Check if the current pending promises allow a future value to be sent
- *
- * If one of the following is true:
- * (a) there are no pending promises
- * (b) the reader and writer do not cross any promises
- * Then, it is safe to pass a future value back now.
- *
- * Otherwise, we must save the pending future value until (a) or (b) is true
- *
- * @param writer The operation which sends the future value. Must be a write.
- * @param reader The operation which will observe the value. Must be a read.
- * @return True if the future value can be sent now; false if it must wait.
- */
-bool ModelExecution::promises_may_allow(const ModelAction *writer,
- const ModelAction *reader) const
-{
- if (promises.empty())
- return true;
- for (int i = promises.size() - 1; i >= 0; i--) {
- ModelAction *pr = promises[i]->get_reader(0);
- //reader is after promise...doesn't cross any promise
- if (*reader > *pr)
- return true;
- //writer is after promise, reader before...bad...
- if (*writer > *pr)
- return false;
- }
- return true;
-}
-
-/**
- * @brief Add a future value to a reader
- *
- * This function performs a few additional checks to ensure that the future
- * value can be feasibly observed by the reader
- *
- * @param writer The operation whose value is sent. Must be a write.
- * @param reader The read operation which may read the future value. Must be a read.
- */
-void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
-{
- /* Do more ambitious checks now that mo is more complete */
- if (!mo_may_allow(writer, reader))
- return;
-
- Node *node = reader->get_node();
-
- /* Find an ancestor thread which exists at the time of the reader */
- Thread *write_thread = get_thread(writer);
- while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
- write_thread = write_thread->get_parent();
-
- struct future_value fv = {
- writer->get_write_value(),
- writer->get_seq_number() + params->maxfuturedelay,
- write_thread->get_id(),
- };
- if (node->add_future_value(fv))
- set_latest_backtrack(reader);
-}
-
/**
* Process a write ModelAction
* @param curr The ModelAction to process
* @return True if the mo_graph was updated or promises were resolved
*/
-bool ModelExecution::process_write(ModelAction *curr)
+void ModelExecution::process_write(ModelAction *curr)
{
- /* Readers to which we may send our future value */
- ModelVector<ModelAction *> send_fv;
-
- const ModelAction *earliest_promise_reader;
- bool updated_promises = false;
- bool updated_mod_order = w_modification_order(curr, &send_fv);
- Promise *promise = pop_promise_to_resolve(curr);
+ w_modification_order(curr);
- if (promise) {
- earliest_promise_reader = promise->get_reader(0);
- updated_promises = resolve_promise(curr, promise);
- } else
- earliest_promise_reader = NULL;
-
- for (unsigned int i = 0; i < send_fv.size(); i++) {
- ModelAction *read = send_fv[i];
-
- /* Don't send future values to reads after the Promise we resolve */
- if (!earliest_promise_reader || *read < *earliest_promise_reader) {
- /* Check if future value can be sent immediately */
- if (promises_may_allow(curr, read)) {
- add_future_value(curr, read);
- } else {
- futurevalues.push_back(PendingFutureValue(curr, read));
- }
- }
- }
-
- /* Check the pending future values */
- for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
- struct PendingFutureValue pfv = futurevalues[i];
- if (promises_may_allow(pfv.writer, pfv.reader)) {
- add_future_value(pfv.writer, pfv.reader);
- futurevalues.erase(futurevalues.begin() + i);
- }
- }
-
- mo_graph->commitChanges();
- mo_check_promises(curr, false);
get_thread(curr)->set_return_value(VALUE_NONE);
- return updated_mod_order || updated_promises;
}
/**
action_list_t *list = &action_trace;
action_list_t::reverse_iterator rit;
/* Find X : is_read(X) && X --sb-> curr */
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr)
continue;
/* Establish hypothetical release sequences */
rel_heads_list_t release_heads;
get_release_seq_heads(curr, act, &release_heads);
- for (unsigned int i = 0; i < release_heads.size(); i++)
+ for (unsigned int i = 0;i < release_heads.size();i++)
synchronize(release_heads[i], curr);
if (release_heads.size() != 0)
updated = true;
thrd_t *thrd = (thrd_t *)curr->get_location();
struct thread_params *params = (struct thread_params *)curr->get_value();
Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
+ curr->set_thread_operand(th);
add_thread(th);
th->set_creation(curr);
- /* Promises can be satisfied by children */
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->thread_is_available(curr->get_tid()))
- promise->add_thread(th->get_id());
- }
+ break;
+ }
+ case PTHREAD_CREATE: {
+ (*(uint32_t *)curr->get_location()) = pthread_counter++;
+
+ struct pthread_params *params = (struct pthread_params *)curr->get_value();
+ Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
+ curr->set_thread_operand(th);
+ add_thread(th);
+ th->set_creation(curr);
+
+ if ( pthread_map.size() < pthread_counter )
+ pthread_map.resize( pthread_counter );
+ pthread_map[ pthread_counter-1 ] = th;
+
break;
}
case THREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
+ case PTHREAD_JOIN: {
+ Thread *blocking = curr->get_thread_operand();
+ ModelAction *act = get_last_action(blocking->get_id());
+ synchronize(act, curr);
+ updated = true; /* trigger rel-seq checks */
+		break;	// WL: to be added (modified)
+ }
+
case THREAD_FINISH: {
Thread *th = get_thread(curr);
/* Wake up any joining threads */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
if (waiting->waiting_on() == th &&
waiting->get_pending()->is_thread_join())
scheduler->wake(waiting);
}
th->complete();
- /* Completed thread can't satisfy promises */
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->thread_is_available(th->get_id()))
- if (promise->eliminate_thread(th->get_id()))
- priv->failed_promise = true;
- }
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_START: {
- check_promises(curr->get_tid(), NULL, curr->get_cv());
break;
}
default:
return updated;
}
-/**
- * @brief Process the current action for release sequence fixup activity
- *
- * Performs model-checker release sequence fixups for the current action,
- * forcing a single pending release sequence to break (with a given, potential
- * "loose" write) or to complete (i.e., synchronize). If a pending release
- * sequence forms a complete release sequence, then we must perform the fixup
- * synchronization, mo_graph additions, etc.
- *
- * @param curr The current action; must be a release sequence fixup action
- * @param work_queue The work queue to which to add work items as they are
- * generated
- */
-void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
-{
- const ModelAction *write = curr->get_node()->get_relseq_break();
- struct release_seq *sequence = pending_rel_seqs.back();
- pending_rel_seqs.pop_back();
- ASSERT(sequence);
- ModelAction *acquire = sequence->acquire;
- const ModelAction *rf = sequence->rf;
- const ModelAction *release = sequence->release;
- ASSERT(acquire);
- ASSERT(release);
- ASSERT(rf);
- ASSERT(release->same_thread(rf));
-
- if (write == NULL) {
- /**
- * @todo Forcing a synchronization requires that we set
- * modification order constraints. For instance, we can't allow
- * a fixup sequence in which two separate read-acquire
- * operations read from the same sequence, where the first one
- * synchronizes and the other doesn't. Essentially, we can't
- * allow any writes to insert themselves between 'release' and
- * 'rf'
- */
-
- /* Must synchronize */
- if (!synchronize(release, acquire))
- return;
- /* Re-check all pending release sequences */
- work_queue->push_back(CheckRelSeqWorkEntry(NULL));
- /* Re-check act for mo_graph edges */
- work_queue->push_back(MOEdgeWorkEntry(acquire));
-
- /* propagate synchronization to later actions */
- action_list_t::reverse_iterator rit = action_trace.rbegin();
- for (; (*rit) != acquire; rit++) {
- ModelAction *propagate = *rit;
- if (acquire->happens_before(propagate)) {
- synchronize(acquire, propagate);
- /* Re-check 'propagate' for mo_graph edges */
- work_queue->push_back(MOEdgeWorkEntry(propagate));
- }
- }
- } else {
- /* Break release sequence with new edges:
- * release --mo--> write --mo--> rf */
- mo_graph->addEdge(release, write);
- mo_graph->addEdge(write, rf);
- }
-
- /* See if we have realized a data race */
- checkDataRaces();
-}
-
/**
* Initialize the current action by performing one or more of the following
* actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
newcurr = process_rmw(*curr);
delete *curr;
- if (newcurr->is_rmw())
- compute_promises(newcurr);
-
*curr = newcurr;
return false;
}
(*curr)->set_seq_number(get_next_seq_num());
- newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
+ newcurr = node_stack->explore_action(*curr);
if (newcurr) {
/* First restore type and order in case of RMW operation */
if ((*curr)->is_rmwr())
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
*curr = newcurr;
- return false; /* Action was explored previously */
+ return false; /* Action was explored previously */
} else {
newcurr = *curr;
/* Assign most recent release fence */
newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
- /*
- * Perform one-time actions when pushing new ModelAction onto
- * NodeStack
- */
- if (newcurr->is_write())
- compute_promises(newcurr);
- else if (newcurr->is_relseq_fixup())
- compute_relseq_breakwrites(newcurr);
- else if (newcurr->is_wait())
- newcurr->get_node()->set_misc_max(2);
- else if (newcurr->is_notify_one()) {
- newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
- }
- return true; /* This was a new ModelAction */
+ return true; /* This was a new ModelAction */
}
}
*
* @return True if this read established synchronization
*/
+
bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
{
ASSERT(rf);
rel_heads_list_t release_heads;
get_release_seq_heads(act, act, &release_heads);
int num_heads = release_heads.size();
- for (unsigned int i = 0; i < release_heads.size(); i++)
+ for (unsigned int i = 0;i < release_heads.size();i++)
if (!synchronize(release_heads[i], act))
num_heads--;
return num_heads > 0;
set_bad_synchronization();
return false;
}
- check_promises(first->get_tid(), second->get_cv(), first->get_cv());
return second->synchronize_with(first);
}
-/**
- * Check promises and eliminate potentially-satisfying threads when a thread is
- * blocked (e.g., join, lock). A thread which is waiting on another thread can
- * no longer satisfy a promise generated from that thread.
- *
- * @param blocker The thread on which a thread is waiting
- * @param waiting The waiting thread
- */
-void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (!promise->thread_is_available(waiting->get_id()))
- continue;
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- ModelAction *reader = promise->get_reader(j);
- if (reader->get_tid() != blocker->get_id())
- continue;
- if (promise->eliminate_thread(waiting->get_id())) {
- /* Promise has failed */
- priv->failed_promise = true;
- } else {
- /* Only eliminate the 'waiting' thread once */
- return;
- }
- }
- }
-}
-
/**
* @brief Check whether a model action is enabled.
*
- * Checks whether a lock or join operation would be successful (i.e., is the
- * lock already locked, or is the joined thread already complete). If not, put
- * the action in a waiter list.
+ * Checks whether an operation would be successful (i.e., is a lock already
+ * locked, or is the joined thread already complete).
+ *
+ * For yield-blocking, yields are never enabled.
*
* @param curr is the ModelAction to check whether it is enabled.
* @return a bool that indicates whether the action is enabled.
*/
bool ModelExecution::check_action_enabled(ModelAction *curr) {
if (curr->is_lock()) {
- std::mutex *lock = curr->get_mutex();
- struct std::mutex_state *state = lock->get_state();
+ cdsc::mutex *lock = curr->get_mutex();
+ struct cdsc::mutex_state *state = lock->get_state();
if (state->locked)
return false;
} else if (curr->is_thread_join()) {
Thread *blocking = curr->get_thread_operand();
if (!blocking->is_complete()) {
- thread_blocking_check_promises(blocking, get_thread(curr));
return false;
}
}
wake_up_sleeping_actions(curr);
- /* Compute fairness information for CHESS yield algorithm */
- if (params->yieldon) {
- curr->get_node()->update_yield(scheduler);
- }
-
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw)
add_action_to_lists(curr);
+ SnapVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
- build_may_read_from(curr);
-
- /* Initialize work_queue with the "current action" work */
- work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
- while (!work_queue.empty() && !has_asserted()) {
- WorkQueueEntry work = work_queue.front();
- work_queue.pop_front();
-
- switch (work.type) {
- case WORK_CHECK_CURR_ACTION: {
- ModelAction *act = work.action;
- bool update = false; /* update this location's release seq's */
- bool update_all = false; /* update all release seq's */
-
- if (process_thread_action(curr))
- update_all = true;
-
- if (act->is_read() && !second_part_of_rmw && process_read(act))
- update = true;
+ rf_set = build_may_read_from(curr);
- if (act->is_write() && process_write(act))
- update = true;
+ process_thread_action(curr);
- if (act->is_fence() && process_fence(act))
- update_all = true;
-
- if (act->is_mutex_op() && process_mutex(act))
- update_all = true;
+ if (curr->is_read() && !second_part_of_rmw) {
+ process_read(curr, rf_set);
+ delete rf_set;
+ } else {
+ ASSERT(rf_set == NULL);
+ }
- if (act->is_relseq_fixup())
- process_relseq_fixup(curr, &work_queue);
+ if (curr->is_write())
+ process_write(curr);
- if (update_all)
- work_queue.push_back(CheckRelSeqWorkEntry(NULL));
- else if (update)
- work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
- break;
- }
- case WORK_CHECK_RELEASE_SEQ:
- resolve_release_sequences(work.location, &work_queue);
- break;
- case WORK_CHECK_MO_EDGES: {
- /** @todo Complete verification of work_queue */
- ModelAction *act = work.action;
- bool updated = false;
-
- if (act->is_read()) {
- const ModelAction *rf = act->get_reads_from();
- const Promise *promise = act->get_reads_from_promise();
- if (rf) {
- if (r_modification_order(act, rf))
- updated = true;
- } else if (promise) {
- if (r_modification_order(act, promise))
- updated = true;
- }
- }
- if (act->is_write()) {
- if (w_modification_order(act, NULL))
- updated = true;
- }
- mo_graph->commitChanges();
+ if (curr->is_fence())
+ process_fence(curr);
- if (updated)
- work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
- break;
- }
- default:
- ASSERT(false);
- break;
- }
- }
+ if (curr->is_mutex_op())
+ process_mutex(curr);
- check_curr_backtracking(curr);
- set_backtracking(curr);
return curr;
}
-void ModelExecution::check_curr_backtracking(ModelAction *curr)
-{
- Node *currnode = curr->get_node();
- Node *parnode = currnode->get_parent();
-
- if ((parnode && !parnode->backtrack_empty()) ||
- !currnode->misc_empty() ||
- !currnode->read_from_empty() ||
- !currnode->promise_empty() ||
- !currnode->relseq_break_empty()) {
- set_latest_backtrack(curr);
- }
-}
-
-bool ModelExecution::promises_expired() const
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->get_expiration() < priv->used_sequence_numbers)
- return true;
- }
- return false;
-}
-
/**
* This is the strongest feasibility check available.
* @return whether the current trace (partial or complete) must be a prefix of
*/
bool ModelExecution::isfeasibleprefix() const
{
- return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
+ return !is_infeasible();
}
/**
{
char buf[100];
char *ptr = buf;
- if (mo_graph->checkForCycles())
- ptr += sprintf(ptr, "[mo cycle]");
- if (priv->failed_promise)
- ptr += sprintf(ptr, "[failed promise]");
- if (priv->too_many_reads)
- ptr += sprintf(ptr, "[too many reads]");
- if (priv->no_valid_reads)
- ptr += sprintf(ptr, "[no valid reads-from]");
if (priv->bad_synchronization)
ptr += sprintf(ptr, "[bad sw ordering]");
- if (promises_expired())
- ptr += sprintf(ptr, "[promise expired]");
- if (promises.size() != 0)
- ptr += sprintf(ptr, "[unresolved promise]");
if (ptr != buf)
- model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
-}
-
-/**
- * Returns whether the current completed trace is feasible, except for pending
- * release sequences.
- */
-bool ModelExecution::is_feasible_prefix_ignore_relseq() const
-{
- return !is_infeasible() && promises.size() == 0;
+ model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
}
/**
*/
bool ModelExecution::is_infeasible() const
{
- return mo_graph->checkForCycles() ||
- priv->no_valid_reads ||
- priv->failed_promise ||
- priv->too_many_reads ||
- priv->bad_synchronization ||
- promises_expired();
+ return priv->bad_synchronization;
}
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction *lastread = get_last_action(act->get_tid());
lastread->process_rmw(act);
if (act->is_rmw()) {
- if (lastread->get_reads_from())
- mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
- else
- mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
- mo_graph->commitChanges();
+ mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
}
return lastread;
}
-/**
- * A helper function for ModelExecution::check_recency, to check if the current
- * thread is able to read from a different write/promise for 'params.maxreads'
- * number of steps and if that write/promise should become visible (i.e., is
- * ordered later in the modification order). This helps model memory liveness.
- *
- * @param curr The current action. Must be a read.
- * @param rf The write/promise from which we plan to read
- * @param other_rf The write/promise from which we may read
- * @return True if we were able to read from other_rf for params.maxreads steps
- */
-template <typename T, typename U>
-bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
-{
- /* Need a different write/promise */
- if (other_rf->equals(rf))
- return false;
-
- /* Only look for "newer" writes/promises */
- if (!mo_graph->checkReachable(rf, other_rf))
- return false;
-
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
- action_list_t::reverse_iterator rit = list->rbegin();
- ASSERT((*rit) == curr);
- /* Skip past curr */
- rit++;
-
- /* Does this write/promise work for everyone? */
- for (int i = 0; i < params->maxreads; i++, rit++) {
- ModelAction *act = *rit;
- if (!act->may_read_from(other_rf))
- return false;
- }
- return true;
-}
-
-/**
- * Checks whether a thread has read from the same write or Promise for too many
- * times without seeing the effects of a later write/Promise.
- *
- * Basic idea:
- * 1) there must a different write/promise that we could read from,
- * 2) we must have read from the same write/promise in excess of maxreads times,
- * 3) that other write/promise must have been in the reads_from set for maxreads times, and
- * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
- *
- * If so, we decide that the execution is no longer feasible.
- *
- * @param curr The current action. Must be a read.
- * @param rf The ModelAction/Promise from which we might read.
- * @return True if the read should succeed; false otherwise
- */
-template <typename T>
-bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
-{
- if (!params->maxreads)
- return true;
-
- //NOTE: Next check is just optimization, not really necessary....
- if (curr->get_node()->get_read_from_past_size() +
- curr->get_node()->get_read_from_promise_size() <= 1)
- return true;
-
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- int tid = id_to_int(curr->get_tid());
- ASSERT(tid < (int)thrd_lists->size());
- action_list_t *list = &(*thrd_lists)[tid];
- action_list_t::reverse_iterator rit = list->rbegin();
- ASSERT((*rit) == curr);
- /* Skip past curr */
- rit++;
-
- action_list_t::reverse_iterator ritcopy = rit;
- /* See if we have enough reads from the same value */
- for (int count = 0; count < params->maxreads; ritcopy++, count++) {
- if (ritcopy == list->rend())
- return true;
- ModelAction *act = *ritcopy;
- if (!act->is_read())
- return true;
- if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
- return true;
- if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
- return true;
- if (act->get_node()->get_read_from_past_size() +
- act->get_node()->get_read_from_promise_size() <= 1)
- return true;
- }
- for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
- const ModelAction *write = curr->get_node()->get_read_from_past(i);
- if (should_read_instead(curr, rf, write))
- return false; /* liveness failure */
- }
- for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
- const Promise *promise = curr->get_node()->get_read_from_promise(i);
- if (should_read_instead(curr, rf, promise))
- return false; /* liveness failure */
- }
- return true;
-}
-
/**
* @brief Updates the mo_graph with the constraints imposed from the current
* read.
* @param rf The ModelAction or Promise that curr reads from. Must be a write.
* @return True if modification order edges were added; false otherwise
*/
-template <typename rf_type>
-bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
+
+bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
- bool added = false;
ASSERT(curr->is_read());
/* Last SC fence in the current thread */
last_sc_write = get_last_seq_cst_write(curr);
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i */
ModelAction *last_sc_fence_thread_local = NULL;
if (int_to_id((int)i) != curr->get_tid())
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Skip curr */
/* C++, Section 29.3 statement 5 */
if (curr->is_seqcst() && last_sc_fence_thread_local &&
*act < *last_sc_fence_thread_local) {
- added = mo_graph->addEdge(act, rf) || added;
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
else if (act->is_seqcst() && last_sc_fence_local &&
- *act < *last_sc_fence_local) {
- added = mo_graph->addEdge(act, rf) || added;
+ *act < *last_sc_fence_local) {
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
else if (last_sc_fence_thread_before &&
- *act < *last_sc_fence_thread_before) {
- added = mo_graph->addEdge(act, rf) || added;
+ *act < *last_sc_fence_thread_before) {
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
break;
}
}
- /* C++, Section 29.3 statement 3 (second subpoint) */
- if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
- added = mo_graph->addEdge(act, rf) || added;
- break;
- }
-
/*
* Include at most one act per-thread that "happens
* before" curr
*/
if (act->happens_before(curr)) {
if (act->is_write()) {
- added = mo_graph->addEdge(act, rf) || added;
+ if (mo_graph->checkReachable(rf, act))
+ return false;
+ priorset->push_back(act);
} else {
const ModelAction *prevrf = act->get_reads_from();
- const Promise *prevrf_promise = act->get_reads_from_promise();
- if (prevrf) {
- if (!prevrf->equals(rf))
- added = mo_graph->addEdge(prevrf, rf) || added;
- } else if (!prevrf_promise->equals(rf)) {
- added = mo_graph->addEdge(prevrf_promise, rf) || added;
+ if (!prevrf->equals(rf)) {
+ if (mo_graph->checkReachable(rf, prevrf))
+ return false;
+ priorset->push_back(prevrf);
}
}
break;
}
}
}
-
- /*
- * All compatible, thread-exclusive promises must be ordered after any
- * concrete loads from the same thread
- */
- for (unsigned int i = 0; i < promises.size(); i++)
- if (promises[i]->is_compatible_exclusive(curr))
- added = mo_graph->addEdge(rf, promises[i]) || added;
-
- return added;
+ return true;
}
/**
* value. If NULL, then don't record any future values.
* @return True if modification order edges were added; false otherwise
*/
-bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
+void ModelExecution::w_modification_order(ModelAction *curr)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
- bool added = false;
ASSERT(curr->is_write());
if (curr->is_seqcst()) {
/* We have to at least see the last sequentially consistent write,
- so we are initialized. */
+ so we are initialized. */
ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
if (last_seq_cst != NULL) {
- added = mo_graph->addEdge(last_seq_cst, curr) || added;
+ mo_graph->addEdge(last_seq_cst, curr);
}
}
ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ bool force_edge = false;
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr) {
/*
* 3) If normal write, we need to look at earlier actions, so
* continue processing list.
*/
+ force_edge = true;
if (curr->is_rmw()) {
if (curr->get_reads_from() != NULL)
break;
/* C++, Section 29.3 statement 7 */
if (last_sc_fence_thread_before && act->is_write() &&
*act < *last_sc_fence_thread_before) {
- added = mo_graph->addEdge(act, curr) || added;
+ mo_graph->addEdge(act, curr, force_edge);
break;
}
* readfrom(act) --mo--> act
*/
if (act->is_write())
- added = mo_graph->addEdge(act, curr) || added;
+ mo_graph->addEdge(act, curr, force_edge);
else if (act->is_read()) {
//if previous read accessed a null, just keep going
- if (act->get_reads_from() == NULL)
- continue;
- added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
+ mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
}
break;
} else if (act->is_read() && !act->could_synchronize_with(curr) &&
- !act->same_thread(curr)) {
+ !act->same_thread(curr)) {
/* We have an action that:
(1) did not happen before us
(2) is a read and we are a write
pendingfuturevalue.
*/
- if (send_fv && thin_air_constraint_may_allow(curr, act)) {
- if (!is_infeasible())
- send_fv->push_back(act);
- else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
- add_future_value(curr, act);
- }
+
}
}
}
-
- /*
- * All compatible, thread-exclusive promises must be ordered after any
- * concrete stores to the same thread, or else they can be merged with
- * this store later
- */
- for (unsigned int i = 0; i < promises.size(); i++)
- if (promises[i]->is_compatible_exclusive(curr))
- added = mo_graph->addEdge(curr, promises[i]) || added;
-
- return added;
-}
-
-/** Arbitrary reads from the future are not allowed. Section 29.3
- * part 9 places some constraints. This method checks one result of constraint
- * constraint. Others require compiler support. */
-bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
-{
- if (!writer->is_rmw())
- return true;
-
- if (!reader->is_rmw())
- return true;
-
- for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
- if (search == reader)
- return false;
- if (search->get_tid() == reader->get_tid() &&
- search->happens_before(reader))
- break;
- }
-
- return true;
}
/**
* require compiler support):
*
* If X --hb-> Y --mo-> Z, then X should not read from Z.
+ * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
*/
bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
unsigned int i;
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
const ModelAction *write_after_read = NULL;
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Don't disallow due to act == reader */
* @param release_heads A pass-by-reference style return parameter. After
* execution of this function, release_heads will contain the heads of all the
* relevant release sequences, if any exists with certainty
- * @param pending A pass-by-reference style return parameter which is only used
- * when returning false (i.e., uncertain). Returns most information regarding
- * an uncertain release sequence, including any write operations that might
- * break the sequence.
* @return true, if the ModelExecution is certain that release_heads is complete;
* false otherwise
*/
-bool ModelExecution::release_seq_heads(const ModelAction *rf,
- rel_heads_list_t *release_heads,
- struct release_seq *pending) const
+bool ModelExecution::release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const
{
- /* Only check for release sequences if there are no cycles */
- if (mo_graph->checkForCycles())
- return false;
- for ( ; rf != NULL; rf = rf->get_reads_from()) {
+ for ( ;rf != NULL;rf = rf->get_reads_from()) {
ASSERT(rf->is_write());
if (rf->is_release())
else if (rf->get_last_fence_release())
release_heads->push_back(rf->get_last_fence_release());
if (!rf->is_rmw())
- break; /* End of RMW chain */
+ break;/* End of RMW chain */
/** @todo Need to be smarter here... In the linux lock
* example, this will run to the beginning of the program for
/* acq_rel RMW is a sufficient stopping condition */
if (rf->is_acquire() && rf->is_release())
- return true; /* complete */
+ return true;/* complete */
};
- if (!rf) {
- /* read from future: need to settle this later */
- pending->rf = NULL;
- return false; /* incomplete */
- }
+	ASSERT(rf);	// Needs to be a real write
if (rf->is_release())
- return true; /* complete */
+ return true;/* complete */
/* else relaxed write
* - check for fence-release in the same thread (29.8, stmt. 3)
if (fence_release)
release_heads->push_back(fence_release);
- int tid = id_to_int(rf->get_tid());
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
- action_list_t *list = &(*thrd_lists)[tid];
- action_list_t::const_reverse_iterator rit;
-
- /* Find rf in the thread list */
- rit = std::find(list->rbegin(), list->rend(), rf);
- ASSERT(rit != list->rend());
-
- /* Find the last {write,fence}-release */
- for (; rit != list->rend(); rit++) {
- if (fence_release && *(*rit) < *fence_release)
- break;
- if ((*rit)->is_release())
- break;
- }
- if (rit == list->rend()) {
- /* No write-release in this thread */
- return true; /* complete */
- } else if (fence_release && *(*rit) < *fence_release) {
- /* The fence-release is more recent (and so, "stronger") than
- * the most recent write-release */
- return true; /* complete */
- } /* else, need to establish contiguous release sequence */
- ModelAction *release = *rit;
-
- ASSERT(rf->same_thread(release));
-
- pending->writes.clear();
-
- bool certain = true;
- for (unsigned int i = 0; i < thrd_lists->size(); i++) {
- if (id_to_int(rf->get_tid()) == (int)i)
- continue;
- list = &(*thrd_lists)[i];
-
- /* Can we ensure no future writes from this thread may break
- * the release seq? */
- bool future_ordered = false;
-
- ModelAction *last = get_last_action(int_to_id(i));
- Thread *th = get_thread(int_to_id(i));
- if ((last && rf->happens_before(last)) ||
- !is_enabled(th) ||
- th->is_complete())
- future_ordered = true;
-
- ASSERT(!th->is_model_thread() || future_ordered);
-
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- const ModelAction *act = *rit;
- /* Reach synchronization -> this thread is complete */
- if (act->happens_before(release))
- break;
- if (rf->happens_before(act)) {
- future_ordered = true;
- continue;
- }
-
- /* Only non-RMW writes can break release sequences */
- if (!act->is_write() || act->is_rmw())
- continue;
-
- /* Check modification order */
- if (mo_graph->checkReachable(rf, act)) {
- /* rf --mo--> act */
- future_ordered = true;
- continue;
- }
- if (mo_graph->checkReachable(act, release))
- /* act --mo--> release */
- break;
- if (mo_graph->checkReachable(release, act) &&
- mo_graph->checkReachable(act, rf)) {
- /* release --mo-> act --mo--> rf */
- return true; /* complete */
- }
- /* act may break release sequence */
- pending->writes.push_back(act);
- certain = false;
- }
- if (!future_ordered)
- certain = false; /* This thread is uncertain */
- }
-
- if (certain) {
- release_heads->push_back(release);
- pending->writes.clear();
- } else {
- pending->release = release;
- pending->rf = rf;
- }
- return certain;
+ return true; /* complete */
}
/**
* @see ModelExecution::release_seq_heads
*/
void ModelExecution::get_release_seq_heads(ModelAction *acquire,
- ModelAction *read, rel_heads_list_t *release_heads)
+ ModelAction *read, rel_heads_list_t *release_heads)
{
const ModelAction *rf = read->get_reads_from();
- struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
- sequence->acquire = acquire;
- sequence->read = read;
-
- if (!release_seq_heads(rf, release_heads, sequence)) {
- /* add act to 'lazy checking' list */
- pending_rel_seqs.push_back(sequence);
- } else {
- snapshot_free(sequence);
- }
-}
-
-/**
- * Attempt to resolve all stashed operations that might synchronize with a
- * release sequence for a given location. This implements the "lazy" portion of
- * determining whether or not a release sequence was contiguous, since not all
- * modification order information is present at the time an action occurs.
- *
- * @param location The location/object that should be checked for release
- * sequence resolutions. A NULL value means to check all locations.
- * @param work_queue The work queue to which to add work items as they are
- * generated
- * @return True if any updates occurred (new synchronization, new mo_graph
- * edges)
- */
-bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
-{
- bool updated = false;
- SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
- while (it != pending_rel_seqs.end()) {
- struct release_seq *pending = *it;
- ModelAction *acquire = pending->acquire;
- const ModelAction *read = pending->read;
-
- /* Only resolve sequences on the given location, if provided */
- if (location && read->get_location() != location) {
- it++;
- continue;
- }
-
- const ModelAction *rf = read->get_reads_from();
- rel_heads_list_t release_heads;
- bool complete;
- complete = release_seq_heads(rf, &release_heads, pending);
- for (unsigned int i = 0; i < release_heads.size(); i++)
- if (!acquire->has_synchronized_with(release_heads[i]))
- if (synchronize(release_heads[i], acquire))
- updated = true;
-
- if (updated) {
- /* Re-check all pending release sequences */
- work_queue->push_back(CheckRelSeqWorkEntry(NULL));
- /* Re-check read-acquire for mo_graph edges */
- if (acquire->is_read())
- work_queue->push_back(MOEdgeWorkEntry(acquire));
-
- /* propagate synchronization to later actions */
- action_list_t::reverse_iterator rit = action_trace.rbegin();
- for (; (*rit) != acquire; rit++) {
- ModelAction *propagate = *rit;
- if (acquire->happens_before(propagate)) {
- synchronize(acquire, propagate);
- /* Re-check 'propagate' for mo_graph edges */
- work_queue->push_back(MOEdgeWorkEntry(propagate));
- }
- }
- }
- if (complete) {
- it = pending_rel_seqs.erase(it);
- snapshot_free(pending);
- } else {
- it++;
- }
- }
-
- // If we resolved promises or data races, see if we have realized a data race.
- checkDataRaces();
- return updated;
+ release_seq_heads(rf, release_heads);
}
/**
action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); (*rit) != curr; rit++)
+ for (rit = list->rbegin();(*rit) != curr;rit++)
;
- rit++; /* Skip past curr */
- for ( ; rit != list->rend(); rit++)
+ rit++; /* Skip past curr */
+ for ( ;rit != list->rend();rit++)
if ((*rit)->is_write() && (*rit)->is_seqcst())
return *rit;
return NULL;
action_list_t::reverse_iterator rit = list->rbegin();
if (before_fence) {
- for (; rit != list->rend(); rit++)
+ for (;rit != list->rend();rit++)
if (*rit == before_fence)
break;
rit++;
}
- for (; rit != list->rend(); rit++)
+ for (;rit != list->rend();rit++)
if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
return *rit;
return NULL;
ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
{
void *location = curr->get_location();
+
action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++)
+ for (rit = list->rbegin();rit != list->rend();rit++)
if ((*rit)->is_unlock() || (*rit)->is_wait())
return *rit;
return NULL;
return get_parent_action(tid)->get_cv();
}
-/**
- * @brief Find the promise (if any) to resolve for the current action and
- * remove it from the pending promise vector
- * @param curr The current ModelAction. Should be a write.
- * @return The Promise to resolve, if any; otherwise NULL
- */
-Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
-{
- for (unsigned int i = 0; i < promises.size(); i++)
- if (curr->get_node()->get_promise(i)) {
- Promise *ret = promises[i];
- promises.erase(promises.begin() + i);
- return ret;
- }
- return NULL;
-}
-
-/**
- * Resolve a Promise with a current write.
- * @param write The ModelAction that is fulfilling Promises
- * @param promise The Promise to resolve
- * @return True if the Promise was successfully resolved; false otherwise
- */
-bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
-{
- ModelVector<ModelAction *> actions_to_check;
-
- for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
- ModelAction *read = promise->get_reader(i);
- read_from(read, write);
- actions_to_check.push_back(read);
- }
- /* Make sure the promise's value matches the write's value */
- ASSERT(promise->is_compatible(write) && promise->same_value(write));
- if (!mo_graph->resolvePromise(promise, write))
- priv->failed_promise = true;
-
- /**
- * @todo It is possible to end up in an inconsistent state, where a
- * "resolved" promise may still be referenced if
- * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
- *
- * Note that the inconsistency only matters when dumping mo_graph to
- * file.
- *
- * delete promise;
- */
-
- //Check whether reading these writes has made threads unable to
- //resolve promises
- for (unsigned int i = 0; i < actions_to_check.size(); i++) {
- ModelAction *read = actions_to_check[i];
- mo_check_promises(read, true);
- }
-
- return true;
-}
-
-/**
- * Compute the set of promises that could potentially be satisfied by this
- * action. Note that the set computation actually appears in the Node, not in
- * ModelExecution.
- * @param curr The ModelAction that may satisfy promises
- */
-void ModelExecution::compute_promises(ModelAction *curr)
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (!promise->is_compatible(curr) || !promise->same_value(curr))
- continue;
-
- bool satisfy = true;
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- const ModelAction *act = promise->get_reader(j);
- if (act->happens_before(curr) ||
- act->could_synchronize_with(curr)) {
- satisfy = false;
- break;
- }
- }
- if (satisfy)
- curr->get_node()->set_promise(i);
- }
-}
-
-/** Checks promises in response to change in ClockVector Threads. */
-void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (!promise->thread_is_available(tid))
- continue;
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- const ModelAction *act = promise->get_reader(j);
- if ((!old_cv || !old_cv->synchronized_since(act)) &&
- merge_cv->synchronized_since(act)) {
- if (promise->eliminate_thread(tid)) {
- /* Promise has failed */
- priv->failed_promise = true;
- return;
- }
- }
- }
- }
-}
-
-void ModelExecution::check_promises_thread_disabled()
-{
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
- if (promise->has_failed()) {
- priv->failed_promise = true;
- return;
- }
- }
-}
-
-/**
- * @brief Checks promises in response to addition to modification order for
- * threads.
- *
- * We test whether threads are still available for satisfying promises after an
- * addition to our modification order constraints. Those that are unavailable
- * are "eliminated". Once all threads are eliminated from satisfying a promise,
- * that promise has failed.
- *
- * @param act The ModelAction which updated the modification order
- * @param is_read_check Should be true if act is a read and we must check for
- * updates to the store from which it read (there is a distinction here for
- * RMW's, which are both a load and a store)
- */
-void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
-{
- const ModelAction *write = is_read_check ? act->get_reads_from() : act;
-
- for (unsigned int i = 0; i < promises.size(); i++) {
- Promise *promise = promises[i];
-
- // Is this promise on the same location?
- if (!promise->same_location(write))
- continue;
-
- for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
- const ModelAction *pread = promise->get_reader(j);
- if (!pread->happens_before(act))
- continue;
- if (mo_graph->checkPromise(write, promise)) {
- priv->failed_promise = true;
- return;
- }
- break;
- }
-
- // Don't do any lookups twice for the same thread
- if (!promise->thread_is_available(act->get_tid()))
- continue;
-
- if (mo_graph->checkReachable(promise, write)) {
- if (mo_graph->checkPromise(write, promise)) {
- priv->failed_promise = true;
- return;
- }
- }
- }
-}
-
-/**
- * Compute the set of writes that may break the current pending release
- * sequence. This information is extracted from previou release sequence
- * calculations.
- *
- * @param curr The current ModelAction. Must be a release sequence fixup
- * action.
- */
-void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
-{
- if (pending_rel_seqs.empty())
- return;
-
- struct release_seq *pending = pending_rel_seqs.back();
- for (unsigned int i = 0; i < pending->writes.size(); i++) {
- const ModelAction *write = pending->writes[i];
- curr->get_node()->add_relseq_break(write);
+bool valequals(uint64_t val1, uint64_t val2, int size) {
+ switch(size) {
+ case 1:
+ return ((uint8_t)val1) == ((uint8_t)val2);
+ case 2:
+ return ((uint16_t)val1) == ((uint16_t)val2);
+ case 4:
+ return ((uint32_t)val1) == ((uint32_t)val2);
+ case 8:
+ return val1==val2;
+ default:
+ ASSERT(0);
+ return false;
}
-
- /* NULL means don't break the sequence; just synchronize */
- curr->get_node()->add_relseq_break(NULL);
}
/**
* @param curr is the current ModelAction that we are exploring; it must be a
* 'read' operation.
+ * @return a newly-allocated vector of the writes that curr may read from
 */
-void ModelExecution::build_may_read_from(ModelAction *curr)
+SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
if (curr->is_seqcst())
last_sc_write = get_last_seq_cst_write(curr);
+ SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
+
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Only consider 'write' actions */
if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
allow_read = false;
- else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
- allow_read = false;
+
+			/* Need to check whether we will have two RMWs reading from the same value */
+ if (curr->is_rmwr()) {
+ /* It is okay if we have a failing CAS */
+ if (!curr->is_rmwrcas() ||
+ valequals(curr->get_value(), act->get_value(), curr->getSize())) {
+ //Need to make sure we aren't the second RMW
+ CycleNode * node = mo_graph->getNode_noCreate(act);
+ if (node != NULL && node->getRMW() != NULL) {
+ //we are the second RMW
+ allow_read = false;
+ }
+ }
+ }
if (allow_read) {
/* Only add feasible reads */
- mo_graph->startChanges();
- r_modification_order(curr, act);
- if (!is_infeasible())
- curr->get_node()->add_read_from_past(act);
- mo_graph->rollbackChanges();
+ rf_set->push_back(act);
}
/* Include at most one act per-thread that "happens before" curr */
}
}
- /* Inherit existing, promised future values */
- for (i = 0; i < promises.size(); i++) {
- const Promise *promise = promises[i];
- const ModelAction *promise_read = promise->get_reader(0);
- if (promise_read->same_var(curr)) {
- /* Only add feasible future-values */
- mo_graph->startChanges();
- r_modification_order(curr, promise);
- if (!is_infeasible())
- curr->get_node()->add_read_from_promise(promise_read);
- mo_graph->rollbackChanges();
- }
- }
-
- /* We may find no valid may-read-from only if the execution is doomed */
- if (!curr->get_node()->read_from_size()) {
- priv->no_valid_reads = true;
- set_assert();
- }
-
if (DBG_ENABLED()) {
model_print("Reached read action:\n");
curr->print();
- model_print("Printing read_from_past\n");
- curr->get_node()->print_read_from_past();
model_print("End printing read_from_past\n");
}
-}
-
-bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
-{
- for ( ; write != NULL; write = write->get_reads_from()) {
- /* UNINIT actions don't have a Node, and they never sleep */
- if (write->is_uninitialized())
- return true;
- Node *prevnode = write->get_node()->get_parent();
-
- bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
- if (write->is_release() && thread_sleep)
- return true;
- if (!write->is_rmw())
- return false;
- }
- return true;
+ return rf_set;
}
/**
{
action_list_t::const_iterator it;
- model_print("---------------------------------------------------------------------\n");
+ model_print("------------------------------------------------------------------------------------\n");
+ model_print("# t Action type MO Location Value Rf CV\n");
+ model_print("------------------------------------------------------------------------------------\n");
unsigned int hash = 0;
- for (it = list->begin(); it != list->end(); it++) {
+ for (it = list->begin();it != list->end();it++) {
const ModelAction *act = *it;
if (act->get_seq_number() > 0)
act->print();
hash = hash^(hash<<3)^((*it)->hash());
}
model_print("HASH %u\n", hash);
- model_print("---------------------------------------------------------------------\n");
+ model_print("------------------------------------------------------------------------------------\n");
}
#if SUPPORT_MOD_ORDER_DUMP
mo_graph->dumpNodes(file);
ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
- for (action_list_t::iterator it = action_trace.begin(); it != action_trace.end(); it++) {
+ for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
ModelAction *act = *it;
if (act->is_read()) {
mo_graph->dot_print_node(file, act);
- if (act->get_reads_from())
- mo_graph->dot_print_edge(file,
- act->get_reads_from(),
- act,
- "label=\"rf\", color=red, weight=2");
- else
- mo_graph->dot_print_edge(file,
- act->get_reads_from_promise(),
- act,
- "label=\"rf\", color=red");
+ mo_graph->dot_print_edge(file,
+ act->get_reads_from(),
+ act,
+ "label=\"rf\", color=red, weight=2");
}
if (thread_array[act->get_tid()]) {
mo_graph->dot_print_edge(file,
- thread_array[id_to_int(act->get_tid())],
- act,
- "label=\"sb\", color=blue, weight=400");
+ thread_array[id_to_int(act->get_tid())],
+ act,
+ "label=\"sb\", color=blue, weight=400");
}
thread_array[act->get_tid()] = act;
dumpGraph(buffername);
#endif
- model_print("Execution %d:", get_execution_number());
+ model_print("Execution trace %d:", get_execution_number());
if (isfeasibleprefix()) {
if (scheduler->all_threads_sleeping())
model_print(" SLEEP-SET REDUNDANT");
- model_print("\n");
+ if (have_bug_reports())
+ model_print(" DETECTED BUG(S)");
} else
print_infeasibility(" INFEASIBLE");
+ model_print("\n");
+
print_list(&action_trace);
model_print("\n");
- if (!promises.empty()) {
- model_print("Pending promises:\n");
- for (unsigned int i = 0; i < promises.size(); i++) {
- model_print(" [P%u] ", i);
- promises[i]->print();
- }
- model_print("\n");
- }
+
}
/**
}
/**
- * @brief Get a Promise's "promise number"
- *
- * A "promise number" is an index number that is unique to a promise, valid
- * only for a specific snapshot of an execution trace. Promises may come and go
- * as they are generated an resolved, so an index only retains meaning for the
- * current snapshot.
- *
- * @param promise The Promise to check
- * @return The promise index, if the promise still is valid; otherwise -1
+ * @brief Get a Thread reference by its pthread ID
+ * @param index The pthread's ID
+ * @return A Thread reference
*/
-int ModelExecution::get_promise_number(const Promise *promise) const
-{
- for (unsigned int i = 0; i < promises.size(); i++)
- if (promises[i] == promise)
- return i;
- /* Not found */
- return -1;
+Thread * ModelExecution::get_pthread(pthread_t pid) {
+ /* pthread_t is an opaque type; reinterpret its low 32 bits as the
+  * numeric id this model assigned at pthread-create time.
+  * NOTE(review): assumes pthread_t is at least 32 bits wide and that the
+  * id was stored in those bits — TODO confirm against the creation path. */
+ union {
+ pthread_t p;
+ uint32_t v;
+ } x;
+ x.p = pid;
+ uint32_t thread_id = x.v;
+ /* Valid ids are 0..pthread_counter inclusive; anything outside that
+  * range was never registered, so return NULL rather than indexing
+  * pthread_map out of bounds. */
+ if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
+ else return NULL;
}
/**
/* Do not split atomic RMW */
if (curr->is_rmwr())
return get_thread(curr);
+ if (curr->is_write()) {
+ std::memory_order order = curr->get_mo();
+ switch(order) {
+ case std::memory_order_relaxed:
+ return get_thread(curr);
+ case std::memory_order_release:
+ return get_thread(curr);
+ default:
+ return NULL;
+ }
+ }
+
/* Follow CREATE with the created thread */
+ /* which is not needed, because model.cc takes care of this */
if (curr->get_type() == THREAD_CREATE)
return curr->get_thread_operand();
+ if (curr->get_type() == PTHREAD_CREATE) {
+ return curr->get_thread_operand();
+ }
return NULL;
}
-/** @return True if the execution has taken too many steps */
-bool ModelExecution::too_many_steps() const
-{
- return params->bound != 0 && priv->used_sequence_numbers > params->bound;
-}
-
/**
* Takes the next step in the execution, if possible.
* @param curr The current step to take
Thread *curr_thrd = get_thread(curr);
ASSERT(curr_thrd->get_state() == THREAD_READY);
- ASSERT(check_action_enabled(curr)); /* May have side effects? */
+ ASSERT(check_action_enabled(curr)); /* May have side effects? */
curr = check_current_action(curr);
ASSERT(curr);
return action_select_next_thread(curr);
}
-/**
- * Launch end-of-execution release sequence fixups only when
- * the execution is otherwise feasible AND there are:
- *
- * (1) pending release sequences
- * (2) pending assertions that could be invalidated by a change
- * in clock vectors (i.e., data races)
- * (3) no pending promises
- */
-void ModelExecution::fixup_release_sequences()
-{
- while (!pending_rel_seqs.empty() &&
- is_feasible_prefix_ignore_relseq() &&
- !unrealizedraces.empty()) {
- model_print("*** WARNING: release sequence fixup action "
- "(%zu pending release seuqence(s)) ***\n",
- pending_rel_seqs.size());
- ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
- std::memory_order_seq_cst, NULL, VALUE_NONE,
- model_thread);
- take_step(fixup);
- };
+/** @return this execution's Fuzzer instance (owned by ModelExecution;
+ *  allocated in the constructor, see the initializer list) */
+Fuzzer * ModelExecution::getFuzzer() {
+ return fuzzer;
}