#include "model.h"
#include "execution.h"
#include "action.h"
-#include "nodestack.h"
#include "schedule.h"
#include "common.h"
#include "clockvector.h"
#include "datarace.h"
#include "threads-model.h"
#include "bugmessage.h"
+#include "history.h"
#include "fuzzer.h"
+#include "newfuzzer.h"
#define INITIAL_THREAD_ID 0
next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
bugs(),
- bad_synchronization(false),
asserted(false)
{ }
modelclock_t used_sequence_numbers;
SnapVector<bug_message *> bugs;
/** @brief Incorrectly-ordered synchronization was made */
- bool bad_synchronization;
bool asserted;
SNAPSHOTALLOC
};
/** @brief Constructor */
-ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler, NodeStack *node_stack) :
+ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
model(m),
params(NULL),
scheduler(scheduler),
- action_trace(),
thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
- pthread_counter(0),
+ pthread_counter(1),
+ action_trace(),
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
mutex_map(),
thrd_last_action(1),
thrd_last_fence_release(),
- node_stack(node_stack),
priv(new struct model_snapshot_members ()),
- mo_graph(new CycleGraph()),
- fuzzer(new Fuzzer())
+ mo_graph(new CycleGraph()),
+ fuzzer(new Fuzzer()),
+ isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
add_thread(model_thread);
+ fuzzer->register_engine(this);
scheduler->register_engine(this);
- node_stack->register_engine(this);
+#ifdef TLS
+ pthread_key_create(&pthreadkey, tlsdestructor);
+#endif
}
/** @brief Destructor */
return model->get_execution_number();
}
-static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
+static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
{
action_list_t *tmp = hash->get(ptr);
if (tmp == NULL) {
return tmp;
}
-static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
+static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
{
SnapVector<action_list_t> *tmp = hash->get(ptr);
if (tmp == NULL) {
return tmp;
}
-action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
-{
- SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
- if (wrv==NULL)
- return NULL;
- unsigned int thread=id_to_int(tid);
- if (thread < wrv->size())
- return &(*wrv)[thread];
- else
- return NULL;
-}
-
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
return ++priv->used_sequence_numbers;
}
+/** @return the most recently assigned sequence number (does not advance the counter) */
+modelclock_t ModelExecution::get_curr_seq_num()
+{
+	return priv->used_sequence_numbers;
+}
+
+/** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
+void ModelExecution::restore_last_seq_num()
+{
+	priv->used_sequence_numbers--;
+}
+
/**
* @brief Should the current action wake up a given thread?
*
if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
return true;
}
+ /* The sleep is literally sleeping */
+ if (asleep->is_sleep()) {
+ if (fuzzer->shouldWake(asleep))
+ return true;
+ }
+
return false;
}
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
if (scheduler->is_sleep_set(thr)) {
- if (should_wake_up(curr, thr))
+ if (should_wake_up(curr, thr)) {
/* Remove this thread from sleep set */
scheduler->remove_sleep(thr);
+ if (thr->get_pending()->is_sleep())
+ thr->set_wakeup_state(true);
+ }
}
}
}
-/** @brief Alert the model-checker that an incorrectly-ordered
- * synchronization was made */
-void ModelExecution::set_bad_synchronization()
-{
- priv->bad_synchronization = true;
-}
-
-bool ModelExecution::assert_bug(const char *msg)
+void ModelExecution::assert_bug(const char *msg)
{
priv->bugs.push_back(new bug_message(msg));
-
- if (isfeasibleprefix()) {
- set_assert();
- return true;
- }
- return false;
+ set_assert();
}
/** @return True, if any bugs have been reported for this execution */
return true;
}
+/**
+ * @brief Materialize a plain (non-atomic) store as a NONATOMIC_WRITE action.
+ *
+ * Reads the 64-bit value currently at @a location, recovers the storing
+ * thread and clock (presumably recorded by the race detector -- see
+ * getStoreThreadAndClock), backdates the new action to that clock, and
+ * lazily splices it into the trace/write lists and the modification order.
+ * setAtomicStoreFlag presumably marks the location so this conversion
+ * happens only once -- TODO confirm.
+ *
+ * @param location Memory location whose last store was non-atomic
+ * @return The newly created NONATOMIC_WRITE ModelAction
+ */
+ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
+	uint64_t value = *((const uint64_t *) location);
+	modelclock_t storeclock;
+	thread_id_t storethread;
+	getStoreThreadAndClock(location, &storethread, &storeclock);
+	setAtomicStoreFlag(location);
+	ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
+	act->set_seq_number(storeclock);
+	add_normal_write_to_lists(act);
+	add_write_to_lists(act);
+	w_modification_order(act);
+	model->get_history()->process_action(act, act->get_tid());
+	return act;
+}
/**
* Processes a read model action.
* @param curr is the read model action to process.
* @param rf_set is the set of model actions we can possibly read from
- * @return True if processing this read updates the mo_graph.
+ * @return True if the read can be pruned from the thread map list.
*/
-void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
+bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
- SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
- while(true) {
+ SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
+ bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
+ if (hasnonatomicstore) {
+ ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
+ rf_set->push_back(nonatomicstore);
+ }
+
+ // Remove writes that violate read modification order
+ /*
+ uint i = 0;
+ while (i < rf_set->size()) {
+ ModelAction * rf = (*rf_set)[i];
+ if (!r_modification_order(curr, rf, NULL, NULL, true)) {
+ (*rf_set)[i] = rf_set->back();
+ rf_set->pop_back();
+ } else
+ i++;
+ }*/
+ while(true) {
int index = fuzzer->selectWrite(curr, rf_set);
- const ModelAction *rf = (*rf_set)[index];
+ ModelAction *rf = (*rf_set)[index];
ASSERT(rf);
-
- if (r_modification_order(curr, rf, priorset)) {
+ bool canprune = false;
+ if (r_modification_order(curr, rf, priorset, &canprune)) {
for(unsigned int i=0;i<priorset->size();i++) {
mo_graph->addEdge((*priorset)[i], rf);
}
read_from(curr, rf);
get_thread(curr)->set_return_value(curr->get_return_value());
delete priorset;
- return;
+ return canprune && (curr->get_type() == ATOMIC_READ);
}
priorset->clear();
(*rf_set)[index] = rf_set->back();
}
//otherwise fall into the lock case
case ATOMIC_LOCK: {
- if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
- assert_bug("Lock access before initialization");
+ //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
+ //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
+ // assert_bug("Lock access before initialization");
state->locked = get_thread(curr);
ModelAction *unlock = get_last_unlock(curr);
//synchronize with the previous unlock statement
}
break;
}
- case ATOMIC_WAIT:
+ case ATOMIC_WAIT: {
+ //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
+ if (fuzzer->shouldWait(curr)) {
+ /* wake up the other threads */
+ for (unsigned int i = 0;i < get_num_threads();i++) {
+ Thread *t = get_thread(int_to_id(i));
+ Thread *curr_thrd = get_thread(curr);
+ if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
+ scheduler->wake(t);
+ }
+
+ /* unlock the lock - after checking who was waiting on it */
+ state->locked = NULL;
+
+ /* disable this thread */
+ get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
+ scheduler->sleep(get_thread(curr));
+ }
+
+ break;
+ }
+ case ATOMIC_TIMEDWAIT:
case ATOMIC_UNLOCK: {
+ //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
+ //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
+ //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
+ //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
+ //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
+ //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
+ //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
+	//MUST EVENTUALLY RELEASE...
+
/* wake up the other threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
/* unlock the lock - after checking who was waiting on it */
state->locked = NULL;
-
- if (!curr->is_wait())
- break;/* The rest is only for ATOMIC_WAIT */
-
break;
}
case ATOMIC_NOTIFY_ALL: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
- for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
- scheduler->wake(get_thread(*rit));
+ for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
+ scheduler->wake(get_thread(rit->getVal()));
}
waiters->clear();
break;
}
case ATOMIC_NOTIFY_ONE: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
- Thread * thread = fuzzer->selectNotify(waiters);
- scheduler->wake(thread);
+ if (waiters->size() != 0) {
+ Thread * thread = fuzzer->selectNotify(waiters);
+ scheduler->wake(thread);
+ }
break;
}
*/
void ModelExecution::process_write(ModelAction *curr)
{
-
w_modification_order(curr);
-
-
get_thread(curr)->set_return_value(VALUE_NONE);
}
bool updated = false;
if (curr->is_acquire()) {
action_list_t *list = &action_trace;
- action_list_t::reverse_iterator rit;
+ sllnode<ModelAction *> * rit;
/* Find X : is_read(X) && X --sb-> curr */
- for (rit = list->rbegin();rit != list->rend();rit++) {
- ModelAction *act = *rit;
+ for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
+ ModelAction *act = rit->getVal();
if (act == curr)
continue;
if (act->get_tid() != curr->get_tid())
continue;
/* Establish hypothetical release sequences */
- rel_heads_list_t release_heads;
- get_release_seq_heads(curr, act, &release_heads);
- for (unsigned int i = 0;i < release_heads.size();i++)
- synchronize(release_heads[i], curr);
- if (release_heads.size() != 0)
+ ClockVector *cv = get_hb_from_write(act->get_reads_from());
+ if (cv != NULL && curr->get_cv()->merge(cv))
updated = true;
}
}
* @param curr The current action
* @return True if synchronization was updated or a thread completed
*/
-bool ModelExecution::process_thread_action(ModelAction *curr)
+void ModelExecution::process_thread_action(ModelAction *curr)
{
- bool updated = false;
-
switch (curr->get_type()) {
case THREAD_CREATE: {
thrd_t *thrd = (thrd_t *)curr->get_location();
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
break;
}
case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
break; // WL: to be add (modified)
}
+ case THREADONLY_FINISH:
case THREAD_FINISH: {
Thread *th = get_thread(curr);
+ if (curr->get_type() == THREAD_FINISH &&
+ th == model->getInitThread()) {
+ th->complete();
+ setFinished();
+ break;
+ }
+
/* Wake up any joining threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
scheduler->wake(waiting);
}
th->complete();
- updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_START: {
break;
}
+ case THREAD_SLEEP: {
+ Thread *th = get_thread(curr);
+ th->set_pending(curr);
+ scheduler->add_sleep(th);
+ break;
+ }
default:
break;
}
-
- return updated;
}
/**
* Initialize the current action by performing one or more of the following
- * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
- * in the NodeStack, manipulating backtracking sets, allocating and
+ * actions, as appropriate: merging RMWR and RMWC/RMW actions,
+ * manipulating backtracking sets, allocating and
* initializing clock vectors, and computing the promises to fulfill.
*
* @param curr The current action, as passed from the user context; may be
*/
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
- ModelAction *newcurr;
-
if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
- newcurr = process_rmw(*curr);
+ ModelAction *newcurr = process_rmw(*curr);
delete *curr;
*curr = newcurr;
return false;
- }
-
- (*curr)->set_seq_number(get_next_seq_num());
-
- newcurr = node_stack->explore_action(*curr);
- if (newcurr) {
- /* First restore type and order in case of RMW operation */
- if ((*curr)->is_rmwr())
- newcurr->copy_typeandorder(*curr);
-
- ASSERT((*curr)->get_location() == newcurr->get_location());
- newcurr->copy_from_new(*curr);
-
- /* Discard duplicate ModelAction; use action from NodeStack */
- delete *curr;
-
- /* Always compute new clock vector */
- newcurr->create_cv(get_parent_action(newcurr->get_tid()));
-
- *curr = newcurr;
- return false; /* Action was explored previously */
} else {
- newcurr = *curr;
+ ModelAction *newcurr = *curr;
+ newcurr->set_seq_number(get_next_seq_num());
/* Always compute new clock vector */
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
* @return True if this read established synchronization
*/
-bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
+void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
{
ASSERT(rf);
ASSERT(rf->is_write());
act->set_read_from(rf);
if (act->is_acquire()) {
- rel_heads_list_t release_heads;
- get_release_seq_heads(act, act, &release_heads);
- int num_heads = release_heads.size();
- for (unsigned int i = 0;i < release_heads.size();i++)
- if (!synchronize(release_heads[i], act))
- num_heads--;
- return num_heads > 0;
+ ClockVector *cv = get_hb_from_write(rf);
+ if (cv == NULL)
+ return;
+ act->get_cv()->merge(cv);
}
- return false;
}
/**
bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
{
if (*second < *first) {
- set_bad_synchronization();
+		ASSERT(0);	//This should not happen
return false;
}
return second->synchronize_with(first);
if (!blocking->is_complete()) {
return false;
}
+ } else if (curr->is_sleep()) {
+ if (!fuzzer->shouldSleep(curr))
+ return false;
}
return true;
wake_up_sleeping_actions(curr);
- /* Add the action to lists before any other model-checking tasks */
- if (!second_part_of_rmw)
- add_action_to_lists(curr);
-
SnapVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
rf_set = build_may_read_from(curr);
- process_thread_action(curr);
+ bool canprune = false;
if (curr->is_read() && !second_part_of_rmw) {
- process_read(curr, rf_set);
+ canprune = process_read(curr, rf_set);
delete rf_set;
- } else {
+ } else
ASSERT(rf_set == NULL);
- }
+
+ /* Add the action to lists */
+ if (!second_part_of_rmw)
+ add_action_to_lists(curr, canprune);
+
+ if (curr->is_write())
+ add_write_to_lists(curr);
+
+ process_thread_action(curr);
if (curr->is_write())
process_write(curr);
return curr;
}
-/**
- * This is the strongest feasibility check available.
- * @return whether the current trace (partial or complete) must be a prefix of
- * a feasible trace.
- */
-bool ModelExecution::isfeasibleprefix() const
-{
- return !is_infeasible();
-}
-
-/**
- * Print disagnostic information about an infeasible execution
- * @param prefix A string to prefix the output with; if NULL, then a default
- * message prefix will be provided
- */
-void ModelExecution::print_infeasibility(const char *prefix) const
-{
- char buf[100];
- char *ptr = buf;
- if (priv->bad_synchronization)
- ptr += sprintf(ptr, "[bad sw ordering]");
- if (ptr != buf)
- model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
-}
-
-/**
- * Check if the current partial trace is infeasible. Does not check any
- * end-of-execution flags, which might rule out the execution. Thus, this is
- * useful only for ruling an execution as infeasible.
- * @return whether the current partial trace is infeasible.
- */
-bool ModelExecution::is_infeasible() const
-{
- return priv->bad_synchronization;
-}
-
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
ModelAction *lastread = get_last_action(act->get_tid());
*
* @param curr The current action. Must be a read.
* @param rf The ModelAction or Promise that curr reads from. Must be a write.
+ * @param check_only If true, then only check whether the current action satisfies
+ * read modification order or not, without modifying priorset and canprune.
+ * False by default.
* @return True if modification order edges were added; false otherwise
*/
-bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset)
+bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
+ SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- unsigned int i;
ASSERT(curr->is_read());
/* Last SC fence in the current thread */
ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
- ModelAction *last_sc_write = NULL;
- if (curr->is_seqcst())
- last_sc_write = get_last_seq_cst_write(curr);
+ int tid = curr->get_tid();
+
+ /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
+ if ((int)thrd_lists->size() <= tid) {
+ uint oldsize = thrd_lists->size();
+ thrd_lists->resize(priv->next_thread_id);
+ for(uint i = oldsize;i < priv->next_thread_id;i++)
+ new (&(*thrd_lists)[i]) action_list_t();
+ }
+
+ ModelAction *prev_same_thread = NULL;
/* Iterate over all threads */
- for (i = 0;i < thrd_lists->size();i++) {
- /* Last SC fence in thread i */
+ for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
+ /* Last SC fence in thread tid */
ModelAction *last_sc_fence_thread_local = NULL;
- if (int_to_id((int)i) != curr->get_tid())
- last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
+ if (i != 0)
+ last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
- /* Last SC fence in thread i, before last SC fence in current thread */
+ /* Last SC fence in thread tid, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local)
- last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
+ last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
+
+ //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
+ if (prev_same_thread != NULL &&
+ (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
+ (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
+ continue;
+ }
/* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin();rit != list->rend();rit++) {
- ModelAction *act = *rit;
+ action_list_t *list = &(*thrd_lists)[tid];
+ sllnode<ModelAction *> * rit;
+ for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
+ ModelAction *act = rit->getVal();
/* Skip curr */
if (act == curr)
*act < *last_sc_fence_thread_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
}
* before" curr
*/
if (act->happens_before(curr)) {
+ if (i==0) {
+ if (last_sc_fence_local == NULL ||
+ (*last_sc_fence_local < *act)) {
+ prev_same_thread = act;
+ }
+ }
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
} else {
- const ModelAction *prevrf = act->get_reads_from();
+ ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
return false;
- priorset->push_back(prevrf);
+ if (!check_only)
+ priorset->push_back(prevrf);
+ } else {
+ if (act->get_tid() == curr->get_tid()) {
+ //Can prune curr from obj list
+ if (!check_only)
+ *canprune = true;
+ }
}
}
break;
unsigned int i;
ASSERT(curr->is_write());
+ SnapList<ModelAction *> edgeset;
+
if (curr->is_seqcst()) {
/* We have to at least see the last sequentially consistent write,
so we are initialized. */
ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
if (last_seq_cst != NULL) {
- mo_graph->addEdge(last_seq_cst, curr);
+ edgeset.push_back(last_seq_cst);
}
+ //update map for next query
+ obj_last_sc_map.put(curr->get_location(), curr);
}
/* Last SC fence in the current thread */
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
- action_list_t::reverse_iterator rit;
- bool force_edge = false;
- for (rit = list->rbegin();rit != list->rend();rit++) {
- ModelAction *act = *rit;
+ sllnode<ModelAction*>* rit;
+ for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
+ ModelAction *act = rit->getVal();
if (act == curr) {
/*
* 1) If RMW and it actually read from something, then we
* 3) If normal write, we need to look at earlier actions, so
* continue processing list.
*/
- force_edge = true;
if (curr->is_rmw()) {
if (curr->get_reads_from() != NULL)
break;
/* C++, Section 29.3 statement 7 */
if (last_sc_fence_thread_before && act->is_write() &&
*act < *last_sc_fence_thread_before) {
- mo_graph->addEdge(act, curr, force_edge);
+ edgeset.push_back(act);
break;
}
* readfrom(act) --mo--> act
*/
if (act->is_write())
- mo_graph->addEdge(act, curr, force_edge);
+ edgeset.push_back(act);
else if (act->is_read()) {
//if previous read accessed a null, just keep going
- mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
+ edgeset.push_back(act->get_reads_from());
}
break;
- } else if (act->is_read() && !act->could_synchronize_with(curr) &&
- !act->same_thread(curr)) {
- /* We have an action that:
- (1) did not happen before us
- (2) is a read and we are a write
- (3) cannot synchronize with us
- (4) is in a different thread
- =>
- that read could potentially read from our write. Note that
- these checks are overly conservative at this point, we'll
- do more checks before actually removing the
- pendingfuturevalue.
-
- */
-
}
}
}
+ mo_graph->addEdges(&edgeset, curr);
+
}
/**
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin();rit != list->rend();rit++) {
- ModelAction *act = *rit;
+ sllnode<ModelAction *>* rit;
+ for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
+ ModelAction *act = rit->getVal();
/* Don't disallow due to act == reader */
if (!reader->happens_before(act) || reader == act)
}
/**
- * Finds the head(s) of the release sequence(s) containing a given ModelAction.
- * The ModelAction under consideration is expected to be taking part in
- * release/acquire synchronization as an object of the "reads from" relation.
- * Note that this can only provide release sequence support for RMW chains
- * which do not read from the future, as those actions cannot be traced until
- * their "promise" is fulfilled. Similarly, we may not even establish the
- * presence of a release sequence with certainty, as some modification order
- * constraints may be decided further in the future. Thus, this function
- * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
- * and a boolean representing certainty.
+ * Computes the clock vector that happens before propagates from this write.
*
* @param rf The action that might be part of a release sequence. Must be a
* write.
- * @param release_heads A pass-by-reference style return parameter. After
- * execution of this function, release_heads will contain the heads of all the
- * relevant release sequences, if any exists with certainty
- * @return true, if the ModelExecution is certain that release_heads is complete;
- * false otherwise
+ * @return ClockVector of happens before relation.
*/
-bool ModelExecution::release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const
-{
+ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
+ SnapVector<ModelAction *> * processset = NULL;
for ( ;rf != NULL;rf = rf->get_reads_from()) {
ASSERT(rf->is_write());
+ if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
+ break;
+ if (processset == NULL)
+ processset = new SnapVector<ModelAction *>();
+ processset->push_back(rf);
+ }
- if (rf->is_release())
- release_heads->push_back(rf);
- else if (rf->get_last_fence_release())
- release_heads->push_back(rf->get_last_fence_release());
- if (!rf->is_rmw())
- break;/* End of RMW chain */
-
- /** @todo Need to be smarter here... In the linux lock
- * example, this will run to the beginning of the program for
- * every acquire. */
- /** @todo The way to be smarter here is to keep going until 1
- * thread has a release preceded by an acquire and you've seen
- * both. */
-
- /* acq_rel RMW is a sufficient stopping condition */
- if (rf->is_acquire() && rf->is_release())
- return true;/* complete */
- };
- ASSERT(rf); // Needs to be real write
-
- if (rf->is_release())
- return true;/* complete */
-
- /* else relaxed write
- * - check for fence-release in the same thread (29.8, stmt. 3)
- * - check modification order for contiguous subsequence
- * -> rf must be same thread as release */
-
- const ModelAction *fence_release = rf->get_last_fence_release();
- /* Synchronize with a fence-release unconditionally; we don't need to
- * find any more "contiguous subsequence..." for it */
- if (fence_release)
- release_heads->push_back(fence_release);
-
- return true; /* complete */
-}
-
-/**
- * An interface for getting the release sequence head(s) with which a
- * given ModelAction must synchronize. This function only returns a non-empty
- * result when it can locate a release sequence head with certainty. Otherwise,
- * it may mark the internal state of the ModelExecution so that it will handle
- * the release sequence at a later time, causing @a acquire to update its
- * synchronization at some later point in execution.
- *
- * @param acquire The 'acquire' action that may synchronize with a release
- * sequence
- * @param read The read action that may read from a release sequence; this may
- * be the same as acquire, or else an earlier action in the same thread (i.e.,
- * when 'acquire' is a fence-acquire)
- * @param release_heads A pass-by-reference return parameter. Will be filled
- * with the head(s) of the release sequence(s), if they exists with certainty.
- * @see ModelExecution::release_seq_heads
- */
-void ModelExecution::get_release_seq_heads(ModelAction *acquire,
- ModelAction *read, rel_heads_list_t *release_heads)
-{
- const ModelAction *rf = read->get_reads_from();
+ int i = (processset == NULL) ? 0 : processset->size();
- release_seq_heads(rf, release_heads);
+ ClockVector * vec = NULL;
+ while(true) {
+ if (rf->get_rfcv() != NULL) {
+ vec = rf->get_rfcv();
+ } else if (rf->is_acquire() && rf->is_release()) {
+ vec = rf->get_cv();
+ } else if (rf->is_release() && !rf->is_rmw()) {
+ vec = rf->get_cv();
+ } else if (rf->is_release()) {
+ //have rmw that is release and doesn't have a rfcv
+ (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
+ rf->set_rfcv(vec);
+ } else {
+ //operation that isn't release
+ if (rf->get_last_fence_release()) {
+ if (vec == NULL)
+ vec = rf->get_last_fence_release()->get_cv();
+ else
+ (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
+ }
+ rf->set_rfcv(vec);
+ }
+ i--;
+ if (i >= 0) {
+ rf = (*processset)[i];
+ } else
+ break;
+ }
+ if (processset != NULL)
+ delete processset;
+ return vec;
}
/**
*
* @param act is the ModelAction to add.
*/
-void ModelExecution::add_action_to_lists(ModelAction *act)
+void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
{
int tid = id_to_int(act->get_tid());
- ModelAction *uninit = NULL;
- int uninit_id = -1;
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
- if (list->empty() && act->is_atomic_var()) {
- uninit = get_uninitialized_action(act);
- uninit_id = id_to_int(uninit->get_tid());
- list->push_front(uninit);
+ if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
+ action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ act->setActionRef(list->add_back(act));
}
- list->push_back(act);
- action_trace.push_back(act);
- if (uninit)
- action_trace.push_front(uninit);
+ // Update action trace, a total order of all actions
+ act->setTraceRef(action_trace.add_back(act));
+
+ // Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
- if (tid >= (int)vec->size())
+ if ((int)vec->size() <= tid) {
+ uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
- (*vec)[tid].push_back(act);
- if (uninit)
- (*vec)[uninit_id].push_front(uninit);
+ for(uint i = oldsize;i < priv->next_thread_id;i++)
+ new (&(*vec)[i]) action_list_t();
+ }
+ if (!canprune)
+ act->setThrdMapRef((*vec)[tid].add_back(act));
+ // Update thrd_last_action, the last action taken by each thread
if ((int)thrd_last_action.size() <= tid)
thrd_last_action.resize(get_num_threads());
thrd_last_action[tid] = act;
- if (uninit)
- thrd_last_action[uninit_id] = uninit;
+ // Update thrd_last_fence_release, the last release fence taken by each thread
if (act->is_fence() && act->is_release()) {
if ((int)thrd_last_fence_release.size() <= tid)
thrd_last_fence_release.resize(get_num_threads());
if (act->is_wait()) {
void *mutex_loc = (void *) act->get_value();
- get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
+ act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
- if (tid >= (int)vec->size())
+ if ((int)vec->size() <= tid) {
+ uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
- (*vec)[tid].push_back(act);
+ for(uint i = oldsize;i < priv->next_thread_id;i++)
+ new (&(*vec)[i]) action_list_t();
+ }
+ act->setThrdMapRef((*vec)[tid].add_back(act));
+ }
+}
+
+/**
+ * @brief Insert an action into a list kept ordered by sequence number.
+ *
+ * Fast path: if the list is empty or its tail already carries act's
+ * sequence number, append at the back.  Otherwise walk backwards to the
+ * last node with the matching sequence number and insert after it.
+ *
+ * @return The new list node, or NULL if no node with act's sequence
+ *         number exists (act is NOT inserted in that case).
+ */
+sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
+	sllnode<ModelAction*> * rit = list->end();
+	modelclock_t next_seq = act->get_seq_number();
+	if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
+		return list->add_back(act);
+	else {
+		for(;rit != NULL;rit=rit->getPrev()) {
+			if (rit->getVal()->get_seq_number() == next_seq) {
+				return list->insertAfter(rit, act);
+			}
+		}
+		return NULL;
+	}
+}
+
+/**
+ * @brief Like insertIntoActionList, but also builds act's clock vector
+ *        from the action it is inserted after.
+ *
+ * Note the asymmetry: on an empty list the clock vector is created with
+ * a NULL parent and NULL is returned WITHOUT inserting act into the list.
+ *
+ * @return The new list node, or NULL (empty list, or no node with a
+ *         matching sequence number).
+ */
+sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+	sllnode<ModelAction*> * rit = list->end();
+	modelclock_t next_seq = act->get_seq_number();
+	if (rit == NULL) {
+		act->create_cv(NULL);
+		return NULL;
+	} else if (rit->getVal()->get_seq_number() == next_seq) {
+		act->create_cv(rit->getVal());
+		return list->add_back(act);
+	} else {
+		for(;rit != NULL;rit=rit->getPrev()) {
+			if (rit->getVal()->get_seq_number() == next_seq) {
+				act->create_cv(rit->getVal());
+				return list->insertAfter(rit, act);
+			}
+		}
+		return NULL;
+	}
+}
+
+/**
+ * Performs various bookkeeping operations for a normal write. The
+ * complication is that we are typically inserting a normal write
+ * lazily, so we need to insert it into the middle of lists.
+ *
+ * @param act is the ModelAction to add.
+ */
+
+void ModelExecution::add_normal_write_to_lists(ModelAction *act)
+{
+	int tid = id_to_int(act->get_tid());
+	// Splice into the total trace at the action's backdated sequence number
+	act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
+
+	// Update obj_thrd_map, a per location, per thread, order of actions
+	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+	if (tid >= (int)vec->size()) {
+		uint oldsize =vec->size();
+		vec->resize(priv->next_thread_id);
+		// Placement-new the grown slots; SnapVector::resize presumably does
+		// not construct them -- TODO confirm
+		for(uint i=oldsize;i<priv->next_thread_id;i++)
+			new (&(*vec)[i]) action_list_t();
	}
+	act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
+
+	ModelAction * lastact = thrd_last_action[tid];
+	// Update thrd_last_action, the last action taken by each thread
+	if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
+		thrd_last_action[tid] = act;
+}
+
+
+/**
+ * @brief Record a write in obj_wr_thrd_map, the per-location, per-thread
+ *        ordered list of writes, growing the per-thread vector on demand.
+ *
+ * @param write The write action to record; its list node is stashed via
+ *              setActionRef.
+ */
+void ModelExecution::add_write_to_lists(ModelAction *write) {
+	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
+	int tid = id_to_int(write->get_tid());
+	if (tid >= (int)vec->size()) {
+		uint oldsize =vec->size();
+		vec->resize(priv->next_thread_id);
+		// Placement-new the grown slots; SnapVector::resize presumably does
+		// not construct them -- TODO confirm
+		for(uint i=oldsize;i<priv->next_thread_id;i++)
+			new (&(*vec)[i]) action_list_t();
+	}
+	write->setActionRef((*vec)[tid].add_back(write));
+}
/**
ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
{
	void *location = curr->get_location();
-	action_list_t *list = obj_map.get(location);
-	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
-	action_list_t::reverse_iterator rit;
-	for (rit = list->rbegin();(*rit) != curr;rit++)
-		;
-	rit++;	/* Skip past curr */
-	for ( ;rit != list->rend();rit++)
-		if ((*rit)->is_write() && (*rit)->is_seqcst())
-			return *rit;
-	return NULL;
+	/* O(1) lookup replacing the linear scan: obj_last_sc_map is kept
+	   current by w_modification_order, which records each SC write */
+	return obj_last_sc_map.get(location);
}
/**
if (!list)
return NULL;
- action_list_t::reverse_iterator rit = list->rbegin();
+ sllnode<ModelAction*>* rit = list->end();
if (before_fence) {
- for (;rit != list->rend();rit++)
- if (*rit == before_fence)
+ for (;rit != NULL;rit=rit->getPrev())
+ if (rit->getVal() == before_fence)
break;
- ASSERT(*rit == before_fence);
- rit++;
+ ASSERT(rit->getVal() == before_fence);
+ rit=rit->getPrev();
}
- for (;rit != list->rend();rit++)
- if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
- return *rit;
+ for (;rit != NULL;rit=rit->getPrev()) {
+ ModelAction *act = rit->getVal();
+ if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
+ return act;
+ }
return NULL;
}
void *location = curr->get_location();
action_list_t *list = obj_map.get(location);
+ if (list == NULL)
+ return NULL;
+
/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin();rit != list->rend();rit++)
- if ((*rit)->is_unlock() || (*rit)->is_wait())
- return *rit;
+ sllnode<ModelAction*>* rit;
+ for (rit = list->end();rit != NULL;rit=rit->getPrev())
+ if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
+ return rit->getVal();
return NULL;
}
*/
ClockVector * ModelExecution::get_cv(thread_id_t tid) const
{
-	return get_parent_action(tid)->get_cv();
+	/* A thread may have no parent action yet; return NULL in that case
+	 * instead of dereferencing a NULL parent */
+	ModelAction *firstaction=get_parent_action(tid);
+	return firstaction != NULL ? firstaction->get_cv() : NULL;
}
bool valequals(uint64_t val1, uint64_t val2, int size) {
*/
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
-	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
+	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
-	for (i = 0;i < thrd_lists->size();i++) {
-		/* Iterate over actions in thread, starting from most recent */
-		action_list_t *list = &(*thrd_lists)[i];
-		action_list_t::reverse_iterator rit;
-		for (rit = list->rbegin();rit != list->rend();rit++) {
-			ModelAction *act = *rit;
-
-			/* Only consider 'write' actions */
-			if (!act->is_write() || act == curr)
-				continue;
+	/* thrd_lists is NULL when no write to this location was recorded yet */
+	if (thrd_lists != NULL)
+		for (i = 0;i < thrd_lists->size();i++) {
+			/* Iterate over actions in thread, starting from most recent */
+			action_list_t *list = &(*thrd_lists)[i];
+			sllnode<ModelAction *> * rit;
+			for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
+				ModelAction *act = rit->getVal();
+
+				/* obj_wr_thrd_map only holds writes (see
+				   add_write_to_lists), so only the self-check remains */
+				if (act == curr)
+					continue;
-			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
-			bool allow_read = true;
-
-			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
-				allow_read = false;
-
-			/* Need to check whether we will have two RMW reading from the same value */
-			if (curr->is_rmwr()) {
-				/* It is okay if we have a failing CAS */
-				if (!curr->is_rmwrcas() ||
-						valequals(curr->get_value(), act->get_value(), curr->getSize())) {
-					//Need to make sure we aren't the second RMW
-					CycleNode * node = mo_graph->getNode_noCreate(act);
-					if (node != NULL && node->getRMW() != NULL) {
-						//we are the second RMW
-						allow_read = false;
+				/* Don't consider more than one seq_cst write if we are a seq_cst read. */
+				bool allow_read = true;
+
+				if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
+					allow_read = false;
+
+				/* Need to check whether we will have two RMW reading from the same value */
+				if (curr->is_rmwr()) {
+					/* It is okay if we have a failing CAS */
+					if (!curr->is_rmwrcas() ||
+							valequals(curr->get_value(), act->get_value(), curr->getSize())) {
+						//Need to make sure we aren't the second RMW
+						CycleNode * node = mo_graph->getNode_noCreate(act);
+						if (node != NULL && node->getRMW() != NULL) {
+							//we are the second RMW
+							allow_read = false;
+						}
+					}
				}
-			}
-			if (allow_read) {
-				/* Only add feasible reads */
-				rf_set->push_back(act);
-			}
+				if (allow_read) {
+					/* Only add feasible reads */
+					rf_set->push_back(act);
+				}
-			/* Include at most one act per-thread that "happens before" curr */
-			if (act->happens_before(curr))
-				break;
+				/* Include at most one act per-thread that "happens before" curr */
+				if (act->happens_before(curr))
+					break;
+			}
		}
-	}
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
	return rf_set;
}
-/**
- * @brief Get an action representing an uninitialized atomic
- *
- * This function may create a new one or try to retrieve one from the NodeStack
- *
- * @param curr The current action, which prompts the creation of an UNINIT action
- * @return A pointer to the UNINIT ModelAction
- */
-ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
+/**
+ * @brief Print an action list, accumulating and printing a hash over
+ *        the actions (debugging aid)
+ * @param list The action list to print
+ */
+static void print_list(action_list_t *list)
{
-	Node *node = curr->get_node();
-	ModelAction *act = node->get_uninit_action();
-	if (!act) {
-		act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
-		node->set_uninit_action(act);
-	}
-	act->create_cv(NULL);
-	return act;
-}
-
-static void print_list(const action_list_t *list)
-{
-	action_list_t::const_iterator it;
+	sllnode<ModelAction*> *it;
	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	unsigned int hash = 0;
-	for (it = list->begin();it != list->end();it++) {
-		const ModelAction *act = *it;
+	for (it = list->begin();it != NULL;it=it->getNext()) {
+		const ModelAction *act = it->getVal();
		if (act->get_seq_number() > 0)
			act->print();
-		hash = hash^(hash<<3)^((*it)->hash());
+		hash = hash^(hash<<3)^(it->getVal()->hash());
	}
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
}
#if SUPPORT_MOD_ORDER_DUMP
-void ModelExecution::dumpGraph(char *filename) const
+void ModelExecution::dumpGraph(char *filename)
{
char buffer[200];
sprintf(buffer, "%s.dot", filename);
mo_graph->dumpNodes(file);
ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
- for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
- ModelAction *act = *it;
+ for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
+ ModelAction *act = it->getVal();
if (act->is_read()) {
mo_graph->dot_print_node(file, act);
mo_graph->dot_print_edge(file,
#endif
/** @brief Prints an execution trace summary. */
-void ModelExecution::print_summary() const
+void ModelExecution::print_summary()
{
#if SUPPORT_MOD_ORDER_DUMP
char buffername[100];
#endif
model_print("Execution trace %d:", get_execution_number());
- if (isfeasibleprefix()) {
- if (scheduler->all_threads_sleeping())
- model_print(" SLEEP-SET REDUNDANT");
- if (have_bug_reports())
- model_print(" DETECTED BUG(S)");
- } else
- print_infeasibility(" INFEASIBLE");
+ if (scheduler->all_threads_sleeping())
+ model_print(" SLEEP-SET REDUNDANT");
+ if (have_bug_reports())
+ model_print(" DETECTED BUG(S)");
+
model_print("\n");
print_list(&action_trace);
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
{
/* Do not split atomic RMW */
- if (curr->is_rmwr())
+ if (curr->is_rmwr() && !paused_by_fuzzer(curr))
return get_thread(curr);
- if (curr->is_write()) {
- std::memory_order order = curr->get_mo();
- switch(order) {
- case std::memory_order_relaxed:
- return get_thread(curr);
- case std::memory_order_release:
- return get_thread(curr);
- default:
- return NULL;
- }
- }
-
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
if (curr->get_type() == THREAD_CREATE)
return NULL;
}
+/**
+ * @brief Check whether the fuzzer has paused this action.
+ *
+ * @param act A read atomic action
+ * @return true if the fuzzer paused @a act (its sequence number is 0)
+ */
+bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
+{
+	ASSERT(act->is_read());
+
+	// Actions paused by fuzzer have their sequence number reset to 0
+	return act->get_seq_number() == 0;
+}
+
/**
* Takes the next step in the execution, if possible.
* @param curr The current step to take
curr = check_current_action(curr);
ASSERT(curr);
+ /* Process this action in ModelHistory for records */
+ model->get_history()->process_action( curr, curr->get_tid() );
+
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
return action_select_next_thread(curr);
}
+/**
+ * @brief Unlink an action from every bookkeeping list that references it.
+ *
+ * Uses the list-node references cached on the ModelAction (trace ref,
+ * thread-map ref, action ref) for O(1) erasure.  Which list the action
+ * ref points into depends on the action type; free()d writes are also
+ * removed from the modification-order graph.  The caller is responsible
+ * for deleting @a act afterwards.
+ *
+ * @param act The action to unlink
+ */
+void ModelExecution::removeAction(ModelAction *act) {
+	{
+		// Unlink from the global action trace
+		sllnode<ModelAction *> * listref = act->getTraceRef();
+		if (listref != NULL) {
+			action_trace.erase(listref);
+		}
+	}
+	{
+		// Unlink from the per-location, per-thread ordering list
+		sllnode<ModelAction *> * listref = act->getThrdMapRef();
+		if (listref != NULL) {
+			SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+			(*vec)[act->get_tid()].erase(listref);
+		}
+	}
+	if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
+		// SC fences and unlocks live in obj_map under their own location
+		sllnode<ModelAction *> * listref = act->getActionRef();
+		if (listref != NULL) {
+			action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+			list->erase(listref);
+		}
+	} else if (act->is_wait()) {
+		// Waits are filed in obj_map under the mutex address stored in
+		// the action's value
+		sllnode<ModelAction *> * listref = act->getActionRef();
+		if (listref != NULL) {
+			void *mutex_loc = (void *) act->get_value();
+			get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
+		}
+	} else if (act->is_free()) {
+		// Freed writes come out of the per-thread write map
+		sllnode<ModelAction *> * listref = act->getActionRef();
+		if (listref != NULL) {
+			SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
+			(*vec)[act->get_tid()].erase(listref);
+		}
+		//Remove from Cyclegraph
+		mo_graph->freeAction(act);
+	}
+}
+
+/**
+ * @brief Compute the pairwise minimum of the clock vectors of all
+ *        non-completed threads.
+ *
+ * collectActions() uses the result to decide which actions are no longer
+ * visible to any live thread and can be garbage-collected.
+ *
+ * @return A freshly allocated ClockVector (caller must delete), or NULL
+ *         when every thread has completed
+ */
+ClockVector * ModelExecution::computeMinimalCV() {
+	ClockVector *cvmin = NULL;
+	//Thread 0 isn't a real thread, so skip it..
+	for(unsigned int i = 1;i < thread_map.size();i++) {
+		Thread * t = thread_map[i];
+		if (t->get_state() == THREAD_COMPLETED)
+			continue;
+		thread_id_t tid = int_to_id(i);
+		// NOTE(review): get_cv() returns NULL for a thread without a
+		// parent action; confirm live threads always have one before
+		// this is passed to ClockVector()/minmerge()
+		ClockVector * cv = get_cv(tid);
+		if (cvmin == NULL)
+			cvmin = new ClockVector(cv, NULL);
+		else
+			cvmin->minmerge(cv);
+	}
+	return cvmin;
+}
+
+//Options...
+//How often to check for memory
+//How much of the trace to always keep
+//Whether to sacrifice completeness...i.e., remove visible writes
+
+/**
+ * @brief Garbage-collect actions that can no longer affect the execution.
+ *
+ * Pass 1 (forward over the trace, up to used_sequence_numbers -
+ * traceminsize): for each store that is below the minimal clock vector
+ * of all live threads (or unconditionally if params->removevisible),
+ * mark it and every mo-earlier store reachable via the cycle graph's
+ * in-edges as free (READY_FREE).
+ *
+ * Pass 2 (backward from where pass 1 stopped): physically unlink and
+ * delete dead actions, with type-specific rules for reads, writes,
+ * fences, unlock/wait, and thread-create actions.
+ */
+void ModelExecution::collectActions() {
+	//Compute minimal clock vector for all live threads
+	ClockVector *cvmin = computeMinimalCV();
+	// NOTE(review): computeMinimalCV() returns NULL when every thread
+	// has completed; print()/getClock() below would then dereference
+	// NULL -- confirm this is only called while threads are live
+	cvmin->print();	// looks like leftover debug output -- verify
+	SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
+	modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
+
+	//Next walk action trace... When we hit an action, see if it is
+	//invisible (e.g., earlier than the first before the minimum
+	//clock for the thread... if so erase it and all previous
+	//actions in cyclegraph
+	sllnode<ModelAction*> * it;
+	for (it = action_trace.begin();it != NULL;it=it->getNext()) {
+		ModelAction *act = it->getVal();
+		modelclock_t actseq = act->get_seq_number();
+
+		//See if we are done
+		if (actseq > maxtofree)
+			break;
+
+		thread_id_t act_tid = act->get_tid();
+		modelclock_t tid_clock = cvmin->getClock(act_tid);
+		if (actseq <= tid_clock || params->removevisible) {
+			ModelAction * write;
+			if (act->is_write()) {
+				write = act;
+			} else if (act->is_read()) {
+				write = act->get_reads_from();
+			} else
+				continue;
+
+			//Mark everything earlier in MO graph to be freed
+			CycleNode * cn = mo_graph->getNode_noCreate(write);
+			if (cn != NULL) {
+				queue->push_back(cn);
+				while(!queue->empty()) {
+					CycleNode * node = queue->back();
+					queue->pop_back();
+					for(unsigned int i=0;i<node->getNumInEdges();i++) {
+						CycleNode * prevnode = node->getInEdge(i);
+						ModelAction * prevact = prevnode->getAction();
+						if (prevact->get_type() != READY_FREE) {
+							prevact->set_free();
+							queue->push_back(prevnode);
+						}
+					}
+				}
+			}
+		}
+	}
+	// Pass 2: walk backwards, deleting dead actions.  The iterator is
+	// advanced before any deletion so erasing the node cannot
+	// invalidate it.
+	for (;it != NULL;) {
+		ModelAction *act = it->getVal();
+		//Do iteration early since we may delete node...
+		it=it->getPrev();
+		bool islastact = false;
+		ModelAction *lastact = get_last_action(act->get_tid());
+		if (act == lastact) {
+			// The last action of a still-running thread must be kept
+			Thread * th = get_thread(act);
+			islastact = !th->is_complete();
+		}
+
+		if (act->is_read()) {
+			if (islastact) {
+				act->set_read_from(NULL);
+			}
+			// NOTE(review): when islastact is true, set_read_from(NULL)
+			// above makes get_reads_from() return NULL, so ->is_free()
+			// would dereference NULL here -- verify this path
+			if (act->get_reads_from()->is_free()) {
+				removeAction(act);
+				delete act;
+				// NOTE(review): no 'continue' here, yet 'act' is used
+				// again below (act->is_free()) after being deleted --
+				// looks like a use-after-free, verify
+			} else {
+				const ModelAction *rel_fence =act->get_last_fence_release();
+				if (rel_fence != NULL) {
+					modelclock_t relfenceseq = rel_fence->get_seq_number();
+					thread_id_t relfence_tid = rel_fence->get_tid();
+					modelclock_t tid_clock = cvmin->getClock(relfence_tid);
+					//Remove references to irrelevant release fences
+					if (relfenceseq <= tid_clock)
+						act->set_last_fence_release(NULL);
+				}
+			}
+		}
+
+		if (islastact) {
+			continue;
+		} else if (act->is_free()) {
+			removeAction(act);
+			delete act;
+		} else if (act->is_write()) {
+			//Do nothing with write that hasn't been marked to be freed
+		} else if (act->is_fence()) {
+			//Note that acquire fences can always be safely
+			//removed, but could incur extra overheads in
+			//traversals. Removing them before the cvmin seems
+			//like a good compromise.
+
+			//Release fences before the cvmin don't do anything
+			//because everyone has already synchronized.
+
+			//Sequentially fences before cvmin are redundant
+			//because happens-before will enforce same
+			//orderings.
+
+			modelclock_t actseq = act->get_seq_number();
+			thread_id_t act_tid = act->get_tid();
+			modelclock_t tid_clock = cvmin->getClock(act_tid);
+			if (actseq <= tid_clock) {
+				removeAction(act);
+				delete act;
+			}
+		} else {
+			//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
+			//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
+			//need to keep most recent unlock/wait for each lock
+			if(act->is_unlock() || act->is_wait()) {
+				ModelAction * lastlock = get_last_unlock(act);
+				if (lastlock != act) {
+					removeAction(act);
+					delete act;
+				}
+			} else if (act->is_create()) {
+				if (act->get_thread_operand()->is_complete()) {
+					removeAction(act);
+					delete act;
+				}
+			} else {
+				removeAction(act);
+				delete act;
+			}
+		}
+	}
+
+	delete cvmin;
+	delete queue;
+}
+
+
+
/** @brief Return this execution's Fuzzer instance */
Fuzzer * ModelExecution::getFuzzer() {
	return fuzzer;
}