next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
bugs(),
- bad_synchronization(false),
asserted(false)
{ }
modelclock_t used_sequence_numbers;
SnapVector<bug_message *> bugs;
/** @brief Incorrectly-ordered synchronization was made */
- bool bad_synchronization;
bool asserted;
SNAPSHOTALLOC
model(m),
params(NULL),
scheduler(scheduler),
- action_trace(),
thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
pthread_counter(1),
+ action_trace(),
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
priv(new struct model_snapshot_members ()),
mo_graph(new CycleGraph()),
fuzzer(new NewFuzzer()),
- thrd_func_list(),
- thrd_func_act_lists(),
isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
add_thread(model_thread);
- scheduler->register_engine(this);
fuzzer->register_engine(m->get_history(), this);
+ scheduler->register_engine(this);
+#ifdef TLS
+ pthread_key_create(&pthreadkey, tlsdestructor);
+#endif
}
/** @brief Destructor */
return model->get_execution_number();
}
-static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
+static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
{
action_list_t *tmp = hash->get(ptr);
if (tmp == NULL) {
return tmp;
}
-static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
+static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
{
SnapVector<action_list_t> *tmp = hash->get(ptr);
if (tmp == NULL) {
if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
return true;
}
+ /* The sleep is literally sleeping */
if (asleep->is_sleep()) {
if (fuzzer->shouldWake(asleep))
return true;
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
if (scheduler->is_sleep_set(thr)) {
- if (should_wake_up(curr, thr))
+ if (should_wake_up(curr, thr)) {
/* Remove this thread from sleep set */
scheduler->remove_sleep(thr);
+ if (thr->get_pending()->is_sleep())
+ thr->set_wakeup_state(true);
+ }
}
}
}
-/** @brief Alert the model-checker that an incorrectly-ordered
- * synchronization was made */
-void ModelExecution::set_bad_synchronization()
-{
- priv->bad_synchronization = true;
-}
-
-bool ModelExecution::assert_bug(const char *msg)
+void ModelExecution::assert_bug(const char *msg)
{
priv->bugs.push_back(new bug_message(msg));
-
- if (isfeasibleprefix()) {
- set_assert();
- return true;
- }
- return false;
+ set_assert();
}
/** @return True, if any bugs have been reported for this execution */
*/
bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
- SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
+ SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
if (hasnonatomicstore) {
ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
rf_set->push_back(nonatomicstore);
}
+ // Remove writes that violate read modification order
+ uint i = 0;
+ while (i < rf_set->size()) {
+ ModelAction * rf = (*rf_set)[i];
+ if (!r_modification_order(curr, rf, NULL, NULL, true)) {
+ (*rf_set)[i] = rf_set->back();
+ rf_set->pop_back();
+ } else
+ i++;
+ }
+
while(true) {
int index = fuzzer->selectWrite(curr, rf_set);
- if (index == -1) // no feasible write exists
- return false;
ModelAction *rf = (*rf_set)[index];
if (canprune && curr->get_type() == ATOMIC_READ) {
int tid = id_to_int(curr->get_tid());
(*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
+ curr->setThrdMapRef(NULL);
}
return true;
}
+
+ ASSERT(false);
+ /* TODO: Following code not needed anymore */
priorset->clear();
(*rf_set)[index] = rf_set->back();
rf_set->pop_back();
case THREAD_START: {
break;
}
+ case THREAD_SLEEP: {
+ Thread *th = get_thread(curr);
+ th->set_pending(curr);
+ scheduler->add_sleep(th);
+ break;
+ }
default:
break;
}
/**
 * @brief Synchronize two actions, establishing a happens-before edge
 *
 * @param first The earlier ModelAction to synchronize with
 * @param second The later ModelAction being synchronized
 * @return True if the synchronization succeeds; false otherwise
 */
bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
{
	if (*second < *first) {
		ASSERT(0);	// This should not happen: synchronization cannot go backwards in time
		return false;
	}
	return second->synchronize_with(first);
}
wake_up_sleeping_actions(curr);
- /* Add uninitialized actions to lists */
- if (!second_part_of_rmw && curr->get_type() != NOOP)
- add_uninit_action_to_lists(curr);
-
SnapVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
rf_set = build_may_read_from(curr);
if (curr->is_read() && !second_part_of_rmw) {
- bool success = process_read(curr, rf_set);
+ process_read(curr, rf_set);
delete rf_set;
- if (!success)
- return curr; // Do not add action to lists
} else
ASSERT(rf_set == NULL);
/* Add the action to lists */
- if (!second_part_of_rmw && curr->get_type() != NOOP)
+ if (!second_part_of_rmw)
add_action_to_lists(curr);
if (curr->is_write())
return curr;
}
-/**
- * This is the strongest feasibility check available.
- * @return whether the current trace (partial or complete) must be a prefix of
- * a feasible trace.
- */
-bool ModelExecution::isfeasibleprefix() const
-{
- return !is_infeasible();
-}
-
-/**
- * Print disagnostic information about an infeasible execution
- * @param prefix A string to prefix the output with; if NULL, then a default
- * message prefix will be provided
- */
-void ModelExecution::print_infeasibility(const char *prefix) const
-{
- char buf[100];
- char *ptr = buf;
- if (priv->bad_synchronization)
- ptr += sprintf(ptr, "[bad sw ordering]");
- if (ptr != buf)
- model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
-}
-
-/**
- * Check if the current partial trace is infeasible. Does not check any
- * end-of-execution flags, which might rule out the execution. Thus, this is
- * useful only for ruling an execution as infeasible.
- * @return whether the current partial trace is infeasible.
- */
-bool ModelExecution::is_infeasible() const
-{
- return priv->bad_synchronization;
-}
-
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
ModelAction *lastread = get_last_action(act->get_tid());
*
* @param curr The current action. Must be a read.
* @param rf The ModelAction or Promise that curr reads from. Must be a write.
+ * @param check_only If true, then only check whether the current action satisfies
+ * read modification order or not, without modifying priorset and canprune.
+ * False by default.
* @return True if modification order edges were added; false otherwise
*/
-bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
+bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
+ SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
*act < *last_sc_fence_thread_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 4 */
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
/* C++, Section 29.3 statement 6 */
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
break;
}
}
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
- priorset->push_back(act);
+ if (!check_only)
+ priorset->push_back(act);
} else {
- const ModelAction *prevrf = act->get_reads_from();
+ ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
return false;
- priorset->push_back(prevrf);
+ if (!check_only)
+ priorset->push_back(prevrf);
} else {
if (act->get_tid() == curr->get_tid()) {
//Can prune curr from obj list
- *canprune = true;
+ if (!check_only)
+ *canprune = true;
}
}
}
return vec;
}
-/**
- * Performs various bookkeeping operations for the current ModelAction when it is
- * the first atomic action occurred at its memory location.
- *
- * For instance, adds uninitialized action to the per-object, per-thread action vector
- * and to the action trace list of all thread actions.
- *
- * @param act is the ModelAction to process.
- */
-void ModelExecution::add_uninit_action_to_lists(ModelAction *act)
-{
- int tid = id_to_int(act->get_tid());
- ModelAction *uninit = NULL;
- int uninit_id = -1;
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
- if (list->empty() && act->is_atomic_var()) {
- uninit = get_uninitialized_action(act);
- uninit_id = id_to_int(uninit->get_tid());
- list->push_front(uninit);
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
- if ((int)vec->size() <= uninit_id) {
- int oldsize = (int) vec->size();
- vec->resize(uninit_id + 1);
- for(int i = oldsize; i < uninit_id + 1; i++) {
- new (&(*vec)[i]) action_list_t();
- }
- }
- (*vec)[uninit_id].push_front(uninit);
- }
-
- // Update action trace, a total order of all actions
- if (uninit)
- action_trace.push_front(uninit);
-
- // Update obj_thrd_map, a per location, per thread, order of actions
- SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
- if ((int)vec->size() <= tid) {
- uint oldsize = vec->size();
- vec->resize(priv->next_thread_id);
- for(uint i = oldsize; i < priv->next_thread_id; i++)
- new (&(*vec)[i]) action_list_t();
- }
- if (uninit)
- (*vec)[uninit_id].push_front(uninit);
-
- // Update thrd_last_action, the last action taken by each thrad
- if ((int)thrd_last_action.size() <= tid)
- thrd_last_action.resize(get_num_threads());
- if (uninit)
- thrd_last_action[uninit_id] = uninit;
-}
-
-
/**
* Performs various bookkeeping operations for the current ModelAction. For
* instance, adds action to the per-object, per-thread action vector and to the
void ModelExecution::add_action_to_lists(ModelAction *act)
{
int tid = id_to_int(act->get_tid());
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
- list->push_back(act);
+ if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
+ action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ act->setActionRef(list->add_back(act));
+ }
// Update action trace, a total order of all actions
- action_trace.push_back(act);
+ act->setTraceRef(action_trace.add_back(act));
+
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
if ((int)vec->size() <= tid) {
uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
- for(uint i = oldsize; i < priv->next_thread_id; i++)
+ for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
}
- (*vec)[tid].push_back(act);
+ act->setThrdMapRef((*vec)[tid].add_back(act));
- // Update thrd_last_action, the last action taken by each thrad
+ // Update thrd_last_action, the last action taken by each thread
if ((int)thrd_last_action.size() <= tid)
thrd_last_action.resize(get_num_threads());
thrd_last_action[tid] = act;
if (act->is_wait()) {
void *mutex_loc = (void *) act->get_value();
- get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
+ act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
if ((int)vec->size() <= tid) {
uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
- for(uint i = oldsize; i < priv->next_thread_id; i++)
+ for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
}
- (*vec)[tid].push_back(act);
+ act->setThrdMapRef((*vec)[tid].add_back(act));
}
}
-void insertIntoActionList(action_list_t *list, ModelAction *act) {
+sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
sllnode<ModelAction*> * rit = list->end();
modelclock_t next_seq = act->get_seq_number();
if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
- list->push_back(act);
+ return list->add_back(act);
else {
for(;rit != NULL;rit=rit->getPrev()) {
if (rit->getVal()->get_seq_number() == next_seq) {
- list->insertAfter(rit, act);
- break;
+ return list->insertAfter(rit, act);
}
}
+ return NULL;
}
}
-void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
sllnode<ModelAction*> * rit = list->end();
modelclock_t next_seq = act->get_seq_number();
if (rit == NULL) {
act->create_cv(NULL);
+ return NULL;
} else if (rit->getVal()->get_seq_number() == next_seq) {
act->create_cv(rit->getVal());
- list->push_back(act);
+ return list->add_back(act);
} else {
for(;rit != NULL;rit=rit->getPrev()) {
if (rit->getVal()->get_seq_number() == next_seq) {
act->create_cv(rit->getVal());
- list->insertAfter(rit, act);
- break;
+ return list->insertAfter(rit, act);
}
}
+ return NULL;
}
}
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
{
int tid = id_to_int(act->get_tid());
- insertIntoActionListAndSetCV(&action_trace, act);
-
- action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
- insertIntoActionList(list, act);
+ act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
for(uint i=oldsize;i<priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
}
- insertIntoActionList(&(*vec)[tid],act);
+ act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
	// Update thrd_last_action, the last action taken by each thread
if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
for(uint i=oldsize;i<priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
}
- (*vec)[tid].push_back(write);
+ write->setActionRef((*vec)[tid].add_back(write));
}
/**
void *location = curr->get_location();
action_list_t *list = obj_map.get(location);
+ if (list == NULL)
+ return NULL;
+
/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
sllnode<ModelAction*>* rit;
for (rit = list->end();rit != NULL;rit=rit->getPrev())
return rf_set;
}
-/**
- * @brief Get an action representing an uninitialized atomic
- *
- * This function may create a new one.
- *
- * @param curr The current action, which prompts the creation of an UNINIT action
- * @return A pointer to the UNINIT ModelAction
- */
-ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
-{
- ModelAction *act = curr->get_uninit_action();
- if (!act) {
- act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
- curr->set_uninit_action(act);
- }
- act->create_cv(NULL);
- return act;
-}
-
static void print_list(action_list_t *list)
{
sllnode<ModelAction*> *it;
#endif
model_print("Execution trace %d:", get_execution_number());
- if (isfeasibleprefix()) {
- if (scheduler->all_threads_sleeping())
- model_print(" SLEEP-SET REDUNDANT");
- if (have_bug_reports())
- model_print(" DETECTED BUG(S)");
- } else
- print_infeasibility(" INFEASIBLE");
+ if (scheduler->all_threads_sleeping())
+ model_print(" SLEEP-SET REDUNDANT");
+ if (have_bug_reports())
+ model_print(" DETECTED BUG(S)");
+
model_print("\n");
print_list(&action_trace);
return action_select_next_thread(curr);
}
+void ModelExecution::removeAction(ModelAction *act) {
+ {
+ sllnode<ModelAction *> * listref = act->getTraceRef();
+ if (listref != NULL) {
+ action_trace.erase(listref);
+ }
+ }
+ {
+ sllnode<ModelAction *> * listref = act->getThrdMapRef();
+ if (listref != NULL) {
+ SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+ (*vec)[act->get_tid()].erase(listref);
+ }
+ }
+ if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
+ sllnode<ModelAction *> * listref = act->getActionRef();
+ if (listref != NULL) {
+ action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ list->erase(listref);
+ }
+ } else if (act->is_wait()) {
+ sllnode<ModelAction *> * listref = act->getActionRef();
+ if (listref != NULL) {
+ void *mutex_loc = (void *) act->get_value();
+ get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
+ }
+ } else if (act->is_write()) {
+ sllnode<ModelAction *> * listref = act->getActionRef();
+ if (listref != NULL) {
+ SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
+ (*vec)[act->get_tid()].erase(listref);
+ }
+ //Remove from Cyclegraph
+ mo_graph->freeAction(act);
+ }
+}
+
+ClockVector * ModelExecution::computeMinimalCV() {
+ ClockVector *cvmin = NULL;
+ for(unsigned int i = 0;i < thread_map.size();i++) {
+ Thread * t = thread_map[i];
+ if (t->get_state() == THREAD_COMPLETED)
+ continue;
+ thread_id_t tid = int_to_id(i);
+ ClockVector * cv = get_cv(tid);
+ if (cvmin == NULL)
+ cvmin = new ClockVector(cv, NULL);
+ else
+ cvmin->minmerge(cv);
+ }
+ return cvmin;
+}
+
+void ModelExecution::collectActions() {
+ //Compute minimal clock vector for all live threads
+ ClockVector *cvmin = computeMinimalCV();
+ SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
+ //walk action trace... When we hit an action, see if it is
+ //invisible (e.g., earlier than the first before the minimum
+ //clock for the thread... if so erase it and all previous
+ //actions in cyclegraph
+ for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
+ ModelAction *act = it->getVal();
+ modelclock_t actseq = act->get_seq_number();
+ thread_id_t act_tid = act->get_tid();
+ modelclock_t tid_clock = cvmin->getClock(act_tid);
+ if (actseq <= tid_clock) {
+ ModelAction * write;
+ if (act->is_write()) {
+ write = act;
+ } else if (act->is_read()) {
+ write = act->get_reads_from();
+ } else
+ continue;
+
+ //Mark everything earlier in MO graph to be freed
+ queue->push_back(mo_graph->getNode_noCreate(write));
+ while(!queue->empty()) {
+ CycleNode * node = queue->back();
+ queue->pop_back();
+ for(unsigned int i=0;i<node->getNumInEdges();i++) {
+ CycleNode * prevnode = node->getInEdge(i);
+ ModelAction * prevact = prevnode->getAction();
+ if (prevact->get_type() != READY_FREE) {
+ prevact->set_free();
+ queue->push_back(prevnode);
+ }
+ }
+ }
+ }
+ }
+ for (sllnode<ModelAction*>* it = action_trace.end();it != NULL;it=it->getPrev()) {
+ ModelAction *act = it->getVal();
+ if (act->is_free()) {
+ removeAction(act);
+ delete act;
+ } else if (act->is_read()) {
+ if (act->get_reads_from()->is_free()) {
+ removeAction(act);
+ delete act;
+ } else {
+ const ModelAction *rel_fence =act->get_last_fence_release();
+ if (rel_fence != NULL) {
+ modelclock_t relfenceseq = rel_fence->get_seq_number();
+ thread_id_t relfence_tid = rel_fence->get_tid();
+ modelclock_t tid_clock = cvmin->getClock(relfence_tid);
+ //Remove references to irrelevant release fences
+ if (relfenceseq <= tid_clock)
+ act->set_last_fence_release(NULL);
+ }
+ }
+ } else if (act->is_fence()) {
+ //Note that acquire fences can always be safely
+ //removed, but could incur extra overheads in
+ //traversals. Removing them before the cvmin seems
+ //like a good compromise.
+
+ //Release fences before the cvmin don't do anything
+ //because everyone has already synchronized.
+
+ //Sequentially fences before cvmin are redundant
+ //because happens-before will enforce same
+ //orderings.
+
+ modelclock_t actseq = act->get_seq_number();
+ thread_id_t act_tid = act->get_tid();
+ modelclock_t tid_clock = cvmin->getClock(act_tid);
+ if (actseq <= tid_clock) {
+ removeAction(act);
+ delete act;
+ }
+ } else {
+ //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
+ //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
+ //need to keep most recent unlock/wait for each lock
+ }
+ }
+
+ delete cvmin;
+ delete queue;
+}
+
+
+
/** @brief Accessor for this execution's fuzzer (used e.g. to select writes for reads) */
Fuzzer * ModelExecution::getFuzzer() {
	return fuzzer;
}