* adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
* followed by a THREAD_START, or it can enforce execution replay/backtracking.
* The model-checker may have no preference regarding the next thread (i.e.,
- * when exploring a new execution ordering), in which case this will return
- * NULL.
- * @param curr The current ModelAction. This action might guide the choice of
- * next thread.
- * @return The next thread to run. If the model-checker has no preference, NULL.
+ * when exploring a new execution ordering), in which case we defer to the
+ * scheduler.
+ *
+ * @param curr Optional: The current ModelAction. Only used if non-NULL and it
+ * might guide the choice of next thread (i.e., THREAD_CREATE should be
+ * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
+ * @return The next chosen thread to run, if one exists; otherwise (when no
+ * threads remain to be executed), NULL.
*/
Thread * ModelChecker::get_next_thread(ModelAction *curr)
{
return curr->get_thread_operand();
}
- /* Have we completed exploring the preselected path? */
+ /*
+ * Have we completed exploring the preselected path? Then let the
+ * scheduler decide
+ */
if (diverge == NULL)
- return NULL;
+ return scheduler->select_next_thread();
/* Else, we are trying to replay an execution */
ModelAction *next = node_stack->get_next()->get_action();
return true;
}
-ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
+/**
+ * @brief Find a fence-related conflict (backtracking point) for a store.
+ *
+ * Only applies when @a act is a write. Requires that @a act is itself a
+ * release or that its thread has a prior fence-release; otherwise no
+ * fence backtracking is attempted. It then scans the entire execution
+ * trace backward looking, in other threads, for either a load-acquire of
+ * the same variable or a (non-acquire) load of the same variable together
+ * with an acquire-fence in that thread.
+ *
+ * @param act The current action (only stores are considered)
+ * @return The latest acquire-fence worth backtracking to, or NULL if no
+ * fence backtracking is needed
+ */
+ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
+{
+ /* Only perform release/acquire fence backtracking for stores */
+ if (!act->is_write())
+ return NULL;
+
+ /* Find a fence-release (or, act is a release) */
+ ModelAction *last_release;
+ if (act->is_release())
+ last_release = act;
+ else
+ last_release = get_last_fence_release(act->get_tid());
+ if (!last_release)
+ return NULL;
+
+ /* Per-thread records: the most recent acquire-fence seen in the
+ * backward scan, and same-variable plain loads */
+ std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
+ std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
+ bool found_acquire_fences = false;
+ /* Find a prior:
+ * load-acquire
+ * or
+ * load --sb-> fence-acquire */
+ action_list_t *list = action_trace;
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ /* Actions in act's own thread never conflict with it */
+ if (act->same_thread(prev))
+ continue;
+
+ int tid = id_to_int(prev->get_tid());
+
+ if (prev->is_read() && act->same_var(prev)) {
+ if (prev->is_acquire()) {
+ /* Found most recent load-acquire, don't need
+ * to search for more fences */
+ if (!found_acquire_fences)
+ return NULL;
+ } else {
+ prior_loads[tid] = prev;
+ }
+ }
+ /* Record only the latest acquire-fence for each thread */
+ if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
+ found_acquire_fences = true;
+ acquire_fences[tid] = prev;
+ }
+ }
+
+ /* A thread contributes a backtracking point only if it has both an
+ * acquire-fence and a same-variable load recorded; among those, pick
+ * the latest fence in trace order */
+ ModelAction *latest_backtrack = NULL;
+ for (unsigned int i = 0; i < acquire_fences.size(); i++)
+ if (acquire_fences[i] && prior_loads[i])
+ if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
+ latest_backtrack = acquire_fences[i];
+ return latest_backtrack;
+}
+
+ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
{
switch (act->get_type()) {
- case ATOMIC_FENCE:
+ /* case ATOMIC_FENCE: fences don't directly cause backtracking */
case ATOMIC_READ:
case ATOMIC_WRITE:
case ATOMIC_RMW: {
- /* Optimization: relaxed operations don't need backtracking */
- if (act->is_relaxed())
- return NULL;
+ ModelAction *ret = NULL;
+
/* linear search: from most recent to oldest */
action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *prev = *rit;
- if (prev->could_synchronize_with(act))
- return prev;
+ if (prev->could_synchronize_with(act)) {
+ ret = prev;
+ break;
+ }
}
- break;
+
+ ModelAction *ret2 = get_last_fence_conflict(act);
+ if (!ret2)
+ return ret;
+ if (!ret)
+ return ret2;
+ if (*ret < *ret2)
+ return ret2;
+ return ret;
}
case ATOMIC_LOCK:
case ATOMIC_TRYLOCK: {
if (mo_graph->checkForCycles())
return false;
- while (rf) {
+ for ( ; rf != NULL; rf = rf->get_reads_from()) {
ASSERT(rf->is_write());
if (rf->is_release())
/* acq_rel RMW is a sufficient stopping condition */
if (rf->is_acquire() && rf->is_release())
return true; /* complete */
-
- rf = rf->get_reads_from();
};
if (!rf) {
/* read from future: need to settle this later */
bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
{
- while (true) {
+ for ( ; write != NULL; write = write->get_reads_from()) {
/* UNINIT actions don't have a Node, and they never sleep */
if (write->is_uninitialized())
return true;
bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
if (write->is_release() && thread_sleep)
return true;
- if (!write->is_rmw()) {
+ if (!write->is_rmw())
return false;
- }
- if (write->get_reads_from() == NULL)
- return true;
- write = write->get_reads_from();
}
+ return true;
}
/**
scheduler->remove_thread(curr_thrd);
Thread *next_thrd = get_next_thread(curr);
- /* Only ask for the next thread from Scheduler if we haven't chosen one
- * already */
- if (!next_thrd)
- next_thrd = scheduler->select_next_thread();
DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
next_thrd ? id_to_int(next_thrd->get_id()) : -1);
};
} while (next_execution());
+ model_print("******* Model-checking complete: *******\n");
print_stats();
}