return node_stack->get_head();
}
+/**
+ * @brief Select the next thread to execute based on the current action
+ *
+ * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
+ * actions should be followed by the execution of their child thread. In either
+ * case, the current action should determine the next thread schedule.
+ *
+ * @param curr The current action
+ * @return The next thread to run, if the current action will determine this
+ * selection; otherwise NULL
+ */
+Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
+{
+ /* Do not split atomic RMW */
+ if (curr->is_rmwr())
+ return get_thread(curr);
+ /* Follow CREATE with the created thread */
+ if (curr->get_type() == THREAD_CREATE)
+ return curr->get_thread_operand();
+ return NULL;
+}
+
/**
* @brief Choose the next thread to execute.
*
- * This function chooses the next thread that should execute. It can force the
- * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
- * followed by a THREAD_START, or it can enforce execution replay/backtracking.
- * The model-checker may have no preference regarding the next thread (i.e.,
- * when exploring a new execution ordering), in which case we defer to the
- * scheduler.
+ * This function chooses the next thread that should execute. It can enforce
+ * execution replay/backtracking or, when the model-checker has no preference
+ * regarding the next thread (i.e., when exploring a new execution ordering),
+ * it defers to the scheduler.
*
- * @param curr Optional: The current ModelAction. Only used if non-NULL and it
- * might guide the choice of next thread (i.e., THREAD_CREATE should be
- * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
- * @return The next chosen thread to run, if any exist. Or else if no threads
- * remain to be executed, return NULL.
+ * @return The next chosen thread to run, if any exist. Or else if the current
+ * execution should terminate, return NULL.
*/
-Thread * ModelChecker::get_next_thread(ModelAction *curr)
+Thread * ModelChecker::get_next_thread()
{
thread_id_t tid;
- if (curr != NULL) {
- /* Do not split atomic actions. */
- if (curr->is_rmwr())
- return get_thread(curr);
- else if (curr->get_type() == THREAD_CREATE)
- return curr->get_thread_operand();
- }
-
/*
* Have we completed exploring the preselected path? Then let the
* scheduler decide
if (unfair)
continue;
}
+
+ /* See if CHESS-like yield fairness allows */
+ if (model->params.yieldon) {
+ bool unfair = false;
+ for (int t = 0; t < node->get_num_threads(); t++) {
+ thread_id_t tother = int_to_id(t);
+ if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
+ unfair = true;
+ break;
+ }
+ }
+ if (unfair)
+ continue;
+ }
+
/* Cache the latest backtracking point */
set_latest_backtrack(prev);
bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
{
ASSERT(rf);
+ ASSERT(rf->is_write());
+
act->set_read_from(rf);
if (act->is_acquire()) {
rel_heads_list_t release_heads;
wake_up_sleeping_actions(curr);
+ /* Compute fairness information for CHESS yield algorithm */
+ if (model->params.yieldon) {
+ curr->get_node()->update_yield(scheduler);
+ }
+
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw)
add_action_to_lists(curr);
int uninit_id = -1;
action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
if (list->empty() && act->is_atomic_var()) {
- uninit = new_uninitialized_action(act->get_location());
+ uninit = get_uninitialized_action(act);
uninit_id = id_to_int(uninit->get_tid());
- list->push_back(uninit);
+ list->push_front(uninit);
}
list->push_back(act);
}
/**
- * @brief Create a new action representing an uninitialized atomic
- * @param location The memory location of the atomic object
- * @return A pointer to a new ModelAction
+ * @brief Get an action representing an uninitialized atomic
+ *
+ * This function may create a new one or try to retrieve one from the NodeStack
+ *
+ * @param curr The current action, which prompts the creation of an UNINIT action
+ * @return A pointer to the UNINIT ModelAction
*/
-ModelAction * ModelChecker::new_uninitialized_action(void *location) const
+ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
{
- ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
- act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
+ Node *node = curr->get_node();
+ ModelAction *act = node->get_uninit_action();
+ if (!act) {
+ act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
+ node->set_uninit_action(act);
+ }
act->create_cv(NULL);
return act;
}
unsigned int hash = 0;
for (it = list->begin(); it != list->end(); it++) {
- (*it)->print();
+ const ModelAction *act = *it;
+ if (act->get_seq_number() > 0)
+ act->print();
hash = hash^(hash<<3)^((*it)->hash());
}
model_print("HASH %u\n", hash);
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
- Thread *next_thrd = get_next_thread(curr);
+ Thread *next_thrd = NULL;
+ if (curr)
+ next_thrd = action_select_next_thread(curr);
+ if (!next_thrd)
+ next_thrd = get_next_thread();
DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
next_thrd ? id_to_int(next_thrd->get_id()) : -1);