#include <stdio.h>
+#include <algorithm>
#include "model.h"
#include "action.h"
ModelChecker::ModelChecker(struct model_params params) :
/* Initialize default scheduler */
scheduler(new Scheduler()),
- /* First thread created will have id INITIAL_THREAD_ID */
- next_thread_id(INITIAL_THREAD_ID),
- used_sequence_numbers(0),
num_executions(0),
params(params),
- current_action(NULL),
diverge(NULL),
- nextThread(NULL),
action_trace(new action_list_t()),
thread_map(new HashTable<int, Thread *, int>()),
obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
lazy_sync_with_release(new HashTable<void *, std::list<ModelAction *>, uintptr_t, 4>()),
thrd_last_action(new std::vector<ModelAction *>(1)),
node_stack(new NodeStack()),
- next_backtrack(NULL),
mo_graph(new CycleGraph()),
failed_promise(false),
+ too_many_reads(false),
asserted(false)
{
+ /* Allocate this "size" on the snapshotting heap */
+ priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
+ /* First thread created will have id INITIAL_THREAD_ID */
+ priv->next_thread_id = INITIAL_THREAD_ID;
+
+ /* Cache a direct pointer to the counter of pending lazy release-sequence
+  * entries, which now lives in the snapshot-managed priv struct */
+ lazy_sync_size = &priv->lazy_sync_size;
}
/** @brief Destructor */
{
DEBUG("+++ Resetting to initial state +++\n");
node_stack->reset_execution();
- current_action = NULL;
- next_thread_id = INITIAL_THREAD_ID;
- used_sequence_numbers = 0;
- nextThread = NULL;
- next_backtrack = NULL;
failed_promise = false;
+ /* NOTE(review): the removed per-field resets moved into priv, which the
+  * snapshot rollback below presumably restores -- confirm. too_many_reads
+  * is an ordinary member, so it is still cleared explicitly here. */
+ too_many_reads = false;
reset_asserted();
snapshotObject->backTrackBeforeStep(0);
}
/** @returns a thread ID for a new Thread */
thread_id_t ModelChecker::get_next_id()
{
- return next_thread_id++;
+ /* The id counter now lives in snapshot-managed state (priv) */
+ return priv->next_thread_id++;
}
/** @returns the number of user threads created during this execution */
int ModelChecker::get_num_threads()
{
- return next_thread_id;
+ /* Ids are handed out sequentially, so the next unused id doubles as the
+  * thread count (assumes INITIAL_THREAD_ID == 0 -- confirm) */
+ return priv->next_thread_id;
}
/** @returns a sequence number for a new ModelAction */
modelclock_t ModelChecker::get_next_seq_num()
{
- return ++used_sequence_numbers;
+ /* Sequence numbers are snapshot-managed, so they presumably roll back
+  * together with the rest of the execution state */
+ return ++priv->used_sequence_numbers;
}
/**
return;
/* Cache the latest backtracking point */
- if (!next_backtrack || *prev > *next_backtrack)
- next_backtrack = prev;
+ if (!priv->next_backtrack || *prev > *priv->next_backtrack)
+ priv->next_backtrack = prev;
/* If this is a new backtracking point, mark the tree */
if (!node->set_backtrack(t->get_id()))
*/
ModelAction * ModelChecker::get_next_backtrack()
{
- ModelAction *next = next_backtrack;
- next_backtrack = NULL;
+ /* Consume the cached backtracking point: it is cleared once returned */
+ ModelAction *next = priv->next_backtrack;
+ priv->next_backtrack = NULL;
return next;
}
uint64_t value = VALUE_NONE;
bool updated = false;
if (curr->is_read()) {
- const ModelAction *reads_from = curr->get_node()->get_read_from();
- if (reads_from != NULL) {
- value = reads_from->get_value();
- /* Assign reads_from, perform release/acquire synchronization */
- curr->read_from(reads_from);
- if (r_modification_order(curr,reads_from))
- updated = true;
- } else {
- /* Read from future value */
- value = curr->get_node()->get_future_value();
- curr->read_from(NULL);
- Promise *valuepromise = new Promise(curr, value);
- promises->push_back(valuepromise);
+ /* Retry loop: if the chosen reads-from makes the trace infeasible and
+  * other candidates remain, roll back the tentative changes and try the
+  * next reads-from choice (or fall through to a future value) */
+ while(true) {
+ const ModelAction *reads_from = curr->get_node()->get_read_from();
+ if (reads_from != NULL) {
+ value = reads_from->get_value();
+ /* Assign reads_from, perform release/acquire synchronization */
+ curr->read_from(reads_from);
+ if (!already_added)
+ check_recency(curr,false);
+
+ bool r_status=r_modification_order(curr,reads_from);
+
+ /* Infeasible choice with alternatives left: undo the uncommitted
+  * mo_graph edges, clear the recency flag, and retry */
+ if (!isfeasible()&&(curr->get_node()->increment_read_from()||!curr->get_node()->future_value_empty())) {
+ mo_graph->rollbackChanges();
+ too_many_reads=false;
+ continue;
+ }
+
+ mo_graph->commitChanges();
+ updated |= r_status;
+ } else {
+ /* Read from future value */
+ value = curr->get_node()->get_future_value();
+ curr->read_from(NULL);
+ Promise *valuepromise = new Promise(curr, value);
+ promises->push_back(valuepromise);
+ }
+ break;
}
} else if (curr->is_write()) {
if (w_modification_order(curr))
updated = true;
if (resolve_promises(curr))
updated = true;
+ /* Writes never retry, so their mo_graph updates commit unconditionally */
+ mo_graph->commitChanges();
}
if (updated)
if (!parnode->backtrack_empty() || !currnode->read_from_empty() ||
!currnode->future_value_empty() || !currnode->promise_empty())
- if (!next_backtrack || *curr > *next_backtrack)
- next_backtrack = curr;
+ /* Backtracking bookkeeping now lives in snapshot-managed priv */
+ if (!priv->next_backtrack || *curr > *priv->next_backtrack)
+ priv->next_backtrack = curr;
set_backtracking(curr);
/* Do not split atomic actions. */
if (curr->is_rmwr())
return thread_current();
+ /* The THREAD_CREATE action points to the created Thread */
+ else if (curr->get_type() == THREAD_CREATE)
+ return (Thread *)curr->get_location();
else
return get_next_replay_thread();
}
/** @returns whether the current partial trace must be a prefix of a
* feasible trace. */
-
bool ModelChecker::isfeasibleprefix() {
- return promises->size()==0;
+ /* Also require that no release sequences are still awaiting lazy
+  * synchronization (tracked by *lazy_sync_size) */
+ return promises->size() == 0 && *lazy_sync_size == 0;
}
/** @returns whether the current partial trace is feasible. */
bool ModelChecker::isfeasible() {
- return !mo_graph->checkForCycles() && !failed_promise;
+ /* too_many_reads marks traces pruned by the params.maxreads bound */
+ return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads;
}
/** Returns whether the current completed trace is feasible. */
return lastread;
}
+/**
+ * Checks whether a thread has read from the same write for too many times.
+ * @todo This may be more subtle than this code segment addresses at this
+ * point... Potential problems to ponder and fix:
+ * (1) What if the reads_from set keeps changing such that there is no common
+ * write?
+ * (2) What if the problem is that the other writes would break modification
+ * order.
+ */
+void ModelChecker::check_recency(ModelAction *curr, bool already_added) {
+ if (params.maxreads != 0) {
+ /* A read with at most one reads-from candidate cannot be diverted */
+ if (curr->get_node()->get_read_from_size() <= 1)
+ return;
+
+ std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+ int tid = id_to_int(curr->get_tid());
+
+ /* Skip checks */
+ if ((int)thrd_lists->size() <= tid)
+ return;
+
+ action_list_t *list = &(*thrd_lists)[tid];
+
+ action_list_t::reverse_iterator rit = list->rbegin();
+ /* Skip past curr */
+ if (!already_added) {
+ /* NOTE(review): this scan has no rend() bound -- confirm curr is
+  * always present in its thread's list when already_added is false */
+ for (; (*rit) != curr; rit++)
+ ;
+ /* go past curr now */
+ rit++;
+ }
+
+ /* Count how many immediately-preceding reads by this thread read from
+  * the same write as curr; bail out as soon as the streak breaks */
+ int count=0;
+ for (; rit != list->rend(); rit++) {
+ ModelAction *act = *rit;
+ if (!act->is_read())
+ return;
+ if (act->get_reads_from() != curr->get_reads_from())
+ return;
+ if (act->get_node()->get_read_from_size() <= 1)
+ return;
+ count++;
+ if (count >= params.maxreads) {
+ /* We've read from the same write for too many times */
+ too_many_reads = true;
+ /* NOTE(review): could return here; further iterations cannot
+  * un-set the flag */
+ }
+ }
+ }
+}
+
/**
* Updates the mo_graph with the constraints imposed from the current read.
* @param curr The current action. Must be a read.
that read could potentially read from our write.
*/
if (act->get_node()->add_future_value(curr->get_value()) &&
- (!next_backtrack || *act > *next_backtrack))
- next_backtrack = act;
+ (!priv->next_backtrack || *act > *priv->next_backtrack))
+ priv->next_backtrack = act;
}
}
}
if (rf->is_release())
release_heads->push_back(rf);
if (rf->is_rmw()) {
- if (rf->is_acquire())
+ /* We need a RMW action that is both an acquire and release to stop */
+ /** @todo Need to be smarter here... In the linux lock
+ * example, this will run to the beginning of the program for
+ * every acquire. */
+ if (rf->is_acquire() && rf->is_release())
return true; /* complete */
+ /* Otherwise follow the RMW chain backwards through its reads-from */
return release_seq_head(rf->get_reads_from(), release_heads);
}
action_list_t::const_reverse_iterator rit;
/* Find rf in the thread list */
- for (rit = list->rbegin(); rit != list->rend(); rit++)
- if (*rit == rf)
- break;
+ /* Replace the hand-rolled scan with std::find, and assert that rf must
+  * appear in its own thread's action list */
+ rit = std::find(list->rbegin(), list->rend(), rf);
+ ASSERT(rit != list->rend());
/* Find the last write/release */
for (; rit != list->rend(); rit++)
if (id_to_int(rf->get_tid()) == (int)i)
continue;
list = &(*thrd_lists)[i];
+
+ /* Can we ensure no future writes from this thread may break
+ * the release seq? */
+ bool future_ordered = false;
+
for (rit = list->rbegin(); rit != list->rend(); rit++) {
const ModelAction *act = *rit;
if (!act->is_write())
/* Reach synchronization -> this thread is complete */
if (act->happens_before(release))
break;
- if (rf->happens_before(act))
+ if (rf->happens_before(act)) {
+ future_ordered = true;
continue;
+ }
/* Check modification order */
- if (mo_graph->checkReachable(rf, act))
+ if (mo_graph->checkReachable(rf, act)) {
/* rf --mo--> act */
+ future_ordered = true;
continue;
+ }
if (mo_graph->checkReachable(act, release))
/* act --mo--> release */
break;
}
certain = false;
}
+ /* No write from this thread is known to be ordered after rf, so a
+  * future write could still break the release sequence */
+ if (!future_ordered)
+ return false; /* This thread is uncertain */
}
if (certain)
std::list<ModelAction *> *list;
list = lazy_sync_with_release->get_safe_ptr(act->get_location());
list->push_back(act);
+ /* Track the total number of pending lazy release-sequence entries */
+ (*lazy_sync_size)++;
}
}
propagate->synchronize_with(act);
}
}
- if (complete)
+ if (complete) {
it = list->erase(it);
+ /* Keep the pending-entry counter in sync with the list contents */
+ (*lazy_sync_size)--;
+ } else
it++;
}
std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
if (tid >= (int)vec->size())
- vec->resize(next_thread_id);
+ /* Grow to the current thread count; ids are assigned densely */
+ vec->resize(priv->next_thread_id);
(*vec)[tid].push_back(act);
if ((int)thrd_last_action->size() <= tid)
curr = thread_current();
if (curr) {
if (curr->get_state() == THREAD_READY) {
- ASSERT(current_action);
- nextThread = check_current_action(current_action);
- current_action = NULL;
+ ASSERT(priv->current_action);
+ priv->nextThread = check_current_action(priv->current_action);
+ priv->current_action = NULL;
if (!curr->is_blocked() && !curr->is_complete())
scheduler->add_thread(curr);
} else {
ASSERT(false);
}
}
- next = scheduler->next_thread(nextThread);
+ next = scheduler->next_thread(priv->nextThread);
/* Infeasible -> don't take any more steps */
if (!isfeasible())