mo_graph(new CycleGraph()),
failed_promise(false),
too_many_reads(false),
- asserted(false)
+ asserted(false),
+ bad_synchronization(false)
{
/* Allocate this struct on the snapshotting heap */
priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
node_stack->reset_execution();
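+ /* Reset the feasibility flags for the next execution */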
failed_promise = false;
too_many_reads = false;
+ bad_synchronization = false;
reset_asserted();
snapshotObject->backTrackBeforeStep(0);
}
bool r_status = false;
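+ /* The second part of an RMW skips these checks; they already ran when the read part executed */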
if (!second_part_of_rmw) {
- check_recency(curr);
+ check_recency(curr, reads_from);
r_status = r_modification_order(curr, reads_from);
}
*
* The unlock operation has to re-enable all of the threads that are
* waiting on the lock.
+ *
+ * @return True if synchronization was updated; false otherwise
*/
-void ModelChecker::process_mutex(ModelAction *curr) {
+bool ModelChecker::process_mutex(ModelAction *curr) {
std::mutex *mutex = (std::mutex *)curr->get_location();
struct std::mutex_state *state = mutex->get_state();
switch (curr->get_type()) {
state->islocked = true;
ModelAction *unlock = get_last_unlock(curr);
//synchronize with the previous unlock statement
- if (unlock != NULL)
+ if (unlock != NULL) {
curr->synchronize_with(unlock);
+ return true;
+ }
break;
}
case ATOMIC_UNLOCK: {
default:
ASSERT(0);
}
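+ /* No synchronization was established */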
+ return false;
}
/**
build_reads_from_past(curr);
curr = newcurr;
+ /* Initialize work_queue with the "current action" work */
work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
while (!work_queue.empty()) {
switch (work.type) {
case WORK_CHECK_CURR_ACTION: {
ModelAction *act = work.action;
- bool updated = false;
+ bool update = false; /* update this location's release sequences */
+ bool update_all = false; /* update all release sequences */
- process_thread_action(curr);
+ if (process_thread_action(curr))
+ update_all = true;
if (act->is_read() && process_read(act, second_part_of_rmw))
- updated = true;
+ update = true;
if (act->is_write() && process_write(act))
- updated = true;
+ update = true;
- if (act->is_mutex_op())
- process_mutex(act);
+ if (act->is_mutex_op() && process_mutex(act))
+ update_all = true;
- if (updated)
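+ /* A NULL location means: re-check release sequences at all locations */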
+ if (update_all)
+ work_queue.push_back(CheckRelSeqWorkEntry(NULL));
+ else if (update)
work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
break;
}
if (w_modification_order(act))
updated = true;
}
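+ /* Commit the mo_graph edges recorded while processing this action */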
+ mo_graph->commitChanges();
if (updated)
work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/** @return whether the current partial trace is feasible. */
bool ModelChecker::isfeasible() {
+ if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
+ DEBUG("Infeasible: RMW violation\n");
+
return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
}
DEBUG("Infeasible: failed promise\n");
if (too_many_reads)
DEBUG("Infeasible: too many reads\n");
+ if (bad_synchronization)
+ DEBUG("Infeasible: bad synchronization ordering\n");
if (promises_expired())
DEBUG("Infeasible: promises expired\n");
}
- return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !promises_expired();
+ return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
}
/** Returns whether the current completed trace is feasible. */
*
* If so, we decide that the execution is no longer feasible.
*/
-void ModelChecker::check_recency(ModelAction *curr) {
+void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
if (params.maxreads != 0) {
+
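+ //Nothing to check if there is at most one candidate write to read from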
if (curr->get_node()->get_read_from_size() <= 1)
return;
-
//Must make sure that the execution is currently feasible, since we
//could otherwise accidentally clear the infeasible flag by rolling back
if (!isfeasible())
return;
-
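+ /* Lists of actions at this location, indexed by thread ID */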
std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
int tid = id_to_int(curr->get_tid());
/* Skip checks if this thread has no actions at this location */
if ((int)thrd_lists->size() <= tid)
return;
-
action_list_t *list = &(*thrd_lists)[tid];
action_list_t::reverse_iterator rit = list->rbegin();
ModelAction *act = *rit;
if (!act->is_read())
return;
- if (act->get_reads_from() != curr->get_reads_from())
+
+ if (act->get_reads_from() != rf)
return;
if (act->get_node()->get_read_from_size() <= 1)
return;
}
-
for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
//Get write
const ModelAction * write = curr->get_node()->get_read_from_at(i);
+
//Need a different write
- if (write==curr->get_reads_from())
+ if (write==rf)
continue;
/* Test to see whether this is a feasible write to read from */
*/
bool ModelChecker::release_seq_head(const ModelAction *rf, rel_heads_list_t *release_heads) const
{
+ /* Only check for release sequences if there are no cycles */
+ if (mo_graph->checkForCycles())
+ return false;
+
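+ /* Search backward from rf for the head(s) of its release sequence */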
while (rf) {
ASSERT(rf->is_write());
bool future_ordered = false;
ModelAction *last = get_last_action(int_to_id(i));
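+ /* A finished thread can take no more actions, so no future write can interpose */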
- if (last && rf->happens_before(last))
+ if (last && (rf->happens_before(last) ||
+ last->get_type() == THREAD_FINISH))
future_ordered = true;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
work_queue->push_back(MOEdgeWorkEntry(act));
/* propagate synchronization to later actions */
- action_list_t::reverse_iterator it = action_trace->rbegin();
- for (; (*it) != act; it++) {
- ModelAction *propagate = *it;
+ action_list_t::reverse_iterator rit = action_trace->rbegin();
+ for (; (*rit) != act; rit++) {
+ ModelAction *propagate = *rit;
if (act->happens_before(propagate)) {
propagate->synchronize_with(act);
/* Re-check 'propagate' for mo_graph edges */
Promise *promise = (*promises)[promise_index];
if (write->get_node()->get_promise(i)) {
ModelAction *read = promise->get_action();
- read->read_from(write);
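+ //Establish the RMW edge in the mo_graph before assigning reads-from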
if (read->is_rmw()) {
mo_graph->addRMWEdge(write, read);
}
+ read->read_from(write);
//First fix up the modification order for actions that happened
//before the read
r_modification_order(read, write);
//Next fix up the modification order for actions that happened
//after the read.
post_r_modification_order(read, write);
+
promises->erase(promises->begin() + promise_index);
resolved = true;
} else