/** Initializes a CycleGraph object. */
CycleGraph::CycleGraph() :
- hasCycles(false)
+ hasCycles(false),
+ oldCycles(false)
{
}
// Check for Cycles
hasCycles=checkReachable(tonode, fromnode);
}
+
+ rollbackvector.push_back(fromnode);
fromnode->addEdge(tonode);
CycleNode * rmwnode=fromnode->getRMW();
// Check for Cycles
hasCycles=checkReachable(tonode, rmwnode);
}
+ rollbackvector.push_back(rmwnode);
rmwnode->addEdge(tonode);
}
}
/* Two RMW actions cannot read from the same write. */
if (fromnode->setRMW(rmwnode)) {
hasCycles=true;
+ } else {
+ rmwrollbackvector.push_back(fromnode);
}
/* Transfer all outgoing edges from the from node to the rmw node */
std::vector<CycleNode *> * edges=fromnode->getEdges();
for(unsigned int i=0;i<edges->size();i++) {
CycleNode * tonode=(*edges)[i];
+ rollbackvector.push_back(rmwnode);
rmwnode->addEdge(tonode);
}
-
+ rollbackvector.push_back(fromnode);
fromnode->addEdge(rmwnode);
}
return false;
}
+/** Commit changes to the cyclegraph. */
+void CycleGraph::commitChanges() {
+ rollbackvector.resize(0);
+ rmwrollbackvector.resize(0);
+ oldCycles=hasCycles;
+}
+
+/** Rollback changes to the previous commit. */
+void CycleGraph::rollbackChanges() {
+ for (unsigned int i = 0; i < rollbackvector.size(); i++) {
+ rollbackvector[i]->popEdge();
+ }
+
+ for (unsigned int i = 0; i < rmwrollbackvector.size(); i++) {
+ rmwrollbackvector[i]->clearRMW();
+ }
+
+ hasCycles = oldCycles;
+ rollbackvector.resize(0);
+ rmwrollbackvector.resize(0);
+}
+
/** @returns whether a CycleGraph contains cycles. */
bool CycleGraph::checkForCycles() {
return hasCycles;
* @see CycleGraph::addRMWEdge
*/
bool CycleNode::setRMW(CycleNode *node) {
- CycleNode * oldhasRMW=hasRMW;
+ if (hasRMW!=NULL)
+ return true;
hasRMW=node;
- return (oldhasRMW!=NULL);
+ return false;
}
void addRMWEdge(const ModelAction *from, const ModelAction *rmw);
bool checkReachable(const ModelAction *from, const ModelAction *to);
+ void commitChanges();
+ void rollbackChanges();
private:
CycleNode * getNode(const ModelAction *);
/** @brief A flag: true if this graph contains cycles */
bool hasCycles;
+
+ bool oldCycles;
+
+ std::vector<CycleNode *> rollbackvector;
+ std::vector<CycleNode *> rmwrollbackvector;
};
/** @brief A node within a CycleGraph; corresponds to one ModelAction */
std::vector<CycleNode *> * getEdges();
bool setRMW(CycleNode *);
CycleNode* getRMW();
+ void popEdge() {
+ edges.pop_back();
+ };
+ void clearRMW() {
+ hasRMW=NULL;
+ }
private:
/** @brief The ModelAction that this node represents */
#include "model.h"
#include "snapshot-interface.h"
+static void param_defaults(struct model_params * params) {
+ params->maxreads = 0;
+}
+
static void print_usage() {
printf(
-"Usage: <program name> [OPTIONS]\n"
+"Usage: <program name> [MC_OPTIONS] -- [PROGRAM ARGUMENTS]\n"
"\n"
"Options:\n"
"-h Display this help message and exit\n"
-);
+"-m Maximum times a thread can read from the same write while other writes exist\n"
+"-- Program arguments follow.\n\n");
exit(EXIT_SUCCESS);
}
-static void parse_options(struct model_params *params, int argc, char **argv) {
- const char *shortopts = "h";
+static void parse_options(struct model_params *params, int *argc, char ***argv) {
+ const char *shortopts = "hm:";
int opt;
bool error = false;
- while (!error && (opt = getopt(argc, argv, shortopts)) != -1) {
+ while (!error && (opt = getopt(*argc, *argv, shortopts)) != -1) {
switch (opt) {
case 'h':
print_usage();
break;
+ case 'm':
+ params->maxreads = atoi(optarg);
+ break;
default: /* '?' */
error = true;
break;
}
}
+ (*argc) -= optind;
+ (*argv) += optind;
+
if (error)
print_usage();
}
thrd_t user_thread;
struct model_params params;
- parse_options(¶ms, main_argc, main_argv);
+ param_defaults(¶ms);
+
+ parse_options(¶ms, &main_argc, &main_argv);
//Initialize race detector
initRaceDetector();
node_stack(new NodeStack()),
mo_graph(new CycleGraph()),
failed_promise(false),
+ too_many_reads(false),
asserted(false)
{
/* Allocate this "size" on the snapshotting heap */
DEBUG("+++ Resetting to initial state +++\n");
node_stack->reset_execution();
failed_promise = false;
+ too_many_reads = false;
reset_asserted();
snapshotObject->backTrackBeforeStep(0);
}
return next;
}
+/**
+ * Processes a read or rmw model action.
+ * @param curr is the read model action to process.
+ * @param th is the thread
+ * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
+ * @return True if processing this read updates the mo_graph.
+ */
+
+bool ModelChecker::process_read(ModelAction *curr, Thread * th, bool second_part_of_rmw) {
+ uint64_t value;
+ bool updated=false;
+ while(true) {
+ const ModelAction *reads_from = curr->get_node()->get_read_from();
+ if (reads_from != NULL) {
+ value = reads_from->get_value();
+ /* Assign reads_from, perform release/acquire synchronization */
+ curr->read_from(reads_from);
+ if (!second_part_of_rmw) {
+ check_recency(curr,false);
+ }
+
+ bool r_status=r_modification_order(curr,reads_from);
+
+ if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||!curr->get_node()->future_value_empty())) {
+ mo_graph->rollbackChanges();
+ too_many_reads=false;
+ continue;
+ }
+
+ mo_graph->commitChanges();
+ updated |= r_status;
+ } else {
+ /* Read from future value */
+ value = curr->get_node()->get_future_value();
+ curr->read_from(NULL);
+ Promise *valuepromise = new Promise(curr, value);
+ promises->push_back(valuepromise);
+ }
+ th->set_return_value(value);
+ return updated;
+ }
+}
+
/**
* This is the heart of the model checker routine. It performs model-checking
* actions corresponding to a given "current action." Among other processes, it
*/
Thread * ModelChecker::check_current_action(ModelAction *curr)
{
- bool already_added = false;
+ bool second_part_of_rmw = false;
ASSERT(curr);
if (curr->is_rmwc() || curr->is_rmw()) {
ModelAction *tmp = process_rmw(curr);
- already_added = true;
+ second_part_of_rmw = true;
delete curr;
curr = tmp;
} else {
break;
}
- /* Assign reads_from values */
Thread *th = get_thread(curr->get_tid());
- uint64_t value = VALUE_NONE;
+
bool updated = false;
if (curr->is_read()) {
- const ModelAction *reads_from = curr->get_node()->get_read_from();
- if (reads_from != NULL) {
- value = reads_from->get_value();
- /* Assign reads_from, perform release/acquire synchronization */
- curr->read_from(reads_from);
- if (r_modification_order(curr,reads_from))
- updated = true;
- } else {
- /* Read from future value */
- value = curr->get_node()->get_future_value();
- curr->read_from(NULL);
- Promise *valuepromise = new Promise(curr, value);
- promises->push_back(valuepromise);
- }
- } else if (curr->is_write()) {
- if (w_modification_order(curr))
- updated = true;
- if (resolve_promises(curr))
- updated = true;
+ updated=process_read(curr, th, second_part_of_rmw);
+ }
+
+ if (curr->is_write()) {
+ bool updated_mod_order=w_modification_order(curr);
+ bool updated_promises=resolve_promises(curr);
+ updated=updated_mod_order|updated_promises;
+
+ mo_graph->commitChanges();
+ th->set_return_value(VALUE_NONE);
}
if (updated)
resolve_release_sequences(curr->get_location());
- th->set_return_value(value);
-
/* Add action to list. */
- if (!already_added)
+ if (!second_part_of_rmw)
add_action_to_lists(curr);
Node *currnode = curr->get_node();
Node *parnode = currnode->get_parent();
- if (!parnode->backtrack_empty() || !currnode->read_from_empty() ||
- !currnode->future_value_empty() || !currnode->promise_empty())
- if (!priv->next_backtrack || *curr > *priv->next_backtrack)
- priv->next_backtrack = curr;
+ if ((!parnode->backtrack_empty() ||
+ !currnode->read_from_empty() ||
+ !currnode->future_value_empty() ||
+ !currnode->promise_empty())
+ && (!priv->next_backtrack ||
+ *curr > *priv->next_backtrack)) {
+ priv->next_backtrack = curr;
+ }
set_backtracking(curr);
/** @returns whether the current partial trace is feasible. */
bool ModelChecker::isfeasible() {
- return !mo_graph->checkForCycles() && !failed_promise;
+ return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads;
}
/** Returns whether the current completed trace is feasible. */
return lastread;
}
+/**
+ * Checks whether a thread has read from the same write for too many times
+ * without seeing the effects of a later write.
+ *
+ * Basic idea:
+ * 1) there must be a different write that we could read from that would satisfy the modification order,
+ * 2) we must have read from the same value in excess of maxreads times, and
+ * 3) that other write must have been in the reads_from set for maxreads times.
+ *
+ * If so, we decide that the execution is no longer feasible.
+ */
+void ModelChecker::check_recency(ModelAction *curr, bool already_added) {
+	if (params.maxreads != 0) {
+		if (curr->get_node()->get_read_from_size() <= 1)
+			return;
+
+		//Must make sure that execution is currently feasible... We could
+		//accidentally clear by rolling back
+		if (!isfeasible())
+			return;
+
+		std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+		int tid = id_to_int(curr->get_tid());
+
+		/* Skip checks */
+		if ((int)thrd_lists->size() <= tid)
+			return;
+
+		action_list_t *list = &(*thrd_lists)[tid];
+
+		action_list_t::reverse_iterator rit = list->rbegin();
+		/* Skip past curr */
+		if (!already_added) {
+			for (; (*rit) != curr; rit++)
+				;
+			/* go past curr now */
+			rit++;
+		}
+
+		action_list_t::reverse_iterator ritcopy=rit;
+		//See if we have enough reads from the same value
+		int count=0;
+		for (; count < params.maxreads; rit++,count++) {
+			if (rit==list->rend())
+				return;
+			ModelAction *act = *rit;
+			if (!act->is_read())
+				return;
+			if (act->get_reads_from() != curr->get_reads_from())
+				return;
+			if (act->get_node()->get_read_from_size() <= 1)
+				return;
+		}
+
+		for (int i=0;i<curr->get_node()->get_read_from_size();i++) {
+			//Get write
+			const ModelAction * write=curr->get_node()->get_read_from_at(i);
+			//Need a different write
+			if (write==curr->get_reads_from())
+				continue;
+
+			/* Test to see whether this is a feasible write to read from*/
+			r_modification_order(curr, write);
+			bool feasiblereadfrom=isfeasible();
+			mo_graph->rollbackChanges();
+
+			if (!feasiblereadfrom)
+				continue;
+			rit=ritcopy;
+
+			bool feasiblewrite=true;
+			//now we need to see if this write works for everyone
+
+			for (int loop=count;loop>0;loop--,rit++) {
+				ModelAction *act=*rit;
+				bool foundvalue=false;
+				for(int j=0;j<act->get_node()->get_read_from_size();j++) {
+					if (act->get_node()->get_read_from_at(j)==write) {
+						foundvalue=true;
+						break;
+					}
+				}
+				if (!foundvalue) {
+					feasiblewrite=false;
+					break;
+				}
+			}
+			if (feasiblewrite) {
+				too_many_reads = true;
+				return;
+			}
+		}
+	}
+}
+
/**
* Updates the mo_graph with the constraints imposed from the current read.
* @param curr The current action. Must be a read.
/* Include at most one act per-thread that "happens before" curr */
if (act->happens_before(curr)) {
- if (act->is_read())
- mo_graph->addEdge(act->get_reads_from(), curr);
- else
+ /*
+ * Note: if act is RMW, just add edge:
+ * act --mo--> curr
+ * The following edge should be handled elsewhere:
+ * readfrom(act) --mo--> act
+ */
+ if (act->is_write())
mo_graph->addEdge(act, curr);
+ else if (act->is_read() && act->get_reads_from() != NULL)
+ mo_graph->addEdge(act->get_reads_from(), curr);
added = true;
break;
} else if (act->is_read() && !act->is_synchronizing(curr) &&
* the model checker.
*/
struct model_params {
+ int maxreads;
};
/**
*/
void set_current_action(ModelAction *act) { priv->current_action = act; }
Thread * check_current_action(ModelAction *curr);
+ bool process_read(ModelAction *curr, Thread * th, bool second_part_of_rmw);
bool take_step();
+ void check_recency(ModelAction *curr, bool already_added);
ModelAction * get_last_conflict(ModelAction *act);
void set_backtracking(ModelAction *act);
Thread * get_next_thread(ModelAction *curr);
*/
CycleGraph *mo_graph;
bool failed_promise;
+ bool too_many_reads;
bool asserted;
};
return future_values[future_index];
}
+int Node::get_read_from_size() {
+ return may_read_from.size();
+}
+
+const ModelAction * Node::get_read_from_at(int i) {
+ return may_read_from[i];
+}
+
/**
* Gets the next 'may_read_from' action from this Node. Only valid for a node
* where this->action is a 'read'.
const ModelAction * get_read_from();
bool increment_read_from();
bool read_from_empty();
+ int get_read_from_size();
+ const ModelAction * get_read_from_at(int i);
void set_promise(unsigned int i);
bool get_promise(unsigned int i);
static inline void read_lock(rwlock_t *rw)
{
- int currentvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
- while (currentvalue<0) {
+ int priorvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ while (priorvalue<=0) {
atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
do {
- currentvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
- } while(currentvalue<=0);
- currentvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ priorvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
+ } while(priorvalue<=0);
+ priorvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
}
}
static inline void write_lock(rwlock_t *rw)
{
- int currentvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
- while (currentvalue!=0) {
+ int priorvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ while (priorvalue!=RW_LOCK_BIAS) {
atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
do {
- currentvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
- } while(currentvalue!=RW_LOCK_BIAS);
- currentvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ priorvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
+ } while(priorvalue!=RW_LOCK_BIAS);
+ priorvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
}
}
static inline int read_trylock(rwlock_t *rw)
{
- int currentvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
- if (currentvalue>=0)
+ int priorvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ if (priorvalue>0)
return 1;
atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
static inline int write_trylock(rwlock_t *rw)
{
- int currentvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
- if (currentvalue>=0)
+ int priorvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ if (priorvalue==RW_LOCK_BIAS)
return 1;
atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);