thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
mo_graph(new CycleGraph()),
- fuzzer(new NewFuzzer()),
+ fuzzer(new Fuzzer()),
isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
add_thread(model_thread);
- fuzzer->register_engine(m->get_history(), this);
+ fuzzer->register_engine(this);
scheduler->register_engine(this);
#ifdef TLS
pthread_key_create(&pthreadkey, tlsdestructor);
* Processes a read model action.
* @param curr is the read model action to process.
* @param rf_set is the set of model actions we can possibly read from
- * @return True if processing this read updates the mo_graph.
+ * @return True if the read can be pruned from the thread map list.
*/
bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
read_from(curr, rf);
get_thread(curr)->set_return_value(curr->get_return_value());
delete priorset;
- if (canprune && curr->get_type() == ATOMIC_READ) {
- int tid = id_to_int(curr->get_tid());
- (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
- curr->setThrdMapRef(NULL);
- }
- return true;
+ return canprune && (curr->get_type() == ATOMIC_READ);
}
priorset->clear();
(*rf_set)[index] = rf_set->back();
if (newly_explored && curr->is_read())
rf_set = build_may_read_from(curr);
+ bool canprune = false;
+
if (curr->is_read() && !second_part_of_rmw) {
- process_read(curr, rf_set);
+ canprune = process_read(curr, rf_set);
delete rf_set;
} else
ASSERT(rf_set == NULL);
/* Add the action to lists */
if (!second_part_of_rmw)
- add_action_to_lists(curr);
+ add_action_to_lists(curr, canprune);
if (curr->is_write())
add_write_to_lists(curr);
*
* @param act is the ModelAction to add.
*/
-void ModelExecution::add_action_to_lists(ModelAction *act)
+void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
{
int tid = id_to_int(act->get_tid());
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
}
- act->setThrdMapRef((*vec)[tid].add_back(act));
+ if (!canprune)
+ act->setThrdMapRef((*vec)[tid].add_back(act));
// Update thrd_last_action, the last action taken by each thread
if ((int)thrd_last_action.size() <= tid)
sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
sllnode<ModelAction*> * rit = list->end();
modelclock_t next_seq = act->get_seq_number();
- if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
+ if (rit == NULL || (rit->getVal()->get_seq_number() <= next_seq))
return list->add_back(act);
else {
for(;rit != NULL;rit=rit->getPrev()) {
- if (rit->getVal()->get_seq_number() == next_seq) {
+ if (rit->getVal()->get_seq_number() <= next_seq) {
return list->insertAfter(rit, act);
}
}
if (rit == NULL) {
act->create_cv(NULL);
return NULL;
- } else if (rit->getVal()->get_seq_number() == next_seq) {
+ } else if (rit->getVal()->get_seq_number() <= next_seq) {
act->create_cv(rit->getVal());
return list->add_back(act);
} else {
for(;rit != NULL;rit=rit->getPrev()) {
- if (rit->getVal()->get_seq_number() == next_seq) {
+ if (rit->getVal()->get_seq_number() <= next_seq) {
act->create_cv(rit->getVal());
return list->insertAfter(rit, act);
}
void *mutex_loc = (void *) act->get_value();
get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
}
- } else if (act->is_write()) {
+ } else if (act->is_free()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
(*vec)[act->get_tid()].erase(listref);
}
+ //Clear this action from obj_last_sc_map if it is the recorded last SC write for its location
+ if (obj_last_sc_map.get(act->get_location()) == act) {
+ obj_last_sc_map.remove(act->get_location());
+ }
+
+
//Remove from Cyclegraph
mo_graph->freeAction(act);
}
return cvmin;
}
-//Options...
-//How often to check for memory
-//How much of the trace to always keep
-//Whether to sacrifice completeness...i.e., remove visible writes
+void ModelExecution::fixupLastAct(ModelAction *act) {
+//Create a stand-in ModelAction that preserves the removed action's clock vector and last release fence
+ ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, get_thread(act->get_tid()));
+ newact->set_seq_number(get_next_seq_num());
+ newact->create_cv(act);
+ newact->set_last_fence_release(act->get_last_fence_release());
+ add_action_to_lists(newact, false);
+}
void ModelExecution::collectActions() {
//Compute minimal clock vector for all live threads
ClockVector *cvmin = computeMinimalCV();
- cvmin->print();
SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
}
}
}
+ for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
+ ModelAction *act = it2->getVal();
+ //Advance the iterator first, in case act is deleted below
+ it2=it2->getPrev();
+ bool islastact = false;
+ ModelAction *lastact = get_last_action(act->get_tid());
+ if (act == lastact) {
+ Thread * th = get_thread(act);
+ islastact = !th->is_complete();
+ }
+
+ if (act->is_read()) {
+ if (act->get_reads_from()->is_free()) {
+ if (act->is_rmw()) {
+ //weaken the RMW to a plain write; the action it reads from has been freed
+ act->set_type(ATOMIC_WRITE);
+ } else {
+ removeAction(act);
+ if (islastact) {
+ fixupLastAct(act);
+ }
+ delete act;
+ continue;
+ }
+ }
+ }
+ const ModelAction *rel_fence =act->get_last_fence_release();
+ if (rel_fence != NULL) {
+ modelclock_t relfenceseq = rel_fence->get_seq_number();
+ thread_id_t relfence_tid = rel_fence->get_tid();
+ modelclock_t tid_clock = cvmin->getClock(relfence_tid);
+ //Remove references to irrelevant release fences
+ if (relfenceseq <= tid_clock)
+ act->set_last_fence_release(NULL);
+ }
+ }
for (;it != NULL;) {
ModelAction *act = it->getVal();
//Do iteration early since we may delete node...
it=it->getPrev();
+ bool islastact = false;
+ ModelAction *lastact = get_last_action(act->get_tid());
+ if (act == lastact) {
+ Thread * th = get_thread(act);
+ islastact = !th->is_complete();
+ }
+
if (act->is_read()) {
if (act->get_reads_from()->is_free()) {
- removeAction(act);
- delete act;
- } else {
- const ModelAction *rel_fence =act->get_last_fence_release();
- if (rel_fence != NULL) {
- modelclock_t relfenceseq = rel_fence->get_seq_number();
- thread_id_t relfence_tid = rel_fence->get_tid();
- modelclock_t tid_clock = cvmin->getClock(relfence_tid);
- //Remove references to irrelevant release fences
- if (relfenceseq <= tid_clock)
- act->set_last_fence_release(NULL);
+ if (act->is_rmw()) {
+ act->set_type(ATOMIC_WRITE);
+ } else {
+ removeAction(act);
+ if (islastact) {
+ fixupLastAct(act);
+ }
+ delete act;
+ continue;
}
}
} else if (act->is_free()) {
removeAction(act);
+ if (islastact) {
+ fixupLastAct(act);
+ }
delete act;
+ continue;
} else if (act->is_write()) {
//Do nothing with write that hasn't been marked to be freed
+ } else if (islastact) {
+ //Keep the last action for non-read/write actions
} else if (act->is_fence()) {
//Note that acquire fences can always be safely
//removed, but could incur extra overheads in
if (actseq <= tid_clock) {
removeAction(act);
delete act;
+ continue;
}
} else {
- //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
+ //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
//need to keep most recent unlock/wait for each lock
if(act->is_unlock() || act->is_wait()) {
if (lastlock != act) {
removeAction(act);
delete act;
+ continue;
+ }
+ } else if (act->is_create()) {
+ if (act->get_thread_operand()->is_complete()) {
+ removeAction(act);
+ delete act;
+ continue;
}
} else {
removeAction(act);
delete act;
+ continue;
}
+ const ModelAction *rel_fence =act->get_last_fence_release();
+ if (rel_fence != NULL) {
+ modelclock_t relfenceseq = rel_fence->get_seq_number();
+ thread_id_t relfence_tid = rel_fence->get_tid();
+ modelclock_t tid_clock = cvmin->getClock(relfence_tid);
+ //Remove references to irrelevant release fences
+ if (relfenceseq <= tid_clock)
+ act->set_last_fence_release(NULL);
+ }
+
}
}