X-Git-Url: http://plrg.eecs.uci.edu/git/?p=c11tester.git;a=blobdiff_plain;f=execution.cc;h=63c0ac7f23b255858d96e2c5b7f067411b18fa74;hp=54e14c30fd03cb924ccc300cb3bfd84388c6a550;hb=938f4f938086a967e9cd101ecca164003f33f03c;hpb=ee118c7912f975c32ccaf0edbc0b6afd49a365aa

diff --git a/execution.cc b/execution.cc
index 54e14c30..63c0ac7f 100644
--- a/execution.cc
+++ b/execution.cc
@@ -1692,6 +1692,12 @@ void ModelExecution::removeAction(ModelAction *act) {
 		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
 		(*vec)[act->get_tid()].erase(listref);
 	}
+	//Clear it from last_sc_map
+	if (obj_last_sc_map.get(act->get_location()) == act) {
+		obj_last_sc_map.remove(act->get_location());
+	}
+
+	//Remove from Cyclegraph
 	mo_graph->freeAction(act);
 }
 
@@ -1714,10 +1720,14 @@ ClockVector * ModelExecution::computeMinimalCV() {
 	return cvmin;
 }
 
-//Options...
-//How often to check for memory
-//How much of the trace to always keep
-//Whether to sacrifice completeness...i.e., remove visible writes
+void ModelExecution::fixupLastAct(ModelAction *act) {
+//Create a standin ModelAction
+	ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, get_thread(act->get_tid()));
+	newact->set_seq_number(get_next_seq_num());
+	newact->create_cv(act);
+	newact->set_last_fence_release(act->get_last_fence_release());
+	add_action_to_lists(newact, false);
+}
 
 void ModelExecution::collectActions() {
 	//Compute minimal clock vector for all live threads
@@ -1769,6 +1779,28 @@ void ModelExecution::collectActions() {
 			}
 		}
 	}
+	for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
+		ModelAction *act = it2->getVal();
+		//Do iteration early in case we delete the act
+		it2=it2->getPrev();
+		bool islastact = false;
+		ModelAction *lastact = get_last_action(act->get_tid());
+		if (act == lastact) {
+			Thread * th = get_thread(act);
+			islastact = !th->is_complete();
+		}
+
+		if (act->is_read() && act->get_reads_from()->is_free()) {
+			if (act->is_rmw()) {
+				act->set_type(ATOMIC_WRITE);
+			}
+			removeAction(act);
+			if (islastact) {
+				fixupLastAct(act);
+			}
+			delete act;
+		}
+	}
 	for (;it != NULL;) {
 		ModelAction *act = it->getVal();
 		//Do iteration early since we may delete node...
@@ -1781,11 +1813,13 @@ void ModelExecution::collectActions() {
 		}
 
 		if (act->is_read()) {
-			if (islastact) {
-				act->set_read_from(NULL);
-				continue;
+			if (act->is_rmw()) {
+				act->set_type(ATOMIC_WRITE);
 			} else if (act->get_reads_from()->is_free()) {
 				removeAction(act);
+				if (islastact) {
+					fixupLastAct(act);
+				}
 				delete act;
 			} else {
 				const ModelAction *rel_fence =act->get_last_fence_release();
@@ -1798,13 +1832,16 @@ void ModelExecution::collectActions() {
 						act->set_last_fence_release(NULL);
 				}
 			}
-		} else if (islastact) {
-			continue;
 		} else if (act->is_free()) {
 			removeAction(act);
+			if (islastact) {
+				fixupLastAct(act);
+			}
 			delete act;
 		} else if (act->is_write()) {
 			//Do nothing with write that hasn't been marked to be freed
+		} else if (islastact) {
+			//Keep the last action for non-read/write actions
 		} else if (act->is_fence()) {
 			//Note that acquire fences can always be safely
 			//removed, but could incur extra overheads in
@@ -1826,7 +1863,7 @@ void ModelExecution::collectActions() {
 				delete act;
 			}
 		} else {
-			//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
+			//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
 			//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
 			//need to keep most recent unlock/wait for each lock
 			if(act->is_unlock() || act->is_wait()) {