thrd_last_fence_release(),
node_stack(node_stack),
priv(new struct model_snapshot_members ()),
- mo_graph(new CycleGraph()),
+ mo_graph(new CycleGraph()),
fuzzer(new Fuzzer())
{
/* Initialize a model-checker thread, for special ModelActions */
ASSERT(rf);
-
- if (r_modification_order(curr, rf, priorset)) {
+ bool canprune = false;
+ if (r_modification_order(curr, rf, priorset, &canprune)) {
for(unsigned int i=0;i<priorset->size();i++) {
mo_graph->addEdge((*priorset)[i], rf);
}
read_from(curr, rf);
get_thread(curr)->set_return_value(curr->get_return_value());
delete priorset;
+ if (canprune && curr->get_type() == ATOMIC_READ) {
+ int tid = id_to_int(curr->get_tid());
+ (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
+ }
return;
}
priorset->clear();
}
case ATOMIC_WAIT:
case ATOMIC_UNLOCK: {
+	//TODO: Fix wait handling: waits can spuriously fail, and timed waits should probably behave like normal waits. Consider the probabilities, though — a timed wait must be able to fail to guarantee progress, while a normal wait may fail. So a normal wait must work correctly both when it spuriously fails and when it does not, and a timed wait must eventually release.
+
/* wake up the other threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
* @return True if modification order edges were added; false otherwise
*/
-bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset)
+bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
if (curr->is_seqcst())
last_sc_write = get_last_seq_cst_write(curr);
+ int tid = curr->get_tid();
+ ModelAction *prev_same_thread = NULL;
/* Iterate over all threads */
- for (i = 0;i < thrd_lists->size();i++) {
- /* Last SC fence in thread i */
+ for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
+ /* Last SC fence in thread tid */
ModelAction *last_sc_fence_thread_local = NULL;
- if (int_to_id((int)i) != curr->get_tid())
- last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
+ if (i != 0)
+ last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
- /* Last SC fence in thread i, before last SC fence in current thread */
+ /* Last SC fence in thread tid, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local)
- last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
+ last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
+
+		//Only need to iterate if the happens-before clock for this thread has changed, or if an SC fence was added after this thread's last operation...
+ if (prev_same_thread != NULL &&
+ (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
+ (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
+ continue;
+ }
/* Iterate over actions in thread, starting from most recent */
- action_list_t *list = &(*thrd_lists)[i];
+ action_list_t *list = &(*thrd_lists)[tid];
action_list_t::reverse_iterator rit;
for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
* before" curr
*/
if (act->happens_before(curr)) {
+ if (i==0) {
+ if (last_sc_fence_local == NULL ||
+ (*last_sc_fence_local < *prev_same_thread)) {
+ prev_same_thread = act;
+ }
+ }
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
return false;
if (mo_graph->checkReachable(rf, prevrf))
return false;
priorset->push_back(prevrf);
+ } else {
+ if (act->get_tid() == curr->get_tid()) {
+ //Can prune curr from obj list
+ *canprune = true;
+ }
}
}
break;