model: schedule appropriate fence backtracking points
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
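/** @brief The global ModelChecker instance */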
21 ModelChecker *model;
22
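/**
 * @brief Holds a single formatted bug-report message
 * (see ModelChecker::assert_bug() and ModelChecker::print_bugs())
 */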
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
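/**
 * @brief Look up the action list for @a ptr in @a hash, lazily creating and
 * inserting an empty list if none exists yet
 */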
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
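/**
 * @brief Look up the per-thread vector of action lists for @a ptr in @a hash,
 * lazily creating and inserting an empty vector if none exists yet
 */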
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
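
/*
 * Usage sketch for the two helpers above: callers fetch the list for an
 * action's memory location and then scan or append to it, e.g. (see
 * get_last_conflict() below):
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   action_list_t::reverse_iterator rit;
 *   for (rit = list->rbegin(); rit != list->rend(); rit++)
 *           ...
 */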
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
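/** @return The Node at the head of the NodeStack (the current exploration point) */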
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218  * might guide the choice of next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; NULL if no threads
221  * remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* The correct sleep set is in the parent node. */
286                 execute_sleep_set();
287
288                 DEBUG("*** Divergence point ***\n");
289
290                 diverge = NULL;
291         } else {
292                 tid = next->get_tid();
293         }
294         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
295         ASSERT(tid != THREAD_ID_T_NONE);
296         return thread_map->get(id_to_int(tid));
297 }
298
299 /**
300  * We need to know what the next actions of all threads in the sleep
301  * set will be.  This method flags each such pending action (stored at the
302  * corresponding thread object) with the sleep-set flag.
303  */
304
305 void ModelChecker::execute_sleep_set()
306 {
307         for (unsigned int i = 0; i < get_num_threads(); i++) {
308                 thread_id_t tid = int_to_id(i);
309                 Thread *thr = get_thread(tid);
310                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
311                         thr->get_pending()->set_sleep_flag();
312                 }
313         }
314 }
315
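/**
 * @brief Wake any sleeping threads whose pending action could synchronize
 * with the current action
 *
 * @param curr The current action (no threads are woken if this is the read
 * half of an RMW)
 */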
316 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
317 {
318         for (unsigned int i = 0; i < get_num_threads(); i++) {
319                 Thread *thr = get_thread(int_to_id(i));
320                 if (scheduler->is_sleep_set(thr)) {
321                         ModelAction *pending_act = thr->get_pending();
322                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
323                                 //Remove this thread from sleep set
324                                 scheduler->remove_sleep(thr);
325                 }
326         }
327 }
328
329 /** @brief Alert the model-checker that an incorrectly-ordered
330  * synchronization was made */
331 void ModelChecker::set_bad_synchronization()
332 {
333         priv->bad_synchronization = true;
334 }
335
336 /**
337  * Check whether the current trace has triggered an assertion which should halt
338  * its execution.
339  *
340  * @return True, if the execution should be aborted; false otherwise
341  */
342 bool ModelChecker::has_asserted() const
343 {
344         return priv->asserted;
345 }
346
347 /**
348  * Trigger a trace assertion which should cause this execution to be halted.
349  * This can be due to a detected bug or due to an infeasibility that should
350  * halt ASAP.
351  */
352 void ModelChecker::set_assert()
353 {
354         priv->asserted = true;
355 }
356
357 /**
358  * Check if we are in a deadlock. Should only be called at the end of an
359  * execution, although it should not give false positives in the middle of an
360  * execution (there should be some ENABLED thread).
361  *
362  * @return True if program is in a deadlock; false otherwise
363  */
364 bool ModelChecker::is_deadlocked() const
365 {
366         bool blocking_threads = false;
367         for (unsigned int i = 0; i < get_num_threads(); i++) {
368                 thread_id_t tid = int_to_id(i);
369                 if (is_enabled(tid))
370                         return false;
371                 Thread *t = get_thread(tid);
372                 if (!t->is_model_thread() && t->get_pending())
373                         blocking_threads = true;
374         }
375         return blocking_threads;
376 }
377
378 /**
379  * Check if this is a complete execution. That is, have all threads completed
380  * execution (rather than exiting because sleep sets have forced a redundant
381  * execution).
382  *
383  * @return True if the execution is complete.
384  */
385 bool ModelChecker::is_complete_execution() const
386 {
387         for (unsigned int i = 0; i < get_num_threads(); i++)
388                 if (is_enabled(int_to_id(i)))
389                         return false;
390         return true;
391 }
392
393 /**
394  * @brief Assert a bug in the executing program.
395  *
396  * Use this function to assert any sort of bug in the user program. If the
397  * current trace is feasible (actually, a prefix of some feasible execution),
398  * then this execution will be aborted, printing the appropriate message. If
399  * the current trace is not yet feasible, the error message will be stashed and
400  * printed if the execution ever becomes feasible.
401  *
402  * @param msg Descriptive message for the bug (do not include newline char)
403  * @return True if the bug is immediately feasible (i.e., the trace is a feasible prefix)
404  */
405 bool ModelChecker::assert_bug(const char *msg)
406 {
407         priv->bugs.push_back(new bug_message(msg));
408
409         if (isfeasibleprefix()) {
410                 set_assert();
411                 return true;
412         }
413         return false;
414 }
415
416 /**
417  * @brief Assert a bug in the executing program, asserted by a user thread
418  * @see ModelChecker::assert_bug
419  * @param msg Descriptive message for the bug (do not include newline char)
420  */
421 void ModelChecker::assert_user_bug(const char *msg)
422 {
423         /* If feasible bug, bail out now */
424         if (assert_bug(msg))
425                 switch_to_master(NULL);
426 }
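
/*
 * Usage sketch (hypothetical caller, outside this file): user-facing library
 * code reports an error and yields back to the model-checker, e.g.
 *
 *   if (!initialized)
 *           model->assert_user_bug("Object used before initialization");
 */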
427
428 /** @return True, if any bugs have been reported for this execution */
429 bool ModelChecker::have_bug_reports() const
430 {
431         return priv->bugs.size() != 0;
432 }
433
434 /** @brief Print bug report listing for this execution (if any bugs exist) */
435 void ModelChecker::print_bugs() const
436 {
437         if (have_bug_reports()) {
438                 model_print("Bug report: %zu bug%s detected\n",
439                                 priv->bugs.size(),
440                                 priv->bugs.size() > 1 ? "s" : "");
441                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
442                         priv->bugs[i]->print();
443         }
444 }
445
446 /**
447  * @brief Record end-of-execution stats
448  *
449  * Must be run when exiting an execution. Records various stats.
450  * @see struct execution_stats
451  */
452 void ModelChecker::record_stats()
453 {
454         stats.num_total++;
455         if (!isfeasibleprefix())
456                 stats.num_infeasible++;
457         else if (have_bug_reports())
458                 stats.num_buggy_executions++;
459         else if (is_complete_execution())
460                 stats.num_complete++;
461         else
462                 stats.num_redundant++;
463 }
464
465 /** @brief Print execution stats */
466 void ModelChecker::print_stats() const
467 {
468         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
469         model_print("Number of redundant executions: %d\n", stats.num_redundant);
470         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
471         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
472         model_print("Total executions: %d\n", stats.num_total);
473         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
474 }
475
476 /**
477  * @brief End-of-execution print
478  * @param printbugs Should any existing bugs be printed?
479  */
480 void ModelChecker::print_execution(bool printbugs) const
481 {
482         print_program_output();
483
484         if (DBG_ENABLED() || params.verbose) {
485                 model_print("Earliest divergence point since last feasible execution:\n");
486                 if (earliest_diverge)
487                         earliest_diverge->print();
488                 else
489                         model_print("(Not set)\n");
490
491                 model_print("\n");
492                 print_stats();
493         }
494
495         /* Don't print invalid bugs */
496         if (printbugs)
497                 print_bugs();
498
499         model_print("\n");
500         print_summary();
501 }
502
503 /**
504  * Queries the model-checker for more executions to explore and, if one
505  * exists, resets the model-checker state to execute a new execution.
506  *
507  * @return If there are more executions to explore, return true. Otherwise,
508  * return false.
509  */
510 bool ModelChecker::next_execution()
511 {
512         DBG();
513         /* Is this execution a feasible execution that's worth bug-checking? */
514         bool complete = isfeasibleprefix() && (is_complete_execution() ||
515                         have_bug_reports());
516
517         /* End-of-execution bug checks */
518         if (complete) {
519                 if (is_deadlocked())
520                         assert_bug("Deadlock detected");
521
522                 checkDataRaces();
523         }
524
525         record_stats();
526
527         /* Output */
528         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
529                 print_execution(complete);
530         else
531                 clear_program_output();
532
533         if (complete)
534                 earliest_diverge = NULL;
535
536         if ((diverge = get_next_backtrack()) == NULL)
537                 return false;
538
539         if (DBG_ENABLED()) {
540                 model_print("Next execution will diverge at:\n");
541                 diverge->print();
542         }
543
544         reset_to_initial_state();
545         return true;
546 }
547
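/**
 * @brief Find a fence-related backtracking conflict for a store
 *
 * For a write @a act that is a release (or is preceded in its thread by a
 * fence-release), search the trace for loads in other threads from the same
 * location that are followed (in sb) by a fence-acquire; backtracking at such
 * a fence could allow the release/acquire fence pair to synchronize.
 *
 * @param act The current action (only stores are considered)
 * @return The latest eligible fence-acquire, or NULL if there is none
 */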
548 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
549 {
550         /* Only perform release/acquire fence backtracking for stores */
551         if (!act->is_write())
552                 return NULL;
553
554         /* Find a fence-release (or, act is a release) */
555         ModelAction *last_release;
556         if (act->is_release())
557                 last_release = act;
558         else
559                 last_release = get_last_fence_release(act->get_tid());
560         if (!last_release)
561                 return NULL;
562
563         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
564         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
565         bool found_acquire_fences = false;
566         /* Find a prior:
567          *   load-acquire
568          * or
569          *   load --sb-> fence-acquire */
570         action_list_t *list = action_trace;
571         action_list_t::reverse_iterator rit;
572         for (rit = list->rbegin(); rit != list->rend(); rit++) {
573                 ModelAction *prev = *rit;
574                 if (act->same_thread(prev))
575                         continue;
576
577                 int tid = id_to_int(prev->get_tid());
578
579                 if (prev->is_read() && act->same_var(prev)) {
580                         if (prev->is_acquire()) {
581                                 /* Found most recent load-acquire, don't need
582                                  * to search for more fences */
583                                 if (!found_acquire_fences)
584                                         return NULL;
585                         } else {
586                                 prior_loads[tid] = prev;
587                         }
588                 }
589                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
590                         found_acquire_fences = true;
591                         acquire_fences[tid] = prev;
592                 }
593         }
594
595         ModelAction *latest_backtrack = NULL;
596         for (unsigned int i = 0; i < acquire_fences.size(); i++)
597                 if (acquire_fences[i] && prior_loads[i])
598                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
599                                 latest_backtrack = acquire_fences[i];
600         return latest_backtrack;
601 }
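
/*
 * A sketch of the pattern targeted above (pseudocode, not from this file):
 *
 *   Thread 1                             Thread 2
 *   --------                             --------
 *   fence(memory_order_release);         r = x.load(memory_order_relaxed);
 *   x.store(1, memory_order_relaxed);    fence(memory_order_acquire);
 *
 * Here the store to x is the 'act' passed in, and Thread 2's fence-acquire is
 * a candidate backtracking point: in an interleaving where the load reads from
 * the store, the two fences could synchronize.
 */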
602
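/**
 * @brief Find the most recent prior action that conflicts with @a act
 *
 * The notion of conflict depends on the action type: reads/writes/RMWs look
 * for a prior action that could synchronize with @a act (also considering
 * fence conflicts via get_last_fence_conflict()); locks/trylocks look for a
 * conflicting lock operation; unlocks and waits look for failed trylocks
 * (and, for waits, notifies); notifies look for waits.
 *
 * @return The most recent conflicting action, or NULL if there is none
 */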
603 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
604 {
605         switch (act->get_type()) {
606         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
607         case ATOMIC_READ:
608         case ATOMIC_WRITE:
609         case ATOMIC_RMW: {
610                 ModelAction *ret = NULL;
611
612                 /* linear search: from most recent to oldest */
613                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
614                 action_list_t::reverse_iterator rit;
615                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
616                         ModelAction *prev = *rit;
617                         if (prev->could_synchronize_with(act)) {
618                                 ret = prev;
619                                 break;
620                         }
621                 }
622
623                 ModelAction *ret2 = get_last_fence_conflict(act);
624                 if (!ret2)
625                         return ret;
626                 if (!ret)
627                         return ret2;
628                 if (*ret < *ret2)
629                         return ret2;
630                 return ret;
631         }
632         case ATOMIC_LOCK:
633         case ATOMIC_TRYLOCK: {
634                 /* linear search: from most recent to oldest */
635                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
636                 action_list_t::reverse_iterator rit;
637                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
638                         ModelAction *prev = *rit;
639                         if (act->is_conflicting_lock(prev))
640                                 return prev;
641                 }
642                 break;
643         }
644         case ATOMIC_UNLOCK: {
645                 /* linear search: from most recent to oldest */
646                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
647                 action_list_t::reverse_iterator rit;
648                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
649                         ModelAction *prev = *rit;
650                         if (!act->same_thread(prev) && prev->is_failed_trylock())
651                                 return prev;
652                 }
653                 break;
654         }
655         case ATOMIC_WAIT: {
656                 /* linear search: from most recent to oldest */
657                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
658                 action_list_t::reverse_iterator rit;
659                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
660                         ModelAction *prev = *rit;
661                         if (!act->same_thread(prev) && prev->is_failed_trylock())
662                                 return prev;
663                         if (!act->same_thread(prev) && prev->is_notify())
664                                 return prev;
665                 }
666                 break;
667         }
668
669         case ATOMIC_NOTIFY_ALL:
670         case ATOMIC_NOTIFY_ONE: {
671                 /* linear search: from most recent to oldest */
672                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
673                 action_list_t::reverse_iterator rit;
674                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
675                         ModelAction *prev = *rit;
676                         if (!act->same_thread(prev) && prev->is_wait())
677                                 return prev;
678                 }
679                 break;
680         }
681         default:
682                 break;
683         }
684         return NULL;
685 }
686
687 /** This method finds backtracking points at which we should try to
688  * reorder prior conflicting actions against the parameter ModelAction.
689  *
690  * @param act The ModelAction to find backtracking points for.
691  */
692 void ModelChecker::set_backtracking(ModelAction *act)
693 {
694         Thread *t = get_thread(act);
695         ModelAction *prev = get_last_conflict(act);
696         if (prev == NULL)
697                 return;
698
699         Node *node = prev->get_node()->get_parent();
700
701         int low_tid, high_tid;
702         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
703                 low_tid = id_to_int(act->get_tid());
704                 high_tid = low_tid + 1;
705         } else {
706                 low_tid = 0;
707                 high_tid = get_num_threads();
708         }
709
710         for (int i = low_tid; i < high_tid; i++) {
711                 thread_id_t tid = int_to_id(i);
712
713                 /* Make sure this thread can be enabled here. */
714                 if (i >= node->get_num_threads())
715                         break;
716
717                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
718                 if (node->enabled_status(tid) != THREAD_ENABLED)
719                         continue;
720
721                 /* Check if this has been explored already */
722                 if (node->has_been_explored(tid))
723                         continue;
724
725                 /* See if fairness allows */
726                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
727                         bool unfair = false;
728                         for (int t = 0; t < node->get_num_threads(); t++) {
729                                 thread_id_t tother = int_to_id(t);
730                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
731                                         unfair = true;
732                                         break;
733                                 }
734                         }
735                         if (unfair)
736                                 continue;
737                 }
738                 /* Cache the latest backtracking point */
739                 set_latest_backtrack(prev);
740
741                 /* If this is a new backtracking point, mark the tree */
742                 if (!node->set_backtrack(tid))
743                         continue;
744                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
745                                         id_to_int(prev->get_tid()),
746                                         id_to_int(t->get_id()));
747                 if (DBG_ENABLED()) {
748                         prev->print();
749                         act->print();
750                 }
751         }
752 }
753
754 /**
755  * @brief Cache a backtracking point as the "most recent", if eligible
756  *
757  * Note that this does not prepare the NodeStack for this backtracking
758  * operation; it only caches the action on a per-execution basis
759  *
760  * @param act The operation at which we should explore a different next action
761  * (i.e., backtracking point)
762  * @return True, if this action is now the most recent backtracking point;
763  * false otherwise
764  */
765 bool ModelChecker::set_latest_backtrack(ModelAction *act)
766 {
767         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
768                 priv->next_backtrack = act;
769                 return true;
770         }
771         return false;
772 }
773
774 /**
775  * Returns (and clears) the latest backtracking point. The model checker will
776  * explore a different path at this point in the next execution.
777  * @return The ModelAction at which the next execution should diverge.
778  */
779 ModelAction * ModelChecker::get_next_backtrack()
780 {
781         ModelAction *next = priv->next_backtrack;
782         priv->next_backtrack = NULL;
783         return next;
784 }
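
/*
 * Note: set_latest_backtrack() and get_next_backtrack() form a simple
 * per-execution handoff: conflict detection caches the latest backtracking
 * point here, and next_execution() consumes it as the divergence point for
 * the next execution.
 */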
785
786 /**
787  * Processes a read or rmw model action.
788  * @param curr is the read model action to process.
789  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
790  * @return True if processing this read updates the mo_graph.
791  */
792 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
793 {
794         uint64_t value = VALUE_NONE;
795         bool updated = false;
796         while (true) {
797                 const ModelAction *reads_from = curr->get_node()->get_read_from();
798                 if (reads_from != NULL) {
799                         mo_graph->startChanges();
800
801                         value = reads_from->get_value();
802                         bool r_status = false;
803
804                         if (!second_part_of_rmw) {
805                                 check_recency(curr, reads_from);
806                                 r_status = r_modification_order(curr, reads_from);
807                         }
808
809                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
810                                 mo_graph->rollbackChanges();
811                                 priv->too_many_reads = false;
812                                 continue;
813                         }
814
815                         read_from(curr, reads_from);
816                         mo_graph->commitChanges();
817                         mo_check_promises(curr, true);
818
819                         updated |= r_status;
820                 } else if (!second_part_of_rmw) {
821                         /* Read from future value */
822                         struct future_value fv = curr->get_node()->get_future_value();
823                         Promise *promise = new Promise(curr, fv);
824                         value = fv.value;
825                         curr->set_read_from_promise(promise);
826                         promises->push_back(promise);
827                         mo_graph->startChanges();
828                         updated = r_modification_order(curr, promise);
829                         mo_graph->commitChanges();
830                 }
831                 get_thread(curr)->set_return_value(value);
832                 return updated;
833         }
834 }
835
836 /**
837  * Processes a lock, trylock, unlock, wait, or notify model action.
838  * @param curr is the mutex model action to process.
839  *
840  * The trylock operation checks whether the lock is already taken.  If not,
841  * it falls through to the normal lock operation case.  If so, it returns
842  * failure.
843  *
844  * The lock operation has already been checked that it is enabled, so
845  * it just grabs the lock and synchronizes with the previous unlock.
846  *
847  * The unlock operation has to re-enable all of the threads that are
848  * waiting on the lock.
849  *
850  * @return True if synchronization was updated; false otherwise
851  */
852 bool ModelChecker::process_mutex(ModelAction *curr)
853 {
854         std::mutex *mutex = NULL;
855         struct std::mutex_state *state = NULL;
856
857         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
858                 mutex = (std::mutex *)curr->get_location();
859                 state = mutex->get_state();
860         } else if (curr->is_wait()) {
861                 mutex = (std::mutex *)curr->get_value();
862                 state = mutex->get_state();
863         }
864
865         switch (curr->get_type()) {
866         case ATOMIC_TRYLOCK: {
867                 bool success = !state->islocked;
868                 curr->set_try_lock(success);
869                 if (!success) {
870                         get_thread(curr)->set_return_value(0);
871                         break;
872                 }
873                 get_thread(curr)->set_return_value(1);
874         }
875                 //otherwise fall into the lock case
876         case ATOMIC_LOCK: {
877                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
878                         assert_bug("Lock access before initialization");
879                 state->islocked = true;
880                 ModelAction *unlock = get_last_unlock(curr);
881                 //synchronize with the previous unlock statement
882                 if (unlock != NULL) {
883                         curr->synchronize_with(unlock);
884                         return true;
885                 }
886                 break;
887         }
888         case ATOMIC_UNLOCK: {
889                 //unlock the lock
890                 state->islocked = false;
891                 //wake up the other threads
892                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
893                 //activate all the waiting threads
894                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
895                         scheduler->wake(get_thread(*rit));
896                 }
897                 waiters->clear();
898                 break;
899         }
900         case ATOMIC_WAIT: {
901                 //unlock the lock
902                 state->islocked = false;
903                 //wake up the other threads
904                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
905                 //activate all the waiting threads
906                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
907                         scheduler->wake(get_thread(*rit));
908                 }
909                 waiters->clear();
910                 //check whether we should go to sleep or not...simulate spurious failures
911                 if (curr->get_node()->get_misc() == 0) {
912                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
913                         //disable us
914                         scheduler->sleep(get_thread(curr));
915                 }
916                 break;
917         }
918         case ATOMIC_NOTIFY_ALL: {
919                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
920                 //activate all the waiting threads
921                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
922                         scheduler->wake(get_thread(*rit));
923                 }
924                 waiters->clear();
925                 break;
926         }
927         case ATOMIC_NOTIFY_ONE: {
928                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
929                 int wakeupthread = curr->get_node()->get_misc();
930                 action_list_t::iterator it = waiters->begin();
931                 advance(it, wakeupthread);
932                 scheduler->wake(get_thread(*it));
933                 waiters->erase(it);
934                 break;
935         }
936
937         default:
938                 ASSERT(0);
939         }
940         return false;
941 }
942
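/**
 * @brief Record a "future value" that a pending read may later observe
 *
 * If the modification order may allow @a reader to read from @a writer, the
 * writer's value is added to the reader's Node with an expiration based on
 * params.maxfuturedelay; if the value is new, a backtracking point is
 * scheduled at the reader.
 *
 * @param writer The write whose value is sent into the reader's "future"
 * @param reader The read that may observe the future value
 */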
943 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
944 {
945         /* Do more ambitious checks now that mo is more complete */
946         if (mo_may_allow(writer, reader)) {
947                 Node *node = reader->get_node();
948
949                 /* Find an ancestor thread which exists at the time of the reader */
950                 Thread *write_thread = get_thread(writer);
951                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
952                         write_thread = write_thread->get_parent();
953
954                 struct future_value fv = {
955                         writer->get_value(),
956                         writer->get_seq_number() + params.maxfuturedelay,
957                         write_thread->get_id(),
958                 };
959                 if (node->add_future_value(fv))
960                         set_latest_backtrack(reader);
961         }
962 }
963
964 /**
965  * Process a write ModelAction
966  * @param curr The ModelAction to process
967  * @return True if the mo_graph was updated or promises were resolved
968  */
969 bool ModelChecker::process_write(ModelAction *curr)
970 {
971         bool updated_mod_order = w_modification_order(curr);
972         bool updated_promises = resolve_promises(curr);
973
974         if (promises->size() == 0) {
975                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
976                         struct PendingFutureValue pfv = (*futurevalues)[i];
977                         add_future_value(pfv.writer, pfv.act);
978                 }
979                 futurevalues->clear();
980         }
981
982         mo_graph->commitChanges();
983         mo_check_promises(curr, false);
984
985         get_thread(curr)->set_return_value(VALUE_NONE);
986         return updated_mod_order || updated_promises;
987 }
988
989 /**
990  * Process a fence ModelAction
991  * @param curr The ModelAction to process
992  * @return True if synchronization was updated
993  */
994 bool ModelChecker::process_fence(ModelAction *curr)
995 {
996         /*
997          * fence-relaxed: no-op
998  * fence-release: only log the occurrence (not in this function), for
999          *   use in later synchronization
1000          * fence-acquire (this function): search for hypothetical release
1001          *   sequences
1002          */
1003         bool updated = false;
1004         if (curr->is_acquire()) {
1005                 action_list_t *list = action_trace;
1006                 action_list_t::reverse_iterator rit;
1007                 /* Find X : is_read(X) && X --sb-> curr */
1008                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1009                         ModelAction *act = *rit;
1010                         if (act == curr)
1011                                 continue;
1012                         if (act->get_tid() != curr->get_tid())
1013                                 continue;
1014                         /* Stop at the beginning of the thread */
1015                         if (act->is_thread_start())
1016                                 break;
1017                         /* Stop once we reach a prior fence-acquire */
1018                         if (act->is_fence() && act->is_acquire())
1019                                 break;
1020                         if (!act->is_read())
1021                                 continue;
1022                         /* read-acquire will find its own release sequences */
1023                         if (act->is_acquire())
1024                                 continue;
1025
1026                         /* Establish hypothetical release sequences */
1027                         rel_heads_list_t release_heads;
1028                         get_release_seq_heads(curr, act, &release_heads);
1029                         for (unsigned int i = 0; i < release_heads.size(); i++)
1030                                 if (!curr->synchronize_with(release_heads[i]))
1031                                         set_bad_synchronization();
1032                         if (release_heads.size() != 0)
1033                                 updated = true;
1034                 }
1035         }
1036         return updated;
1037 }
1038
1039 /**
1040  * @brief Process the current action for thread-related activity
1041  *
1042  * Performs current-action processing for a THREAD_* ModelAction. Processing
1043  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1044  * synchronization, etc.  This function is a no-op for non-THREAD actions
1045  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1046  *
1047  * @param curr The current action
1048  * @return True if synchronization was updated or a thread completed
1049  */
1050 bool ModelChecker::process_thread_action(ModelAction *curr)
1051 {
1052         bool updated = false;
1053
1054         switch (curr->get_type()) {
1055         case THREAD_CREATE: {
1056                 thrd_t *thrd = (thrd_t *)curr->get_location();
1057                 struct thread_params *params = (struct thread_params *)curr->get_value();
1058                 Thread *th = new Thread(thrd, params->func, params->arg);
1059                 add_thread(th);
1060                 th->set_creation(curr);
1061                 /* Promises can be satisfied by children */
1062                 for (unsigned int i = 0; i < promises->size(); i++) {
1063                         Promise *promise = (*promises)[i];
1064                         if (promise->thread_is_available(curr->get_tid()))
1065                                 promise->add_thread(th->get_id());
1066                 }
1067                 break;
1068         }
1069         case THREAD_JOIN: {
1070                 Thread *blocking = curr->get_thread_operand();
1071                 ModelAction *act = get_last_action(blocking->get_id());
1072                 curr->synchronize_with(act);
1073                 updated = true; /* trigger rel-seq checks */
1074                 break;
1075         }
1076         case THREAD_FINISH: {
1077                 Thread *th = get_thread(curr);
1078                 while (!th->wait_list_empty()) {
1079                         ModelAction *act = th->pop_wait_list();
1080                         scheduler->wake(get_thread(act));
1081                 }
1082                 th->complete();
1083                 /* Completed thread can't satisfy promises */
1084                 for (unsigned int i = 0; i < promises->size(); i++) {
1085                         Promise *promise = (*promises)[i];
1086                         if (promise->thread_is_available(th->get_id()))
1087                                 if (promise->eliminate_thread(th->get_id()))
1088                                         priv->failed_promise = true;
1089                 }
1090                 updated = true; /* trigger rel-seq checks */
1091                 break;
1092         }
1093         case THREAD_START: {
1094                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1095                 break;
1096         }
1097         default:
1098                 break;
1099         }
1100
1101         return updated;
1102 }
1103
1104 /**
1105  * @brief Process the current action for release sequence fixup activity
1106  *
1107  * Performs model-checker release sequence fixups for the current action,
1108  * forcing a single pending release sequence to break (with a given, potential
1109  * "loose" write) or to complete (i.e., synchronize). If a pending release
1110  * sequence forms a complete release sequence, then we must perform the fixup
1111  * synchronization, mo_graph additions, etc.
1112  *
1113  * @param curr The current action; must be a release sequence fixup action
1114  * @param work_queue The work queue to which to add work items as they are
1115  * generated
1116  */
1117 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1118 {
1119         const ModelAction *write = curr->get_node()->get_relseq_break();
1120         struct release_seq *sequence = pending_rel_seqs->back();
1121         pending_rel_seqs->pop_back();
1122         ASSERT(sequence);
1123         ModelAction *acquire = sequence->acquire;
1124         const ModelAction *rf = sequence->rf;
1125         const ModelAction *release = sequence->release;
1126         ASSERT(acquire);
1127         ASSERT(release);
1128         ASSERT(rf);
1129         ASSERT(release->same_thread(rf));
1130
1131         if (write == NULL) {
1132                 /**
1133                  * @todo Forcing a synchronization requires that we set
1134                  * modification order constraints. For instance, we can't allow
1135                  * a fixup sequence in which two separate read-acquire
1136                  * operations read from the same sequence, where the first one
1137                  * synchronizes and the other doesn't. Essentially, we can't
1138                  * allow any writes to insert themselves between 'release' and
1139                  * 'rf'
1140                  */
1141
1142                 /* Must synchronize */
1143                 if (!acquire->synchronize_with(release)) {
1144                         set_bad_synchronization();
1145                         return;
1146                 }
1147                 /* Re-check all pending release sequences */
1148                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1149                 /* Re-check act for mo_graph edges */
1150                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1151
1152                 /* propagate synchronization to later actions */
1153                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1154                 for (; (*rit) != acquire; rit++) {
1155                         ModelAction *propagate = *rit;
1156                         if (acquire->happens_before(propagate)) {
1157                                 propagate->synchronize_with(acquire);
1158                                 /* Re-check 'propagate' for mo_graph edges */
1159                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1160                         }
1161                 }
1162         } else {
1163                 /* Break release sequence with new edges:
1164                  *   release --mo--> write --mo--> rf */
1165                 mo_graph->addEdge(release, write);
1166                 mo_graph->addEdge(write, rf);
1167         }
1168
1169         /* See if we have realized a data race */
1170         checkDataRaces();
1171 }
1172
1173 /**
1174  * Initialize the current action by performing one or more of the following
1175  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1176  * in the NodeStack, manipulating backtracking sets, allocating and
1177  * initializing clock vectors, and computing the promises to fulfill.
1178  *
1179  * @param curr The current action, as passed from the user context; may be
1180  * freed/invalidated after the execution of this function, with a different
1181  * action "returned" its place (pass-by-reference)
1182  * action "returned" in its place (pass-by-reference)
1183  */
1184 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1185 {
1186         ModelAction *newcurr;
1187
1188         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1189                 newcurr = process_rmw(*curr);
1190                 delete *curr;
1191
1192                 if (newcurr->is_rmw())
1193                         compute_promises(newcurr);
1194
1195                 *curr = newcurr;
1196                 return false;
1197         }
1198
1199         (*curr)->set_seq_number(get_next_seq_num());
1200
1201         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1202         if (newcurr) {
1203                 /* First restore type and order in case of RMW operation */
1204                 if ((*curr)->is_rmwr())
1205                         newcurr->copy_typeandorder(*curr);
1206
1207                 ASSERT((*curr)->get_location() == newcurr->get_location());
1208                 newcurr->copy_from_new(*curr);
1209
1210                 /* Discard duplicate ModelAction; use action from NodeStack */
1211                 delete *curr;
1212
1213                 /* Always compute new clock vector */
1214                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1215
1216                 *curr = newcurr;
1217                 return false; /* Action was explored previously */
1218         } else {
1219                 newcurr = *curr;
1220
1221                 /* Always compute new clock vector */
1222                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1223
1224                 /* Assign most recent release fence */
1225                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1226
1227                 /*
1228                  * Perform one-time actions when pushing new ModelAction onto
1229                  * NodeStack
1230                  */
1231                 if (newcurr->is_write())
1232                         compute_promises(newcurr);
1233                 else if (newcurr->is_relseq_fixup())
1234                         compute_relseq_breakwrites(newcurr);
1235                 else if (newcurr->is_wait())
1236                         newcurr->get_node()->set_misc_max(2);
1237                 else if (newcurr->is_notify_one()) {
1238                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1239                 }
1240                 return true; /* This was a new ModelAction */
1241         }
1242 }
1243
1244 /**
1245  * @brief Establish reads-from relation between two actions
1246  *
1247  * Perform basic operations involved with establishing a concrete rf relation,
1248  * including setting the ModelAction data and checking for release sequences.
1249  *
1250  * @param act The action that is reading (must be a read)
1251  * @param rf The action from which we are reading (must be a write)
1252  *
1253  * @return True if this read established synchronization
1254  */
1255 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1256 {
1257         act->set_read_from(rf);
1258         if (rf != NULL && act->is_acquire()) {
1259                 rel_heads_list_t release_heads;
1260                 get_release_seq_heads(act, act, &release_heads);
1261                 int num_heads = release_heads.size();
1262                 for (unsigned int i = 0; i < release_heads.size(); i++)
1263                         if (!act->synchronize_with(release_heads[i])) {
1264                                 set_bad_synchronization();
1265                                 num_heads--;
1266                         }
1267                 return num_heads > 0;
1268         }
1269         return false;
1270 }
1271
1272 /**
1273  * @brief Check whether a model action is enabled.
1274  *
1275  * Checks whether a lock or join operation would be successful (i.e., the
1276  * lock is not already held, or the joined thread has already completed). If
1277  * not, put the action in the appropriate waiter list.
1278  *
1279  * @param curr is the ModelAction to check whether it is enabled.
1280  * @return a bool that indicates whether the action is enabled.
1281  */
1282 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1283         if (curr->is_lock()) {
1284                 std::mutex *lock = (std::mutex *)curr->get_location();
1285                 struct std::mutex_state *state = lock->get_state();
1286                 if (state->islocked) {
1287                         //Stick the action in the appropriate waiting queue
1288                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1289                         return false;
1290                 }
1291         } else if (curr->get_type() == THREAD_JOIN) {
1292                 Thread *blocking = (Thread *)curr->get_location();
1293                 if (!blocking->is_complete()) {
1294                         blocking->push_wait_list(curr);
1295                         return false;
1296                 }
1297         }
1298
1299         return true;
1300 }
1301
1302 /**
1303  * This is the heart of the model checker routine. It performs model-checking
1304  * actions corresponding to a given "current action." Among other tasks, it
1305  * calculates reads-from relationships, updates synchronization clock vectors,
1306  * forms a memory_order constraints graph, and handles replay/backtrack
1307  * execution when running permutations of previously-observed executions.
1308  *
1309  * @param curr The current action to process
1310  * @return The ModelAction that is actually executed; may be different than
1311  * curr; may be NULL, if the current action is not enabled to run
1312  */
1313 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1314 {
1315         ASSERT(curr);
1316         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1317
1318         if (!check_action_enabled(curr)) {
1319                 /* Make the execution look like we chose to run this action
1320                  * much later, when a lock/join can succeed */
1321                 get_thread(curr)->set_pending(curr);
1322                 scheduler->sleep(get_thread(curr));
1323                 return NULL;
1324         }
1325
1326         bool newly_explored = initialize_curr_action(&curr);
1327
1328         DBG();
1329         if (DBG_ENABLED())
1330                 curr->print();
1331
1332         wake_up_sleeping_actions(curr);
1333
1334         /* Add the action to lists before any other model-checking tasks */
1335         if (!second_part_of_rmw)
1336                 add_action_to_lists(curr);
1337
1338         /* Build may_read_from set for newly-created actions */
1339         if (newly_explored && curr->is_read())
1340                 build_reads_from_past(curr);
1341
1342         /* Initialize work_queue with the "current action" work */
1343         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1344         while (!work_queue.empty() && !has_asserted()) {
1345                 WorkQueueEntry work = work_queue.front();
1346                 work_queue.pop_front();
1347
1348                 switch (work.type) {
1349                 case WORK_CHECK_CURR_ACTION: {
1350                         ModelAction *act = work.action;
1351                         bool update = false; /* update this location's release seq's */
1352                         bool update_all = false; /* update all release seq's */
1353
1354                         if (process_thread_action(curr))
1355                                 update_all = true;
1356
1357                         if (act->is_read() && process_read(act, second_part_of_rmw))
1358                                 update = true;
1359
1360                         if (act->is_write() && process_write(act))
1361                                 update = true;
1362
1363                         if (act->is_fence() && process_fence(act))
1364                                 update_all = true;
1365
1366                         if (act->is_mutex_op() && process_mutex(act))
1367                                 update_all = true;
1368
1369                         if (act->is_relseq_fixup())
1370                                 process_relseq_fixup(curr, &work_queue);
1371
1372                         if (update_all)
1373                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1374                         else if (update)
1375                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1376                         break;
1377                 }
1378                 case WORK_CHECK_RELEASE_SEQ:
1379                         resolve_release_sequences(work.location, &work_queue);
1380                         break;
1381                 case WORK_CHECK_MO_EDGES: {
1382                         /** @todo Complete verification of work_queue */
1383                         ModelAction *act = work.action;
1384                         bool updated = false;
1385
1386                         if (act->is_read()) {
1387                                 const ModelAction *rf = act->get_reads_from();
1388                                 const Promise *promise = act->get_reads_from_promise();
1389                                 if (rf) {
1390                                         if (r_modification_order(act, rf))
1391                                                 updated = true;
1392                                 } else if (promise) {
1393                                         if (r_modification_order(act, promise))
1394                                                 updated = true;
1395                                 }
1396                         }
1397                         if (act->is_write()) {
1398                                 if (w_modification_order(act))
1399                                         updated = true;
1400                         }
1401                         mo_graph->commitChanges();
1402
1403                         if (updated)
1404                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1405                         break;
1406                 }
1407                 default:
1408                         ASSERT(false);
1409                         break;
1410                 }
1411         }
1412
1413         check_curr_backtracking(curr);
1414         set_backtracking(curr);
1415         return curr;
1416 }
1417
1418 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1419 {
1420         Node *currnode = curr->get_node();
1421         Node *parnode = currnode->get_parent();
1422
1423         if ((parnode && !parnode->backtrack_empty()) ||
1424                          !currnode->misc_empty() ||
1425                          !currnode->read_from_empty() ||
1426                          !currnode->future_value_empty() ||
1427                          !currnode->promise_empty() ||
1428                          !currnode->relseq_break_empty()) {
1429                 set_latest_backtrack(curr);
1430         }
1431 }
1432
1433 bool ModelChecker::promises_expired() const
1434 {
1435         for (unsigned int i = 0; i < promises->size(); i++) {
1436                 Promise *promise = (*promises)[i];
1437                 if (promise->get_expiration() < priv->used_sequence_numbers)
1438                         return true;
1439         }
1440         return false;
1441 }
1442
1443 /**
1444  * This is the strongest feasibility check available.
1445  * @return whether the current trace (partial or complete) must be a prefix of
1446  * a feasible trace.
1447  */
1448 bool ModelChecker::isfeasibleprefix() const
1449 {
1450         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1451 }
1452
1453 /**
1454  * Print diagnostic information about an infeasible execution
1455  * @param prefix A string to prefix the output with; if NULL, then a default
1456  * message prefix will be provided
1457  */
1458 void ModelChecker::print_infeasibility(const char *prefix) const
1459 {
1460         char buf[100];
1461         char *ptr = buf;
1462         if (mo_graph->checkForCycles())
1463                 ptr += sprintf(ptr, "[mo cycle]");
1464         if (priv->failed_promise)
1465                 ptr += sprintf(ptr, "[failed promise]");
1466         if (priv->too_many_reads)
1467                 ptr += sprintf(ptr, "[too many reads]");
1468         if (priv->no_valid_reads)
1469                 ptr += sprintf(ptr, "[no valid reads-from]");
1470         if (priv->bad_synchronization)
1471                 ptr += sprintf(ptr, "[bad sw ordering]");
1472         if (promises_expired())
1473                 ptr += sprintf(ptr, "[promise expired]");
1474         if (promises->size() != 0)
1475                 ptr += sprintf(ptr, "[unresolved promise]");
1476         if (ptr != buf)
1477                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1478 }
1479
1480 /**
1481  * Returns whether the current completed trace is feasible, except for pending
1482  * release sequences.
1483  */
1484 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1485 {
1486         return !is_infeasible() && promises->size() == 0;
1487 }
1488
1489 /**
1490  * Check if the current partial trace is infeasible. Does not check any
1491  * end-of-execution flags, which might rule out the execution. Thus, this is
1492  * useful only for ruling an execution as infeasible.
1493  * @return whether the current partial trace is infeasible.
1494  */
1495 bool ModelChecker::is_infeasible() const
1496 {
1497         return mo_graph->checkForCycles() ||
1498                 priv->no_valid_reads ||
1499                 priv->failed_promise ||
1500                 priv->too_many_reads ||
1501                 priv->bad_synchronization ||
1502                 promises_expired();
1503 }
1504
1505 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1506 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1507         ModelAction *lastread = get_last_action(act->get_tid());
1508         lastread->process_rmw(act);
1509         if (act->is_rmw()) {
1510                 if (lastread->get_reads_from())
1511                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1512                 else
1513                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1514                 mo_graph->commitChanges();
1515         }
1516         return lastread;
1517 }
1518
1519 /**
1520  * Checks whether a thread has read from the same write too many times
1521  * without seeing the effects of a later write.
1522  *
1523  * Basic idea:
1524  * 1) there must be a different write that we could read from that would satisfy the modification order,
1525  * 2) we must have read from the same value in excess of maxreads times, and
1526  * 3) that other write must have been in the reads_from set for maxreads times.
1527  *
1528  * If so, we decide that the execution is no longer feasible.
1529  */
1530 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1531 {
1532         if (params.maxreads != 0) {
1533                 if (curr->get_node()->get_read_from_size() <= 1)
1534                         return;
1535                 //Must make sure that execution is currently feasible...  We could
1536                 //accidentally clear by rolling back
1537                 if (is_infeasible())
1538                         return;
1539                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1540                 int tid = id_to_int(curr->get_tid());
1541
1542                 /* Skip checks */
1543                 if ((int)thrd_lists->size() <= tid)
1544                         return;
1545                 action_list_t *list = &(*thrd_lists)[tid];
1546
1547                 action_list_t::reverse_iterator rit = list->rbegin();
1548                 /* Skip past curr */
1549                 for (; (*rit) != curr; rit++)
1550                         ;
1551                 /* go past curr now */
1552                 rit++;
1553
1554                 action_list_t::reverse_iterator ritcopy = rit;
1555                 //See if we have enough reads from the same value
1556                 int count = 0;
1557                 for (; count < params.maxreads; rit++, count++) {
1558                         if (rit == list->rend())
1559                                 return;
1560                         ModelAction *act = *rit;
1561                         if (!act->is_read())
1562                                 return;
1563
1564                         if (act->get_reads_from() != rf)
1565                                 return;
1566                         if (act->get_node()->get_read_from_size() <= 1)
1567                                 return;
1568                 }
1569                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1570                         /* Get write */
1571                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1572
1573                         /* Need a different write */
1574                         if (write == rf)
1575                                 continue;
1576
1577                         /* Test to see whether this is a feasible write to read from */
1578                         /** NOTE: all members of read-from set should be
1579                          *  feasible, so we no longer check it here **/
1580
1581                         rit = ritcopy;
1582
1583                         bool feasiblewrite = true;
1584                         //now we need to see if this write works for everyone
1585
1586                         for (int loop = count; loop > 0; loop--, rit++) {
1587                                 ModelAction *act = *rit;
1588                                 bool foundvalue = false;
1589                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1590                                         if (act->get_node()->get_read_from_at(j) == write) {
1591                                                 foundvalue = true;
1592                                                 break;
1593                                         }
1594                                 }
1595                                 if (!foundvalue) {
1596                                         feasiblewrite = false;
1597                                         break;
1598                                 }
1599                         }
1600                         if (feasiblewrite) {
1601                                 priv->too_many_reads = true;
1602                                 return;
1603                         }
1604                 }
1605         }
1606 }
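/*
 * Illustrative sketch (not part of the checker) of the pattern the check
 * above targets, assuming params.maxreads has been set to a small bound.
 * A trace in which the spinning load keeps returning the stale initial 0,
 * even though the store of 1 stays in its may-read-from set, is eventually
 * marked too_many_reads and abandoned.  Variable names are hypothetical.
 *
 *   std::atomic<int> flag(0);
 *
 *   void writer(void *obj) { flag.store(1, std::memory_order_relaxed); }
 *
 *   void reader(void *obj)
 *   {
 *           while (!flag.load(std::memory_order_relaxed))
 *                   ;       // each iteration is another read of the same write
 *   }
 */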
1607
1608 /**
1609  * Updates the mo_graph with the constraints imposed from the current
1610  * read.
1611  *
1612  * Basic idea is the following: Go through each other thread and find
1613  * the last action that happened before our read.  Two cases:
1614  *
1615  * (1) The action is a write => that write must either occur before
1616  * the write we read from or be the write we read from.
1617  *
1618  * (2) The action is a read => the write that that action read from
1619  * must occur before the write we read from or be the same write.
1620  *
1621  * @param curr The current action. Must be a read.
1622  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1623  * @return True if modification order edges were added; false otherwise
1624  */
1625 template <typename rf_type>
1626 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1627 {
1628         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1629         unsigned int i;
1630         bool added = false;
1631         ASSERT(curr->is_read());
1632
1633         /* Last SC fence in the current thread */
1634         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1635
1636         /* Iterate over all threads */
1637         for (i = 0; i < thrd_lists->size(); i++) {
1638                 /* Last SC fence in thread i */
1639                 ModelAction *last_sc_fence_thread_local = NULL;
1640                 if (int_to_id((int)i) != curr->get_tid())
1641                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1642
1643                 /* Last SC fence in thread i, before last SC fence in current thread */
1644                 ModelAction *last_sc_fence_thread_before = NULL;
1645                 if (last_sc_fence_local)
1646                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1647
1648                 /* Iterate over actions in thread, starting from most recent */
1649                 action_list_t *list = &(*thrd_lists)[i];
1650                 action_list_t::reverse_iterator rit;
1651                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1652                         ModelAction *act = *rit;
1653
1654                         if (act->is_write() && !act->equals(rf) && act != curr) {
1655                                 /* C++, Section 29.3 statement 5 */
1656                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1657                                                 *act < *last_sc_fence_thread_local) {
1658                                         added = mo_graph->addEdge(act, rf) || added;
1659                                         break;
1660                                 }
1661                                 /* C++, Section 29.3 statement 4 */
1662                                 else if (act->is_seqcst() && last_sc_fence_local &&
1663                                                 *act < *last_sc_fence_local) {
1664                                         added = mo_graph->addEdge(act, rf) || added;
1665                                         break;
1666                                 }
1667                                 /* C++, Section 29.3 statement 6 */
1668                                 else if (last_sc_fence_thread_before &&
1669                                                 *act < *last_sc_fence_thread_before) {
1670                                         added = mo_graph->addEdge(act, rf) || added;
1671                                         break;
1672                                 }
1673                         }
1674
1675                         /*
1676                          * Include at most one act per-thread that "happens
1677                          * before" curr. Don't consider reflexively.
1678                          */
1679                         if (act->happens_before(curr) && act != curr) {
1680                                 if (act->is_write()) {
1681                                         if (!act->equals(rf)) {
1682                                                 added = mo_graph->addEdge(act, rf) || added;
1683                                         }
1684                                 } else {
1685                                         const ModelAction *prevreadfrom = act->get_reads_from();
1686                                         //if the previous read is unresolved, keep going...
1687                                         if (prevreadfrom == NULL)
1688                                                 continue;
1689
1690                                         if (!prevreadfrom->equals(rf)) {
1691                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1692                                         }
1693                                 }
1694                                 break;
1695                         }
1696                 }
1697         }
1698
1699         /*
1700          * All compatible, thread-exclusive promises must be ordered after any
1701          * concrete loads from the same thread
1702          */
1703         for (unsigned int i = 0; i < promises->size(); i++)
1704                 if ((*promises)[i]->is_compatible_exclusive(curr))
1705                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1706
1707         return added;
1708 }
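/*
 * Illustrative sketch (not part of the checker) of case (1) above, using the
 * message-passing idiom.  Once the acquire load of flag returns 1, the store
 * x = 1 happens before the relaxed load of x.  If that load tried to read an
 * older write W (e.g., the initial value), r_modification_order() would add
 * the edge  store(x,1) --mo--> W, which contradicts the existing modification
 * order and rules the execution infeasible, so the load returns 1.  Variable
 * names are hypothetical.
 *
 *   std::atomic<int> x(0), flag(0);
 *
 *   void producer(void *obj)
 *   {
 *           x.store(1, std::memory_order_relaxed);
 *           flag.store(1, std::memory_order_release);
 *   }
 *
 *   void consumer(void *obj)
 *   {
 *           while (!flag.load(std::memory_order_acquire))
 *                   ;
 *           int r = x.load(std::memory_order_relaxed);   // must observe 1
 *   }
 */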
1709
1710 /**
1711  * Updates the mo_graph with the constraints imposed from the current write.
1712  *
1713  * Basic idea is the following: Go through each other thread and find
1714  * the latest action that happened before our write.  Two cases:
1715  *
1716  * (1) The action is a write => that write must occur before
1717  * the current write
1718  *
1719  * (2) The action is a read => the write that that action read from
1720  * must occur before the current write.
1721  *
1722  * This method also handles two other issues:
1723  *
1724  * (I) Sequential Consistency: Making sure that if the current write is
1725  * seq_cst, that it occurs after the previous seq_cst write.
1726  *
1727  * (II) Sending the write back to non-synchronizing reads.
1728  *
1729  * @param curr The current action. Must be a write.
1730  * @return True if modification order edges were added; false otherwise
1731  */
1732 bool ModelChecker::w_modification_order(ModelAction *curr)
1733 {
1734         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1735         unsigned int i;
1736         bool added = false;
1737         ASSERT(curr->is_write());
1738
1739         if (curr->is_seqcst()) {
1740                 /* We have to at least see the last sequentially consistent write,
1741                          so we are initialized. */
1742                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1743                 if (last_seq_cst != NULL) {
1744                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1745                 }
1746         }
1747
1748         /* Last SC fence in the current thread */
1749         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1750
1751         /* Iterate over all threads */
1752         for (i = 0; i < thrd_lists->size(); i++) {
1753                 /* Last SC fence in thread i, before last SC fence in current thread */
1754                 ModelAction *last_sc_fence_thread_before = NULL;
1755                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1756                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1757
1758                 /* Iterate over actions in thread, starting from most recent */
1759                 action_list_t *list = &(*thrd_lists)[i];
1760                 action_list_t::reverse_iterator rit;
1761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1762                         ModelAction *act = *rit;
1763                         if (act == curr) {
1764                                 /*
1765                                  * 1) If RMW and it actually read from something, then we
1766                                  * already have all relevant edges, so just skip to next
1767                                  * thread.
1768                                  *
1769                                  * 2) If RMW and it didn't read from anything, we should add
1770                                  * whatever edge we can get to speed up convergence.
1771                                  *
1772                                  * 3) If normal write, we need to look at earlier actions, so
1773                                  * continue processing list.
1774                                  */
1775                                 if (curr->is_rmw()) {
1776                                         if (curr->get_reads_from() != NULL)
1777                                                 break;
1778                                         else
1779                                                 continue;
1780                                 } else
1781                                         continue;
1782                         }
1783
1784                         /* C++, Section 29.3 statement 7 */
1785                         if (last_sc_fence_thread_before && act->is_write() &&
1786                                         *act < *last_sc_fence_thread_before) {
1787                                 added = mo_graph->addEdge(act, curr) || added;
1788                                 break;
1789                         }
1790
1791                         /*
1792                          * Include at most one act per-thread that "happens
1793                          * before" curr
1794                          */
1795                         if (act->happens_before(curr)) {
1796                                 /*
1797                                  * Note: if act is RMW, just add edge:
1798                                  *   act --mo--> curr
1799                                  * The following edge should be handled elsewhere:
1800                                  *   readfrom(act) --mo--> act
1801                                  */
1802                                 if (act->is_write())
1803                                         added = mo_graph->addEdge(act, curr) || added;
1804                                 else if (act->is_read()) {
1805                                         //if previous read accessed a null, just keep going
1806                                         if (act->get_reads_from() == NULL)
1807                                                 continue;
1808                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1809                                 }
1810                                 break;
1811                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1812                                                      !act->same_thread(curr)) {
1813                                 /* We have an action that:
1814                                    (1) did not happen before us
1815                                    (2) is a read and we are a write
1816                                    (3) cannot synchronize with us
1817                                    (4) is in a different thread
1818                                    =>
1819                                    that read could potentially read from our write.  Note that
1820                                    these checks are overly conservative at this point, we'll
1821                                    do more checks before actually removing the
1822                                    pendingfuturevalue.
1823
1824                                  */
1825                                 if (thin_air_constraint_may_allow(curr, act)) {
1826                                         if (!is_infeasible())
1827                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1828                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1829                                                 add_future_value(curr, act);
1830                                 }
1831                         }
1832                 }
1833         }
1834
1835         /*
1836          * All compatible, thread-exclusive promises must be ordered after any
1837                  * concrete stores from the same thread, or else they can be merged with
1838          * this store later
1839          */
1840         for (unsigned int i = 0; i < promises->size(); i++)
1841                 if ((*promises)[i]->is_compatible_exclusive(curr))
1842                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1843
1844         return added;
1845 }
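/*
 * Illustrative sketch (not part of the checker) of issue (I) above.  With two
 * seq_cst stores to the same location, the one that executes later in the
 * trace gets an mo edge from the earlier one (via get_last_seq_cst_write()),
 * so the modification order is consistent with the seq_cst total order.
 * Variable names are hypothetical.
 *
 *   std::atomic<int> x(0);
 *
 *   void t1(void *obj) { x.store(1, std::memory_order_seq_cst); }
 *   void t2(void *obj) { x.store(2, std::memory_order_seq_cst); }
 *
 *   // Schedule store(1) before store(2): the checker adds
 *   //   store(x,1) --mo--> store(x,2);
 *   // the opposite schedule adds the reverse edge.
 */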
1846
1847 /** Arbitrary reads from the future are not allowed.  Section 29.3
1848  * part 9 places some constraints.  This method checks one consequence of that
1849  * constraint.  Others require compiler support. */
1850 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1851 {
1852         if (!writer->is_rmw())
1853                 return true;
1854
1855         if (!reader->is_rmw())
1856                 return true;
1857
1858         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1859                 if (search == reader)
1860                         return false;
1861                 if (search->get_tid() == reader->get_tid() &&
1862                                 search->happens_before(reader))
1863                         break;
1864         }
1865
1866         return true;
1867 }
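/*
 * Illustrative sketch (not part of the checker) of the case ruled out above:
 * two RMWs on the same variable cannot each read from the other.  If B's
 * reads-from chain already contains A, then thin_air_constraint_may_allow(B, A)
 * walks that chain, finds A, and returns false, so A is not offered B as a
 * future value.  Variable names are hypothetical.
 *
 *   std::atomic<int> x(0);
 *
 *   void t1(void *obj) { x.fetch_add(1, std::memory_order_relaxed); }  // RMW A
 *   void t2(void *obj) { x.fetch_add(1, std::memory_order_relaxed); }  // RMW B
 */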
1868
1869 /**
1870  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1871  * some constraints. This method checks the following constraint (others
1872  * require compiler support):
1873  *
1874  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1875  */
1876 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1877 {
1878         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1879         unsigned int i;
1880         /* Iterate over all threads */
1881         for (i = 0; i < thrd_lists->size(); i++) {
1882                 const ModelAction *write_after_read = NULL;
1883
1884                 /* Iterate over actions in thread, starting from most recent */
1885                 action_list_t *list = &(*thrd_lists)[i];
1886                 action_list_t::reverse_iterator rit;
1887                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1888                         ModelAction *act = *rit;
1889
1890                         /* Don't disallow due to act == reader */
1891                         if (!reader->happens_before(act) || reader == act)
1892                                 break;
1893                         else if (act->is_write())
1894                                 write_after_read = act;
1895                         else if (act->is_read() && act->get_reads_from() != NULL)
1896                                 write_after_read = act->get_reads_from();
1897                 }
1898
1899                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1900                         return false;
1901         }
1902         return true;
1903 }
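/*
 * Illustrative sketch (not part of the checker) of the constraint above.
 * Here X is the load and Y, Z are stores to the same variable.  X happens
 * before Y by program order, so if the graph already contains Y --mo--> Z,
 * mo_may_allow(Z, X) finds Y as a write after the read and returns false:
 * X may not read Z's value from the future.  Variable names are hypothetical.
 *
 *   std::atomic<int> x(0);
 *
 *   void t1(void *obj)
 *   {
 *           int r = x.load(std::memory_order_relaxed);   // X
 *           x.store(1, std::memory_order_relaxed);       // Y
 *   }
 *
 *   void t2(void *obj)
 *   {
 *           x.store(2, std::memory_order_relaxed);       // Z
 *   }
 */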
1904
1905 /**
1906  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1907  * The ModelAction under consideration is expected to be taking part in
1908  * release/acquire synchronization as an object of the "reads from" relation.
1909  * Note that this can only provide release sequence support for RMW chains
1910  * which do not read from the future, as those actions cannot be traced until
1911  * their "promise" is fulfilled. Similarly, we may not even establish the
1912  * presence of a release sequence with certainty, as some modification order
1913  * constraints may be decided further in the future. Thus, this function
1914  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1915  * and a boolean representing certainty.
1916  *
1917  * @param rf The action that might be part of a release sequence. Must be a
1918  * write.
1919  * @param release_heads A pass-by-reference style return parameter. After
1920  * execution of this function, release_heads will contain the heads of all the
1921  * relevant release sequences, if any exist with certainty
1922  * @param pending A pass-by-reference style return parameter which is only used
1923  * when returning false (i.e., uncertain). Returns most information regarding
1924  * an uncertain release sequence, including any write operations that might
1925  * break the sequence.
1926  * @return true, if the ModelChecker is certain that release_heads is complete;
1927  * false otherwise
1928  */
1929 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1930                 rel_heads_list_t *release_heads,
1931                 struct release_seq *pending) const
1932 {
1933         /* Only check for release sequences if there are no cycles */
1934         if (mo_graph->checkForCycles())
1935                 return false;
1936
1937         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1938                 ASSERT(rf->is_write());
1939
1940                 if (rf->is_release())
1941                         release_heads->push_back(rf);
1942                 else if (rf->get_last_fence_release())
1943                         release_heads->push_back(rf->get_last_fence_release());
1944                 if (!rf->is_rmw())
1945                         break; /* End of RMW chain */
1946
1947                 /** @todo Need to be smarter here...  In the linux lock
1948                  * example, this will run to the beginning of the program for
1949                  * every acquire. */
1950                 /** @todo The way to be smarter here is to keep going until 1
1951                  * thread has a release preceded by an acquire and you've seen
1952                  *       both. */
1953
1954                 /* acq_rel RMW is a sufficient stopping condition */
1955                 if (rf->is_acquire() && rf->is_release())
1956                         return true; /* complete */
1957         }
1958         if (!rf) {
1959                 /* read from future: need to settle this later */
1960                 pending->rf = NULL;
1961                 return false; /* incomplete */
1962         }
1963
1964         if (rf->is_release())
1965                 return true; /* complete */
1966
1967         /* else relaxed write
1968          * - check for fence-release in the same thread (29.8, stmt. 3)
1969          * - check modification order for contiguous subsequence
1970          *   -> rf must be same thread as release */
1971
1972         const ModelAction *fence_release = rf->get_last_fence_release();
1973         /* Synchronize with a fence-release unconditionally; we don't need to
1974          * find any more "contiguous subsequence..." for it */
1975         if (fence_release)
1976                 release_heads->push_back(fence_release);
1977
1978         int tid = id_to_int(rf->get_tid());
1979         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1980         action_list_t *list = &(*thrd_lists)[tid];
1981         action_list_t::const_reverse_iterator rit;
1982
1983         /* Find rf in the thread list */
1984         rit = std::find(list->rbegin(), list->rend(), rf);
1985         ASSERT(rit != list->rend());
1986
1987         /* Find the last {write,fence}-release */
1988         for (; rit != list->rend(); rit++) {
1989                 if (fence_release && *(*rit) < *fence_release)
1990                         break;
1991                 if ((*rit)->is_release())
1992                         break;
1993         }
1994         if (rit == list->rend()) {
1995                 /* No write-release in this thread */
1996                 return true; /* complete */
1997         } else if (fence_release && *(*rit) < *fence_release) {
1998                 /* The fence-release is more recent (and so, "stronger") than
1999                  * the most recent write-release */
2000                 return true; /* complete */
2001         } /* else, need to establish contiguous release sequence */
2002         ModelAction *release = *rit;
2003
2004         ASSERT(rf->same_thread(release));
2005
2006         pending->writes.clear();
2007
2008         bool certain = true;
2009         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2010                 if (id_to_int(rf->get_tid()) == (int)i)
2011                         continue;
2012                 list = &(*thrd_lists)[i];
2013
2014                 /* Can we ensure no future writes from this thread may break
2015                  * the release seq? */
2016                 bool future_ordered = false;
2017
2018                 ModelAction *last = get_last_action(int_to_id(i));
2019                 Thread *th = get_thread(int_to_id(i));
2020                 if ((last && rf->happens_before(last)) ||
2021                                 !is_enabled(th) ||
2022                                 th->is_complete())
2023                         future_ordered = true;
2024
2025                 ASSERT(!th->is_model_thread() || future_ordered);
2026
2027                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2028                         const ModelAction *act = *rit;
2029                         /* Reach synchronization -> this thread is complete */
2030                         if (act->happens_before(release))
2031                                 break;
2032                         if (rf->happens_before(act)) {
2033                                 future_ordered = true;
2034                                 continue;
2035                         }
2036
2037                         /* Only non-RMW writes can break release sequences */
2038                         if (!act->is_write() || act->is_rmw())
2039                                 continue;
2040
2041                         /* Check modification order */
2042                         if (mo_graph->checkReachable(rf, act)) {
2043                                 /* rf --mo--> act */
2044                                 future_ordered = true;
2045                                 continue;
2046                         }
2047                         if (mo_graph->checkReachable(act, release))
2048                                 /* act --mo--> release */
2049                                 break;
2050                         if (mo_graph->checkReachable(release, act) &&
2051                                       mo_graph->checkReachable(act, rf)) {
2052                                 /* release --mo-> act --mo--> rf */
2053                                 return true; /* complete */
2054                         }
2055                         /* act may break release sequence */
2056                         pending->writes.push_back(act);
2057                         certain = false;
2058                 }
2059                 if (!future_ordered)
2060                         certain = false; /* This thread is uncertain */
2061         }
2062
2063         if (certain) {
2064                 release_heads->push_back(release);
2065                 pending->writes.clear();
2066         } else {
2067                 pending->release = release;
2068                 pending->rf = rf;
2069         }
2070         return certain;
2071 }
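/*
 * Illustrative sketch (not part of the checker) of a release sequence this
 * function can certify with certainty.  The relaxed fetch_add is an RMW, so
 * it continues the release sequence headed by the release store; an acquire
 * load that reads from the fetch_add therefore synchronizes with t1's store,
 * which is reported in release_heads.  Variable and function names are
 * hypothetical.
 *
 *   std::atomic<int> x(0);
 *   int data;
 *
 *   void t1(void *obj)
 *   {
 *           data = 42;
 *           x.store(1, std::memory_order_release);              // release head
 *   }
 *
 *   void t2(void *obj) { x.fetch_add(1, std::memory_order_relaxed); }  // RMW in the sequence
 *
 *   void t3(void *obj)
 *   {
 *           if (x.load(std::memory_order_acquire) == 2) {       // reads from the RMW
 *                   int r = data;                               // safe: synchronizes with t1's store
 *           }
 *   }
 */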
2072
2073 /**
2074  * An interface for getting the release sequence head(s) with which a
2075  * given ModelAction must synchronize. This function only returns a non-empty
2076  * result when it can locate a release sequence head with certainty. Otherwise,
2077  * it may mark the internal state of the ModelChecker so that it will handle
2078  * the release sequence at a later time, causing @a acquire to update its
2079  * synchronization at some later point in execution.
2080  *
2081  * @param acquire The 'acquire' action that may synchronize with a release
2082  * sequence
2083  * @param read The read action that may read from a release sequence; this may
2084  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2085  * when 'acquire' is a fence-acquire)
2086  * @param release_heads A pass-by-reference return parameter. Will be filled
2087  * with the head(s) of the release sequence(s), if they exist with certainty.
2088  * @see ModelChecker::release_seq_heads
2089  */
2090 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2091                 ModelAction *read, rel_heads_list_t *release_heads)
2092 {
2093         const ModelAction *rf = read->get_reads_from();
2094         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2095         sequence->acquire = acquire;
2096         sequence->read = read;
2097
2098         if (!release_seq_heads(rf, release_heads, sequence)) {
2099                 /* add act to 'lazy checking' list */
2100                 pending_rel_seqs->push_back(sequence);
2101         } else {
2102                 snapshot_free(sequence);
2103         }
2104 }
2105
2106 /**
2107  * Attempt to resolve all stashed operations that might synchronize with a
2108  * release sequence for a given location. This implements the "lazy" portion of
2109  * determining whether or not a release sequence was contiguous, since not all
2110  * modification order information is present at the time an action occurs.
2111  *
2112  * @param location The location/object that should be checked for release
2113  * sequence resolutions. A NULL value means to check all locations.
2114  * @param work_queue The work queue to which to add work items as they are
2115  * generated
2116  * @return True if any updates occurred (new synchronization, new mo_graph
2117  * edges)
2118  */
2119 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2120 {
2121         bool updated = false;
2122         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2123         while (it != pending_rel_seqs->end()) {
2124                 struct release_seq *pending = *it;
2125                 ModelAction *acquire = pending->acquire;
2126                 const ModelAction *read = pending->read;
2127
2128                 /* Only resolve sequences on the given location, if provided */
2129                 if (location && read->get_location() != location) {
2130                         it++;
2131                         continue;
2132                 }
2133
2134                 const ModelAction *rf = read->get_reads_from();
2135                 rel_heads_list_t release_heads;
2136                 bool complete;
2137                 complete = release_seq_heads(rf, &release_heads, pending);
2138                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2139                         if (!acquire->has_synchronized_with(release_heads[i])) {
2140                                 if (acquire->synchronize_with(release_heads[i]))
2141                                         updated = true;
2142                                 else
2143                                         set_bad_synchronization();
2144                         }
2145                 }
2146
2147                 if (updated) {
2148                         /* Re-check all pending release sequences */
2149                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2150                         /* Re-check read-acquire for mo_graph edges */
2151                         if (acquire->is_read())
2152                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2153
2154                         /* propagate synchronization to later actions */
2155                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2156                         for (; (*rit) != acquire; rit++) {
2157                                 ModelAction *propagate = *rit;
2158                                 if (acquire->happens_before(propagate)) {
2159                                         propagate->synchronize_with(acquire);
2160                                         /* Re-check 'propagate' for mo_graph edges */
2161                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2162                                 }
2163                         }
2164                 }
2165                 if (complete) {
2166                         it = pending_rel_seqs->erase(it);
2167                         snapshot_free(pending);
2168                 } else {
2169                         it++;
2170                 }
2171         }
2172
2173         // If we resolved promises or added synchronization, check whether we have realized a data race.
2174         checkDataRaces();
2175
2176         return updated;
2177 }
2178
2179 /**
2180  * Performs various bookkeeping operations for the current ModelAction. For
2181  * instance, adds action to the per-object, per-thread action vector and to the
2182  * action trace list of all thread actions.
2183  *
2184  * @param act is the ModelAction to add.
2185  */
2186 void ModelChecker::add_action_to_lists(ModelAction *act)
2187 {
2188         int tid = id_to_int(act->get_tid());
2189         ModelAction *uninit = NULL;
2190         int uninit_id = -1;
2191         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2192         if (list->empty() && act->is_atomic_var()) {
2193                 uninit = new_uninitialized_action(act->get_location());
2194                 uninit_id = id_to_int(uninit->get_tid());
2195                 list->push_back(uninit);
2196         }
2197         list->push_back(act);
2198
2199         action_trace->push_back(act);
2200         if (uninit)
2201                 action_trace->push_front(uninit);
2202
2203         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2204         if (tid >= (int)vec->size())
2205                 vec->resize(priv->next_thread_id);
2206         (*vec)[tid].push_back(act);
2207         if (uninit)
2208                 (*vec)[uninit_id].push_front(uninit);
2209
2210         if ((int)thrd_last_action->size() <= tid)
2211                 thrd_last_action->resize(get_num_threads());
2212         (*thrd_last_action)[tid] = act;
2213         if (uninit)
2214                 (*thrd_last_action)[uninit_id] = uninit;
2215
2216         if (act->is_fence() && act->is_release()) {
2217                 if ((int)thrd_last_fence_release->size() <= tid)
2218                         thrd_last_fence_release->resize(get_num_threads());
2219                 (*thrd_last_fence_release)[tid] = act;
2220         }
2221
2222         if (act->is_wait()) {
2223                 void *mutex_loc = (void *) act->get_value();
2224                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2225
2226                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2227                 if (tid >= (int)vec->size())
2228                         vec->resize(priv->next_thread_id);
2229                 (*vec)[tid].push_back(act);
2230         }
2231 }
2232
2233 /**
2234  * @brief Get the last action performed by a particular Thread
2235  * @param tid The thread ID of the Thread in question
2236  * @return The last action in the thread
2237  */
2238 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2239 {
2240         int threadid = id_to_int(tid);
2241         if (threadid < (int)thrd_last_action->size())
2242                 return (*thrd_last_action)[id_to_int(tid)];
2243         else
2244                 return NULL;
2245 }
2246
2247 /**
2248  * @brief Get the last fence release performed by a particular Thread
2249  * @param tid The thread ID of the Thread in question
2250  * @return The last fence release in the thread, if one exists; NULL otherwise
2251  */
2252 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2253 {
2254         int threadid = id_to_int(tid);
2255         if (threadid < (int)thrd_last_fence_release->size())
2256                 return (*thrd_last_fence_release)[id_to_int(tid)];
2257         else
2258                 return NULL;
2259 }
2260
2261 /**
2262  * Gets the last memory_order_seq_cst write (in the total global sequence)
2263  * performed on a particular object (i.e., memory location), not including the
2264  * current action.
2265  * @param curr The current ModelAction; also denotes the object location to
2266  * check
2267  * @return The last seq_cst write
2268  */
2269 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2270 {
2271         void *location = curr->get_location();
2272         action_list_t *list = get_safe_ptr_action(obj_map, location);
2273         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2274         action_list_t::reverse_iterator rit;
2275         for (rit = list->rbegin(); rit != list->rend(); rit++)
2276                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2277                         return *rit;
2278         return NULL;
2279 }
2280
2281 /**
2282  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2283  * performed in a particular thread, prior to a particular fence.
2284  * @param tid The ID of the thread to check
2285  * @param before_fence The fence from which to begin the search; if NULL, then
2286  * search for the most recent fence in the thread.
2287  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2288  */
2289 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2290 {
2291         /* All fences should have NULL location */
2292         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2293         action_list_t::reverse_iterator rit = list->rbegin();
2294
2295         if (before_fence) {
2296                 for (; rit != list->rend(); rit++)
2297                         if (*rit == before_fence)
2298                                 break;
2299
2300                 ASSERT(*rit == before_fence);
2301                 rit++;
2302         }
2303
2304         for (; rit != list->rend(); rit++)
2305                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2306                         return *rit;
2307         return NULL;
2308 }
2309
2310 /**
2311  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2312  * location). This function identifies the mutex according to the current
2313  * action, which is presumed to perform on the same mutex.
2314  * @param curr The current ModelAction; also denotes the object location to
2315  * check
2316  * @return The last unlock operation
2317  */
2318 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2319 {
2320         void *location = curr->get_location();
2321         action_list_t *list = get_safe_ptr_action(obj_map, location);
2322         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2323         action_list_t::reverse_iterator rit;
2324         for (rit = list->rbegin(); rit != list->rend(); rit++)
2325                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2326                         return *rit;
2327         return NULL;
2328 }
2329
2330 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2331 {
2332         ModelAction *parent = get_last_action(tid);
2333         if (!parent)
2334                 parent = get_thread(tid)->get_creation();
2335         return parent;
2336 }
2337
2338 /**
2339  * Returns the clock vector for a given thread.
2340  * @param tid The thread whose clock vector we want
2341  * @return Desired clock vector
2342  */
2343 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2344 {
2345         return get_parent_action(tid)->get_cv();
2346 }
2347
2348 /**
2349  * Resolve a set of Promises with a current write. The set is provided in the
2350  * Node corresponding to @a write.
2351  * @param write The ModelAction that is fulfilling Promises
2352  * @return True if promises were resolved; false otherwise
2353  */
2354 bool ModelChecker::resolve_promises(ModelAction *write)
2355 {
2356         bool haveResolved = false;
2357         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2358         promise_list_t mustResolve, resolved;
2359
2360         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2361                 Promise *promise = (*promises)[promise_index];
2362                 if (write->get_node()->get_promise(i)) {
2363                         ModelAction *read = promise->get_action();
2364                         read_from(read, write);
2365                         //Make sure the promise's value matches the write's value
2366                         ASSERT(promise->is_compatible(write));
2367                         mo_graph->resolvePromise(read, write, &mustResolve);
2368
2369                         resolved.push_back(promise);
2370                         promises->erase(promises->begin() + promise_index);
2371                         actions_to_check.push_back(read);
2372
2373                         haveResolved = true;
2374                 } else
2375                         promise_index++;
2376         }
2377
2378         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2379                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2380                                 == resolved.end())
2381                         priv->failed_promise = true;
2382         }
2383         for (unsigned int i = 0; i < resolved.size(); i++)
2384                 delete resolved[i];
2385         //Check whether reading these writes has made threads unable to
2386         //resolve promises
2387
2388         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2389                 ModelAction *read = actions_to_check[i];
2390                 mo_check_promises(read, true);
2391         }
2392
2393         return haveResolved;
2394 }
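/*
 * Illustrative sketch (not part of the checker) of a promise being resolved.
 * If the load in t1 is scheduled before any store of 1 exists in the trace,
 * it may still speculatively return 1, recorded as a Promise.  When t2's
 * store is later explored and its Node selects that promise,
 * resolve_promises() fixes up the reads-from relation and retires the
 * Promise.  Variable names are hypothetical.
 *
 *   std::atomic<int> x(0);
 *
 *   void t1(void *obj) { int r = x.load(std::memory_order_relaxed); }  // may promise the value 1
 *   void t2(void *obj) { x.store(1, std::memory_order_relaxed); }      // can fulfill that promise
 */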
2395
2396 /**
2397  * Compute the set of promises that could potentially be satisfied by this
2398  * action. Note that the set computation actually appears in the Node, not in
2399  * ModelChecker.
2400  * @param curr The ModelAction that may satisfy promises
2401  */
2402 void ModelChecker::compute_promises(ModelAction *curr)
2403 {
2404         for (unsigned int i = 0; i < promises->size(); i++) {
2405                 Promise *promise = (*promises)[i];
2406                 const ModelAction *act = promise->get_action();
2407                 if (!act->happens_before(curr) &&
2408                                 act->is_read() &&
2409                                 !act->could_synchronize_with(curr) &&
2410                                 !act->same_thread(curr) &&
2411                                 act->get_location() == curr->get_location() &&
2412                                 promise->get_value() == curr->get_value()) {
2413                         curr->get_node()->set_promise(i, act->is_rmw());
2414                 }
2415         }
2416 }
2417
2418 /** Checks promises in response to change in ClockVector Threads. */
2419 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2420 {
2421         for (unsigned int i = 0; i < promises->size(); i++) {
2422                 Promise *promise = (*promises)[i];
2423                 const ModelAction *act = promise->get_action();
2424                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2425                                 merge_cv->synchronized_since(act)) {
2426                         if (promise->eliminate_thread(tid)) {
2427                                 //Promise has failed
2428                                 priv->failed_promise = true;
2429                                 return;
2430                         }
2431                 }
2432         }
2433 }
2434
2435 void ModelChecker::check_promises_thread_disabled()
2436 {
2437         for (unsigned int i = 0; i < promises->size(); i++) {
2438                 Promise *promise = (*promises)[i];
2439                 if (promise->has_failed()) {
2440                         priv->failed_promise = true;
2441                         return;
2442                 }
2443         }
2444 }
2445
2446 /**
2447  * @brief Checks promises in response to addition to modification order for
2448  * threads.
2449  *
2450  * We test whether threads are still available for satisfying promises after an
2451  * addition to our modification order constraints. Those that are unavailable
2452  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2453  * that promise has failed.
2454  *
2455  * @param act The ModelAction which updated the modification order
2456  * @param is_read_check Should be true if act is a read and we must check for
2457  * updates to the store from which it read (there is a distinction here for
2458  * RMW's, which are both a load and a store)
2459  */
2460 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2461 {
2462         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2463
2464         for (unsigned int i = 0; i < promises->size(); i++) {
2465                 Promise *promise = (*promises)[i];
2466                 const ModelAction *pread = promise->get_action();
2467
2468                 // Is this promise on the same location?
2469                 if (!pread->same_var(write))
2470                         continue;
2471
2472                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2473                         priv->failed_promise = true;
2474                         return;
2475                 }
2476
2477                 // Don't do any lookups twice for the same thread
2478                 if (!promise->thread_is_available(act->get_tid()))
2479                         continue;
2480
2481                 if (mo_graph->checkReachable(promise, write)) {
2482                         if (mo_graph->checkPromise(write, promise)) {
2483                                 priv->failed_promise = true;
2484                                 return;
2485                         }
2486                 }
2487         }
2488 }
2489
2490 /**
2491  * Compute the set of writes that may break the current pending release
2492  * sequence. This information is extracted from previous release sequence
2493  * calculations.
2494  *
2495  * @param curr The current ModelAction. Must be a release sequence fixup
2496  * action.
2497  */
2498 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2499 {
2500         if (pending_rel_seqs->empty())
2501                 return;
2502
2503         struct release_seq *pending = pending_rel_seqs->back();
2504         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2505                 const ModelAction *write = pending->writes[i];
2506                 curr->get_node()->add_relseq_break(write);
2507         }
2508
2509         /* NULL means don't break the sequence; just synchronize */
2510         curr->get_node()->add_relseq_break(NULL);
2511 }
2512
2513 /**
2514  * Build up an initial set of all past writes that this 'read' action may read
2515  * from. This set is determined by the clock vector's "happens before"
2516  * relationship.
2517  * @param curr is the current ModelAction that we are exploring; it must be a
2518  * 'read' operation.
2519  */
2520 void ModelChecker::build_reads_from_past(ModelAction *curr)
2521 {
2522         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2523         unsigned int i;
2524         ASSERT(curr->is_read());
2525
2526         ModelAction *last_sc_write = NULL;
2527
2528         if (curr->is_seqcst())
2529                 last_sc_write = get_last_seq_cst_write(curr);
2530
2531         /* Iterate over all threads */
2532         for (i = 0; i < thrd_lists->size(); i++) {
2533                 /* Iterate over actions in thread, starting from most recent */
2534                 action_list_t *list = &(*thrd_lists)[i];
2535                 action_list_t::reverse_iterator rit;
2536                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2537                         ModelAction *act = *rit;
2538
2539                         /* Only consider 'write' actions */
2540                         if (!act->is_write() || act == curr)
2541                                 continue;
2542
2543                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2544                         bool allow_read = true;
2545
2546                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2547                                 allow_read = false;
2548                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2549                                 allow_read = false;
2550
2551                         if (allow_read) {
2552                                 /* Only add feasible reads */
2553                                 mo_graph->startChanges();
2554                                 r_modification_order(curr, act);
2555                                 if (!is_infeasible())
2556                                         curr->get_node()->add_read_from(act);
2557                                 mo_graph->rollbackChanges();
2558                         }
2559
2560                         /* Include at most one act per-thread that "happens before" curr */
2561                         if (act->happens_before(curr))
2562                                 break;
2563                 }
2564         }
2565         /* We may find no valid may-read-from set only if the execution is doomed */
2566         if (!curr->get_node()->get_read_from_size()) {
2567                 priv->no_valid_reads = true;
2568                 set_assert();
2569         }
2570
2571         if (DBG_ENABLED()) {
2572                 model_print("Reached read action:\n");
2573                 curr->print();
2574                 model_print("Printing may_read_from\n");
2575                 curr->get_node()->print_may_read_from();
2576                 model_print("End printing may_read_from\n");
2577         }
2578 }
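/*
 * Worked example (illustrative user code, not part of the checker): for a
 * program along the lines of
 *
 *	std::atomic<int> x(0);
 *	// Thread 1:             // Thread 2:
 *	x.store(1);              int r = x.load();
 *	x.store(2);
 *
 * the load in Thread 2 may read the initial value or either store from
 * Thread 1, unless happens-before rules a candidate out; the loops above
 * enumerate exactly these per-thread candidates, stopping once the most
 * recent write that happens-before the read has been included.
 */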
2579
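/**
 * @brief Decide whether a read issued by a sleeping thread may read from a
 * given write
 *
 * Walks backward from @a write along the reads-from chain of RMW operations.
 * The read is permitted if the chain reaches the uninitialized store or a
 * release write performed while the reading thread was on the sleep set
 * (i.e., a write that could wake it); it is rejected at the first non-RMW
 * write that satisfies neither condition.
 *
 * @param curr The read action of the sleeping thread
 * @param write The candidate write to read from
 * @return True if @a curr may read from @a write
 */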
2580 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2581 {
2582         for ( ; write != NULL; write = write->get_reads_from()) {
2583                 /* UNINIT actions don't have a Node, and they never sleep */
2584                 if (write->is_uninitialized())
2585                         return true;
2586                 Node *prevnode = write->get_node()->get_parent();
2587
2588                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2589                 if (write->is_release() && thread_sleep)
2590                         return true;
2591                 if (!write->is_rmw())
2592                         return false;
2593         }
2594         return true;
2595 }
2596
2597 /**
2598  * @brief Create a new action representing an uninitialized atomic
2599  * @param location The memory location of the atomic object
2600  * @return A pointer to a new ModelAction
2601  */
2602 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2603 {
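	/* Construct the action with placement new into snapshot-allocated
	 * memory, bypassing ModelAction's usual allocation path */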
2604         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2605         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2606         act->create_cv(NULL);
2607         return act;
2608 }
2609
2610 static void print_list(action_list_t *list)
2611 {
2612         action_list_t::iterator it;
2613
2614         model_print("---------------------------------------------------------------------\n");
2615
2616         unsigned int hash = 0;
2617
2618         for (it = list->begin(); it != list->end(); it++) {
2619                 (*it)->print();
2620                 hash = hash^(hash<<3)^((*it)->hash());
2621         }
2622         model_print("HASH %u\n", hash);
2623         model_print("---------------------------------------------------------------------\n");
2624 }
2625
2626 #if SUPPORT_MOD_ORDER_DUMP
2627 void ModelChecker::dumpGraph(char *filename) const
2628 {
2629         char buffer[200];
2630         sprintf(buffer, "%s.dot", filename);
2631         FILE *file = fopen(buffer, "w");
2632         fprintf(file, "digraph %s {\n", filename);
2633         mo_graph->dumpNodes(file);
2634         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2635
2636         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2637                 ModelAction *action = *it;
2638                 if (action->is_read()) {
2639                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2640                         if (action->get_reads_from() != NULL)
2641                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2642                 }
2643                 if (thread_array[action->get_tid()] != NULL) {
2644                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2645                 }
2646
2647                 thread_array[action->get_tid()] = action;
2648         }
2649         fprintf(file, "}\n");
2650         model_free(thread_array);
2651         fclose(file);
2652 }
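/*
 * Illustrative output (reconstructed from the format strings above, not a
 * captured dump): dumpGraph("graph0001") writes graph0001.dot containing
 * lines such as
 *
 *	digraph graph0001 {
 *		... modification-order nodes/edges emitted by mo_graph ...
 *		N3 [label="N3, T1"];
 *		N3 -> N1[label="rf", color=red];
 *		N2 -> N3[label="sb", color=blue];
 *	}
 *
 * where red "rf" edges point from each read to the write it read from and
 * blue "sb" edges chain each thread's actions in sequenced-before order.
 */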
2653 #endif
2654
2655 /** @brief Prints an execution trace summary. */
2656 void ModelChecker::print_summary() const
2657 {
2658 #if SUPPORT_MOD_ORDER_DUMP
2659         char buffername[100];
2660         sprintf(buffername, "exec%04u", stats.num_total);
2661         mo_graph->dumpGraphToFile(buffername);
2662         sprintf(buffername, "graph%04u", stats.num_total);
2663         dumpGraph(buffername);
2664 #endif
2665
2666         model_print("Execution %u:", stats.num_total);
2667         if (isfeasibleprefix())
2668                 model_print("\n");
2669         else
2670                 print_infeasibility(" INFEASIBLE");
2671         print_list(action_trace);
2672         model_print("\n");
2673 }
2674
2675 /**
2676  * Add a Thread to the system for the first time. Should only be called once
2677  * per thread.
2678  * @param t The Thread to add
2679  */
2680 void ModelChecker::add_thread(Thread *t)
2681 {
2682         thread_map->put(id_to_int(t->get_id()), t);
2683         scheduler->add_thread(t);
2684 }
2685
2686 /**
2687  * Removes a thread from the scheduler.
2688  * @param t The Thread to remove
2689  */
2690 void ModelChecker::remove_thread(Thread *t)
2691 {
2692         scheduler->remove_thread(t);
2693 }
2694
2695 /**
2696  * @brief Get a Thread reference by its ID
2697  * @param tid The Thread's ID
2698  * @return A Thread reference
2699  */
2700 Thread * ModelChecker::get_thread(thread_id_t tid) const
2701 {
2702         return thread_map->get(id_to_int(tid));
2703 }
2704
2705 /**
2706  * @brief Get a reference to the Thread in which a ModelAction was executed
2707  * @param act The ModelAction
2708  * @return A Thread reference
2709  */
2710 Thread * ModelChecker::get_thread(const ModelAction *act) const
2711 {
2712         return get_thread(act->get_tid());
2713 }
2714
2715 /**
2716  * @brief Check if a Thread is currently enabled
2717  * @param t The Thread to check
2718  * @return True if the Thread is currently enabled
2719  */
2720 bool ModelChecker::is_enabled(Thread *t) const
2721 {
2722         return scheduler->is_enabled(t);
2723 }
2724
2725 /**
2726  * @brief Check if a Thread is currently enabled
2727  * @param tid The ID of the Thread to check
2728  * @return True if the Thread is currently enabled
2729  */
2730 bool ModelChecker::is_enabled(thread_id_t tid) const
2731 {
2732         return scheduler->is_enabled(tid);
2733 }
2734
2735 /**
2736  * Switch from a model-checker context to a user-thread context. This is the
2737  * complement of ModelChecker::switch_to_master and must be called from the
2738  * model-checker context
2739  *
2740  * @param thread The user-thread to switch to
2741  */
2742 void ModelChecker::switch_from_master(Thread *thread)
2743 {
2744         scheduler->set_current_thread(thread);
2745         Thread::swap(&system_context, thread);
2746 }
2747
2748 /**
2749  * Switch from a user-context to the "master thread" context (a.k.a. system
2750  * context). This switch is made with the intention of exploring a particular
2751  * model-checking action (described by a ModelAction object). Must be called
2752  * from a user-thread context.
2753  *
2754  * @param act The current action that will be explored. May be NULL only if
2755  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2756  * ModelChecker::has_asserted).
2757  * @return Return the value returned by the current action
2758  */
2759 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2760 {
2761         DBG();
2762         Thread *old = thread_current();
2763         ASSERT(!old->get_pending());
2764         old->set_pending(act);
2765         if (Thread::swap(old, &system_context) < 0) {
2766                 perror("swap threads");
2767                 exit(EXIT_FAILURE);
2768         }
2769         return old->get_return_value();
2770 }
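/*
 * Typical call site (illustrative; the wrapper name and exact constructor
 * arguments are assumptions, not code from this file): an interposed atomic
 * operation packages itself as a ModelAction, blocks in switch_to_master()
 * until the checker resumes the thread, and receives the chosen result:
 *
 *	uint64_t interposed_atomic_load(void *obj, std::memory_order order)
 *	{
 *		return model->switch_to_master(
 *				new ModelAction(ATOMIC_READ, order, obj, VALUE_NONE));
 *	}
 */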
2771
2772 /**
2773  * Takes the next step in the execution, if possible.
2774  * @param curr The current step to take
2775  * @return The next Thread to run, if any; NULL if this execution
2776  * should terminate
2777  */
2778 Thread * ModelChecker::take_step(ModelAction *curr)
2779 {
2780         Thread *curr_thrd = get_thread(curr);
2781         ASSERT(curr_thrd->get_state() == THREAD_READY);
2782
2783         curr = check_current_action(curr);
2784
2785         /* Infeasible -> don't take any more steps */
2786         if (is_infeasible())
2787                 return NULL;
2788         else if (isfeasibleprefix() && have_bug_reports()) {
2789                 set_assert();
2790                 return NULL;
2791         }
2792
2793         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2794                 return NULL;
2795
2796         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2797                 scheduler->remove_thread(curr_thrd);
2798
2799         Thread *next_thrd = get_next_thread(curr);
2800
2801         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2802                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2803
2804         return next_thrd;
2805 }
2806
2807 /** Wrapper to run the user's main function, with appropriate arguments */
2808 void user_main_wrapper(void *)
2809 {
2810         user_main(model->params.argc, model->params.argv);
2811 }
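/*
 * Illustrative driver (an assumption about the entry point, which lives
 * outside this file): the checker is typically constructed and started along
 * these lines, after which run() below spawns the initial user thread around
 * user_main_wrapper:
 *
 *	struct model_params params;
 *	// ... populate params, e.g. from command-line options ...
 *	model = new ModelChecker(params);
 *	model->run();
 */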
2812
2813 /** @brief Run ModelChecker for the user program */
2814 void ModelChecker::run()
2815 {
2816         do {
2817                 thrd_t user_thread;
2818                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2819                 add_thread(t);
2820
2821                 do {
2822                         /*
2823                          * Stash next pending action(s) for thread(s). There
2824                          * should only need to stash one thread's action--the
2825                          * thread which just took a step--plus the first step
2826                          * for any newly-created thread
2827                          */
2828                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2829                                 thread_id_t tid = int_to_id(i);
2830                                 Thread *thr = get_thread(tid);
2831                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2832                                         switch_from_master(thr);
2833                                 }
2834                         }
2835
2836                         /* Catch assertions from prior take_step or from
2837                          * between-ModelAction bugs (e.g., data races) */
2838                         if (has_asserted())
2839                                 break;
2840
2841                         /* Consume the next action for a Thread */
2842                         ModelAction *curr = t->get_pending();
2843                         t->set_pending(NULL);
2844                         t = take_step(curr);
2845                 } while (t && !t->is_model_thread());
2846
2847                 /*
2848                  * Launch end-of-execution release sequence fixups only when
2849                  * the execution is otherwise feasible AND:
2850                  *
2851                  * (1) there are pending release sequences
2852                  * (2) there are pending assertions that could be invalidated
2853                  * by a change in clock vectors (i.e., data races)
2854                  * (3) there are no pending promises
2855                  */
2856                 while (!pending_rel_seqs->empty() &&
2857                                 is_feasible_prefix_ignore_relseq() &&
2858                                 !unrealizedraces.empty()) {
2859                         model_print("*** WARNING: release sequence fixup action "
2860                                         "(%zu pending release sequence(s)) ***\n",
2861                                         pending_rel_seqs->size());
2862                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2863                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2864                                         model_thread);
2865                         take_step(fixup);
2866                 }
2867         } while (next_execution());
2868
2869         model_print("******* Model-checking complete: *******\n");
2870         print_stats();
2871 }