model: only backtrack fences when acquire is before release
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
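        /* Roll the snapshotted memory state back to the start of the execution */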
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218  * might guide the choice of next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise NULL, if no
221  * threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* The correct sleep set is in the parent node. */
286                 execute_sleep_set();
287
288                 DEBUG("*** Divergence point ***\n");
289
290                 diverge = NULL;
291         } else {
292                 tid = next->get_tid();
293         }
294         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
295         ASSERT(tid != THREAD_ID_T_NONE);
296         return thread_map->get(id_to_int(tid));
297 }
298
299 /**
300  * We need to know what the next actions of all threads in the sleep
301  * set will be. This method marks each sleeping thread's pending action
302  * with the sleep flag so that it is handled accordingly.
303  */
304
305 void ModelChecker::execute_sleep_set()
306 {
307         for (unsigned int i = 0; i < get_num_threads(); i++) {
308                 thread_id_t tid = int_to_id(i);
309                 Thread *thr = get_thread(tid);
310                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
311                         thr->get_pending()->set_sleep_flag();
312                 }
313         }
314 }
315
316 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
317 {
318         for (unsigned int i = 0; i < get_num_threads(); i++) {
319                 Thread *thr = get_thread(int_to_id(i));
320                 if (scheduler->is_sleep_set(thr)) {
321                         ModelAction *pending_act = thr->get_pending();
322                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
323                                 //Remove this thread from sleep set
324                                 scheduler->remove_sleep(thr);
325                 }
326         }
327 }
328
329 /** @brief Alert the model-checker that an incorrectly-ordered
330  * synchronization was made */
331 void ModelChecker::set_bad_synchronization()
332 {
333         priv->bad_synchronization = true;
334 }
335
336 /**
337  * Check whether the current trace has triggered an assertion which should halt
338  * its execution.
339  *
340  * @return True, if the execution should be aborted; false otherwise
341  */
342 bool ModelChecker::has_asserted() const
343 {
344         return priv->asserted;
345 }
346
347 /**
348  * Trigger a trace assertion which should cause this execution to be halted.
349  * This can be due to a detected bug or due to an infeasibility that should
350  * halt ASAP.
351  */
352 void ModelChecker::set_assert()
353 {
354         priv->asserted = true;
355 }
356
357 /**
358  * Check if we are in a deadlock. Should only be called at the end of an
359  * execution, although it should not give false positives in the middle of an
360  * execution (there should be some ENABLED thread).
361  *
362  * @return True if program is in a deadlock; false otherwise
363  */
364 bool ModelChecker::is_deadlocked() const
365 {
366         bool blocking_threads = false;
367         for (unsigned int i = 0; i < get_num_threads(); i++) {
368                 thread_id_t tid = int_to_id(i);
369                 if (is_enabled(tid))
370                         return false;
371                 Thread *t = get_thread(tid);
372                 if (!t->is_model_thread() && t->get_pending())
373                         blocking_threads = true;
374         }
375         return blocking_threads;
376 }
377
378 /**
379  * Check if this is a complete execution. That is, have all threads completed
380  * execution (rather than exiting because sleep sets have forced a redundant
381  * execution).
382  *
383  * @return True if the execution is complete.
384  */
385 bool ModelChecker::is_complete_execution() const
386 {
387         for (unsigned int i = 0; i < get_num_threads(); i++)
388                 if (is_enabled(int_to_id(i)))
389                         return false;
390         return true;
391 }
392
393 /**
394  * @brief Assert a bug in the executing program.
395  *
396  * Use this function to assert any sort of bug in the user program. If the
397  * current trace is feasible (actually, a prefix of some feasible execution),
398  * then this execution will be aborted, printing the appropriate message. If
399  * the current trace is not yet feasible, the error message will be stashed and
400  * printed if the execution ever becomes feasible.
401  *
402  * @param msg Descriptive message for the bug (do not include newline char)
403  * @return True if bug is immediately-feasible
404  */
405 bool ModelChecker::assert_bug(const char *msg)
406 {
407         priv->bugs.push_back(new bug_message(msg));
408
409         if (isfeasibleprefix()) {
410                 set_assert();
411                 return true;
412         }
413         return false;
414 }
415
416 /**
417  * @brief Assert a bug in the executing program, asserted by a user thread
418  * @see ModelChecker::assert_bug
419  * @param msg Descriptive message for the bug (do not include newline char)
420  */
421 void ModelChecker::assert_user_bug(const char *msg)
422 {
423         /* If feasible bug, bail out now */
424         if (assert_bug(msg))
425                 switch_to_master(NULL);
426 }
427
428 /** @return True, if any bugs have been reported for this execution */
429 bool ModelChecker::have_bug_reports() const
430 {
431         return priv->bugs.size() != 0;
432 }
433
434 /** @brief Print bug report listing for this execution (if any bugs exist) */
435 void ModelChecker::print_bugs() const
436 {
437         if (have_bug_reports()) {
438                 model_print("Bug report: %zu bug%s detected\n",
439                                 priv->bugs.size(),
440                                 priv->bugs.size() > 1 ? "s" : "");
441                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
442                         priv->bugs[i]->print();
443         }
444 }
445
446 /**
447  * @brief Record end-of-execution stats
448  *
449  * Must be run when exiting an execution. Records various stats.
450  * @see struct execution_stats
451  */
452 void ModelChecker::record_stats()
453 {
454         stats.num_total++;
455         if (!isfeasibleprefix())
456                 stats.num_infeasible++;
457         else if (have_bug_reports())
458                 stats.num_buggy_executions++;
459         else if (is_complete_execution())
460                 stats.num_complete++;
461         else
462                 stats.num_redundant++;
463 }
464
465 /** @brief Print execution stats */
466 void ModelChecker::print_stats() const
467 {
468         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
469         model_print("Number of redundant executions: %d\n", stats.num_redundant);
470         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
471         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
472         model_print("Total executions: %d\n", stats.num_total);
473         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
474 }
475
476 /**
477  * @brief End-of-execution print
478  * @param printbugs Should any existing bugs be printed?
479  */
480 void ModelChecker::print_execution(bool printbugs) const
481 {
482         print_program_output();
483
484         if (DBG_ENABLED() || params.verbose) {
485                 model_print("Earliest divergence point since last feasible execution:\n");
486                 if (earliest_diverge)
487                         earliest_diverge->print();
488                 else
489                         model_print("(Not set)\n");
490
491                 model_print("\n");
492                 print_stats();
493         }
494
495         /* Don't print invalid bugs */
496         if (printbugs)
497                 print_bugs();
498
499         model_print("\n");
500         print_summary();
501 }
502
503 /**
504  * Queries the model-checker for more executions to explore and, if one
505  * exists, resets the model-checker state to execute a new execution.
506  *
507  * @return If there are more executions to explore, return true. Otherwise,
508  * return false.
509  */
510 bool ModelChecker::next_execution()
511 {
512         DBG();
513         /* Is this execution a feasible execution that's worth bug-checking? */
514         bool complete = isfeasibleprefix() && (is_complete_execution() ||
515                         have_bug_reports());
516
517         /* End-of-execution bug checks */
518         if (complete) {
519                 if (is_deadlocked())
520                         assert_bug("Deadlock detected");
521
522                 checkDataRaces();
523         }
524
525         record_stats();
526
527         /* Output */
528         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
529                 print_execution(complete);
530         else
531                 clear_program_output();
532
533         if (complete)
534                 earliest_diverge = NULL;
535
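        /* Fetch the next backtracking point to explore; if none remains, exploration is complete */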
536         if ((diverge = get_next_backtrack()) == NULL)
537                 return false;
538
539         if (DBG_ENABLED()) {
540                 model_print("Next execution will diverge at:\n");
541                 diverge->print();
542         }
543
544         reset_to_initial_state();
545         return true;
546 }
547
548 /**
549  * @brief Find the last fence-related backtracking conflict for a ModelAction
550  *
551  * This function performs the search for the most recent conflicting action
552  * against which we should perform backtracking, as affected by fence
553  * operations. This includes pairs of potentially-synchronizing actions which
554  * occur due to fence-acquire or fence-release, and hence should be explored in
555  * the opposite execution order.
556  *
557  * @param act The current action
558  * @return The most recent action which conflicts with act due to fences
559  */
560 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
561 {
562         /* Only perform release/acquire fence backtracking for stores */
563         if (!act->is_write())
564                 return NULL;
565
566         /* Find a fence-release (or, act is a release) */
567         ModelAction *last_release;
568         if (act->is_release())
569                 last_release = act;
570         else
571                 last_release = get_last_fence_release(act->get_tid());
572         if (!last_release)
573                 return NULL;
574
575         /* Skip past the release */
576         action_list_t *list = action_trace;
577         action_list_t::reverse_iterator rit;
578         for (rit = list->rbegin(); rit != list->rend(); rit++)
579                 if (*rit == last_release)
580                         break;
581         ASSERT(rit != list->rend());
582
583         /* Find a prior:
584          *   load-acquire
585          * or
586          *   load --sb-> fence-acquire */
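        /* Per-thread bookkeeping: the most recent fence-acquire seen so far, and a
         * same-location (non-acquire) load in that thread that might pair with it */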
587         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
588         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
589         bool found_acquire_fences = false;
590         for ( ; rit != list->rend(); rit++) {
591                 ModelAction *prev = *rit;
592                 if (act->same_thread(prev))
593                         continue;
594
595                 int tid = id_to_int(prev->get_tid());
596
597                 if (prev->is_read() && act->same_var(prev)) {
598                         if (prev->is_acquire()) {
599                                 /* Found most recent load-acquire, don't need
600                                  * to search for more fences */
601                                 if (!found_acquire_fences)
602                                         return NULL;
603                         } else {
604                                 prior_loads[tid] = prev;
605                         }
606                 }
607                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
608                         found_acquire_fences = true;
609                         acquire_fences[tid] = prev;
610                 }
611         }
612
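        /* Backtrack against the latest fence-acquire whose thread also performed a
         * plain (non-acquire) load of this location */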
613         ModelAction *latest_backtrack = NULL;
614         for (unsigned int i = 0; i < acquire_fences.size(); i++)
615                 if (acquire_fences[i] && prior_loads[i])
616                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
617                                 latest_backtrack = acquire_fences[i];
618         return latest_backtrack;
619 }
620
621 /**
622  * @brief Find the last backtracking conflict for a ModelAction
623  *
624  * This function performs the search for the most recent conflicting action
625  * against which we should perform backtracking. This primarily includes pairs of
626  * synchronizing actions which should be explored in the opposite execution
627  * order.
628  *
629  * @param act The current action
630  * @return The most recent action which conflicts with act
631  */
632 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
633 {
634         switch (act->get_type()) {
635         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
636         case ATOMIC_READ:
637         case ATOMIC_WRITE:
638         case ATOMIC_RMW: {
639                 ModelAction *ret = NULL;
640
641                 /* linear search: from most recent to oldest */
642                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
643                 action_list_t::reverse_iterator rit;
644                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
645                         ModelAction *prev = *rit;
646                         if (prev->could_synchronize_with(act)) {
647                                 ret = prev;
648                                 break;
649                         }
650                 }
651
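                /* A fence-induced conflict may postdate the direct conflict; return
                 * whichever occurs later in the trace */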
652                 ModelAction *ret2 = get_last_fence_conflict(act);
653                 if (!ret2)
654                         return ret;
655                 if (!ret)
656                         return ret2;
657                 if (*ret < *ret2)
658                         return ret2;
659                 return ret;
660         }
661         case ATOMIC_LOCK:
662         case ATOMIC_TRYLOCK: {
663                 /* linear search: from most recent to oldest */
664                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
665                 action_list_t::reverse_iterator rit;
666                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
667                         ModelAction *prev = *rit;
668                         if (act->is_conflicting_lock(prev))
669                                 return prev;
670                 }
671                 break;
672         }
673         case ATOMIC_UNLOCK: {
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (!act->same_thread(prev) && prev->is_failed_trylock())
680                                 return prev;
681                 }
682                 break;
683         }
684         case ATOMIC_WAIT: {
685                 /* linear search: from most recent to oldest */
686                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
687                 action_list_t::reverse_iterator rit;
688                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
689                         ModelAction *prev = *rit;
690                         if (!act->same_thread(prev) && prev->is_failed_trylock())
691                                 return prev;
692                         if (!act->same_thread(prev) && prev->is_notify())
693                                 return prev;
694                 }
695                 break;
696         }
697
698         case ATOMIC_NOTIFY_ALL:
699         case ATOMIC_NOTIFY_ONE: {
700                 /* linear search: from most recent to oldest */
701                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
702                 action_list_t::reverse_iterator rit;
703                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
704                         ModelAction *prev = *rit;
705                         if (!act->same_thread(prev) && prev->is_wait())
706                                 return prev;
707                 }
708                 break;
709         }
710         default:
711                 break;
712         }
713         return NULL;
714 }
715
716 /** This method finds backtracking points against which we should try to
717  * reorder the parameter ModelAction.
718  *
719  * @param act The ModelAction to find backtracking points for.
720  */
721 void ModelChecker::set_backtracking(ModelAction *act)
722 {
723         Thread *t = get_thread(act);
724         ModelAction *prev = get_last_conflict(act);
725         if (prev == NULL)
726                 return;
727
728         Node *node = prev->get_node()->get_parent();
729
730         int low_tid, high_tid;
731         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
732                 low_tid = id_to_int(act->get_tid());
733                 high_tid = low_tid + 1;
734         } else {
735                 low_tid = 0;
736                 high_tid = get_num_threads();
737         }
738
739         for (int i = low_tid; i < high_tid; i++) {
740                 thread_id_t tid = int_to_id(i);
741
742                 /* Make sure this thread can be enabled here. */
743                 if (i >= node->get_num_threads())
744                         break;
745
746                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
747                 if (node->enabled_status(tid) != THREAD_ENABLED)
748                         continue;
749
750                 /* Check if this has been explored already */
751                 if (node->has_been_explored(tid))
752                         continue;
753
754                 /* See if fairness allows */
755                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
756                         bool unfair = false;
757                         for (int t = 0; t < node->get_num_threads(); t++) {
758                                 thread_id_t tother = int_to_id(t);
759                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
760                                         unfair = true;
761                                         break;
762                                 }
763                         }
764                         if (unfair)
765                                 continue;
766                 }
767                 /* Cache the latest backtracking point */
768                 set_latest_backtrack(prev);
769
770                 /* If this is a new backtracking point, mark the tree */
771                 if (!node->set_backtrack(tid))
772                         continue;
773                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
774                                         id_to_int(prev->get_tid()),
775                                         id_to_int(t->get_id()));
776                 if (DBG_ENABLED()) {
777                         prev->print();
778                         act->print();
779                 }
780         }
781 }
782
783 /**
784  * @brief Cache a backtracking point as the "most recent", if eligible
785  *
786  * Note that this does not prepare the NodeStack for this backtracking
787  * operation; it only caches the action on a per-execution basis.
788  *
789  * @param act The operation at which we should explore a different next action
790  * (i.e., backtracking point)
791  * @return True, if this action is now the most recent backtracking point;
792  * false otherwise
793  */
794 bool ModelChecker::set_latest_backtrack(ModelAction *act)
795 {
796         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
797                 priv->next_backtrack = act;
798                 return true;
799         }
800         return false;
801 }
802
803 /**
804  * Returns last backtracking point. The model checker will explore a different
805  * path for this point in the next execution.
806  * @return The ModelAction at which the next execution should diverge.
807  */
808 ModelAction * ModelChecker::get_next_backtrack()
809 {
810         ModelAction *next = priv->next_backtrack;
811         priv->next_backtrack = NULL;
812         return next;
813 }
814
815 /**
816  * Processes a read or RMW model action.
817  * @param curr is the read model action to process.
818  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
819  * @return True if processing this read updates the mo_graph.
820  */
821 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
822 {
823         uint64_t value = VALUE_NONE;
824         bool updated = false;
825         while (true) {
826                 const ModelAction *reads_from = curr->get_node()->get_read_from();
827                 if (reads_from != NULL) {
828                         mo_graph->startChanges();
829
830                         value = reads_from->get_value();
831                         bool r_status = false;
832
833                         if (!second_part_of_rmw) {
834                                 check_recency(curr, reads_from);
835                                 r_status = r_modification_order(curr, reads_from);
836                         }
837
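                        /* If this read-from choice makes the execution infeasible, roll back
                         * the mo_graph changes and retry with the next read-from or
                         * future-value option, if one exists */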
838                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
839                                 mo_graph->rollbackChanges();
840                                 priv->too_many_reads = false;
841                                 continue;
842                         }
843
844                         read_from(curr, reads_from);
845                         mo_graph->commitChanges();
846                         mo_check_promises(curr, true);
847
848                         updated |= r_status;
849                 } else if (!second_part_of_rmw) {
850                         /* Read from future value */
851                         struct future_value fv = curr->get_node()->get_future_value();
852                         Promise *promise = new Promise(curr, fv);
853                         value = fv.value;
854                         curr->set_read_from_promise(promise);
855                         promises->push_back(promise);
856                         mo_graph->startChanges();
857                         updated = r_modification_order(curr, promise);
858                         mo_graph->commitChanges();
859                 }
860                 get_thread(curr)->set_return_value(value);
861                 return updated;
862         }
863 }
864
865 /**
866  * Processes a lock, trylock, unlock, wait, or notify model action.
867  * @param curr is the mutex model action to process.
868  *
869  * The trylock operation checks whether the lock is taken. If not, it
870  * falls through to the normal lock case; if so, the trylock fails and
871  * the thread's return value is set to 0.
872  *
873  * The lock operation has already been checked that it is enabled, so
874  * it just grabs the lock and synchronizes with the previous unlock.
875  *
876  * The unlock operation has to re-enable all of the threads that are
877  * waiting on the lock.
878  *
879  * @return True if synchronization was updated; false otherwise
880  */
881 bool ModelChecker::process_mutex(ModelAction *curr)
882 {
883         std::mutex *mutex = NULL;
884         struct std::mutex_state *state = NULL;
885
886         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
887                 mutex = (std::mutex *)curr->get_location();
888                 state = mutex->get_state();
889         } else if (curr->is_wait()) {
890                 mutex = (std::mutex *)curr->get_value();
891                 state = mutex->get_state();
892         }
893
894         switch (curr->get_type()) {
895         case ATOMIC_TRYLOCK: {
896                 bool success = !state->islocked;
897                 curr->set_try_lock(success);
898                 if (!success) {
899                         get_thread(curr)->set_return_value(0);
900                         break;
901                 }
902                 get_thread(curr)->set_return_value(1);
903         }
904                 //otherwise fall through to the lock case
905         case ATOMIC_LOCK: {
906                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
907                         assert_bug("Lock access before initialization");
908                 state->islocked = true;
909                 ModelAction *unlock = get_last_unlock(curr);
910                 //synchronize with the previous unlock statement
911                 if (unlock != NULL) {
912                         curr->synchronize_with(unlock);
913                         return true;
914                 }
915                 break;
916         }
917         case ATOMIC_UNLOCK: {
918                 //unlock the lock
919                 state->islocked = false;
920                 //wake up the other threads
921                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
922                 //activate all the waiting threads
923                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
924                         scheduler->wake(get_thread(*rit));
925                 }
926                 waiters->clear();
927                 break;
928         }
929         case ATOMIC_WAIT: {
930                 //unlock the lock
931                 state->islocked = false;
932                 //wake up the other threads
933                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
934                 //activate all the waiting threads
935                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
936                         scheduler->wake(get_thread(*rit));
937                 }
938                 waiters->clear();
939                 //check whether we should go to sleep or not...simulate spurious failures
940                 if (curr->get_node()->get_misc() == 0) {
941                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
942                         //disable us
943                         scheduler->sleep(get_thread(curr));
944                 }
945                 break;
946         }
947         case ATOMIC_NOTIFY_ALL: {
948                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
949                 //activate all the waiting threads
950                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
951                         scheduler->wake(get_thread(*rit));
952                 }
953                 waiters->clear();
954                 break;
955         }
956         case ATOMIC_NOTIFY_ONE: {
957                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
958                 int wakeupthread = curr->get_node()->get_misc();
959                 action_list_t::iterator it = waiters->begin();
960                 advance(it, wakeupthread);
961                 scheduler->wake(get_thread(*it));
962                 waiters->erase(it);
963                 break;
964         }
965
966         default:
967                 ASSERT(0);
968         }
969         return false;
970 }
971
972 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
973 {
974         /* Do more ambitious checks now that mo is more complete */
975         if (mo_may_allow(writer, reader)) {
976                 Node *node = reader->get_node();
977
978                 /* Find an ancestor thread which exists at the time of the reader */
979                 Thread *write_thread = get_thread(writer);
980                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
981                         write_thread = write_thread->get_parent();
982
983                 struct future_value fv = {
984                         writer->get_value(),
985                         writer->get_seq_number() + params.maxfuturedelay,
986                         write_thread->get_id(),
987                 };
988                 if (node->add_future_value(fv))
989                         set_latest_backtrack(reader);
990         }
991 }
992
993 /**
994  * Process a write ModelAction
995  * @param curr The ModelAction to process
996  * @return True if the mo_graph was updated or promises were resolved
997  */
998 bool ModelChecker::process_write(ModelAction *curr)
999 {
1000         bool updated_mod_order = w_modification_order(curr);
1001         bool updated_promises = resolve_promises(curr);
1002
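        /* Once all outstanding promises are resolved, hand the deferred future
         * values to their reader nodes */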
1003         if (promises->size() == 0) {
1004                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1005                         struct PendingFutureValue pfv = (*futurevalues)[i];
1006                         add_future_value(pfv.writer, pfv.act);
1007                 }
1008                 futurevalues->clear();
1009         }
1010
1011         mo_graph->commitChanges();
1012         mo_check_promises(curr, false);
1013
1014         get_thread(curr)->set_return_value(VALUE_NONE);
1015         return updated_mod_order || updated_promises;
1016 }
1017
1018 /**
1019  * Process a fence ModelAction
1020  * @param curr The ModelAction to process
1021  * @return True if synchronization was updated
1022  */
1023 bool ModelChecker::process_fence(ModelAction *curr)
1024 {
1025         /*
1026          * fence-relaxed: no-op
1027          * fence-release: only log the occurrence (not in this function), for
1028          *   use in later synchronization
1029          * fence-acquire (this function): search for hypothetical release
1030          *   sequences
1031          */
1032         bool updated = false;
1033         if (curr->is_acquire()) {
1034                 action_list_t *list = action_trace;
1035                 action_list_t::reverse_iterator rit;
1036                 /* Find X : is_read(X) && X --sb-> curr */
1037                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1038                         ModelAction *act = *rit;
1039                         if (act == curr)
1040                                 continue;
1041                         if (act->get_tid() != curr->get_tid())
1042                                 continue;
1043                         /* Stop at the beginning of the thread */
1044                         if (act->is_thread_start())
1045                                 break;
1046                         /* Stop once we reach a prior fence-acquire */
1047                         if (act->is_fence() && act->is_acquire())
1048                                 break;
1049                         if (!act->is_read())
1050                                 continue;
1051                         /* read-acquire will find its own release sequences */
1052                         if (act->is_acquire())
1053                                 continue;
1054
1055                         /* Establish hypothetical release sequences */
1056                         rel_heads_list_t release_heads;
1057                         get_release_seq_heads(curr, act, &release_heads);
1058                         for (unsigned int i = 0; i < release_heads.size(); i++)
1059                                 if (!curr->synchronize_with(release_heads[i]))
1060                                         set_bad_synchronization();
1061                         if (release_heads.size() != 0)
1062                                 updated = true;
1063                 }
1064         }
1065         return updated;
1066 }
1067
1068 /**
1069  * @brief Process the current action for thread-related activity
1070  *
1071  * Performs current-action processing for a THREAD_* ModelAction. Processing
1072  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1073  * synchronization, etc.  This function is a no-op for non-THREAD actions
1074  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1075  *
1076  * @param curr The current action
1077  * @return True if synchronization was updated or a thread completed
1078  */
1079 bool ModelChecker::process_thread_action(ModelAction *curr)
1080 {
1081         bool updated = false;
1082
1083         switch (curr->get_type()) {
1084         case THREAD_CREATE: {
1085                 thrd_t *thrd = (thrd_t *)curr->get_location();
1086                 struct thread_params *params = (struct thread_params *)curr->get_value();
1087                 Thread *th = new Thread(thrd, params->func, params->arg);
1088                 add_thread(th);
1089                 th->set_creation(curr);
1090                 /* Promises can be satisfied by children */
1091                 for (unsigned int i = 0; i < promises->size(); i++) {
1092                         Promise *promise = (*promises)[i];
1093                         if (promise->thread_is_available(curr->get_tid()))
1094                                 promise->add_thread(th->get_id());
1095                 }
1096                 break;
1097         }
1098         case THREAD_JOIN: {
1099                 Thread *blocking = curr->get_thread_operand();
1100                 ModelAction *act = get_last_action(blocking->get_id());
1101                 curr->synchronize_with(act);
1102                 updated = true; /* trigger rel-seq checks */
1103                 break;
1104         }
1105         case THREAD_FINISH: {
1106                 Thread *th = get_thread(curr);
1107                 while (!th->wait_list_empty()) {
1108                         ModelAction *act = th->pop_wait_list();
1109                         scheduler->wake(get_thread(act));
1110                 }
1111                 th->complete();
1112                 /* Completed thread can't satisfy promises */
1113                 for (unsigned int i = 0; i < promises->size(); i++) {
1114                         Promise *promise = (*promises)[i];
1115                         if (promise->thread_is_available(th->get_id()))
1116                                 if (promise->eliminate_thread(th->get_id()))
1117                                         priv->failed_promise = true;
1118                 }
1119                 updated = true; /* trigger rel-seq checks */
1120                 break;
1121         }
1122         case THREAD_START: {
1123                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1124                 break;
1125         }
1126         default:
1127                 break;
1128         }
1129
1130         return updated;
1131 }
1132
1133 /**
1134  * @brief Process the current action for release sequence fixup activity
1135  *
1136  * Performs model-checker release sequence fixups for the current action,
1137  * forcing a single pending release sequence to break (with a given, potential
1138  * "loose" write) or to complete (i.e., synchronize). If a pending release
1139  * sequence forms a complete release sequence, then we must perform the fixup
1140  * synchronization, mo_graph additions, etc.
1141  *
1142  * @param curr The current action; must be a release sequence fixup action
1143  * @param work_queue The work queue to which to add work items as they are
1144  * generated
1145  */
1146 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1147 {
1148         const ModelAction *write = curr->get_node()->get_relseq_break();
1149         struct release_seq *sequence = pending_rel_seqs->back();
1150         pending_rel_seqs->pop_back();
1151         ASSERT(sequence);
1152         ModelAction *acquire = sequence->acquire;
1153         const ModelAction *rf = sequence->rf;
1154         const ModelAction *release = sequence->release;
1155         ASSERT(acquire);
1156         ASSERT(release);
1157         ASSERT(rf);
1158         ASSERT(release->same_thread(rf));
1159
1160         if (write == NULL) {
1161                 /**
1162                  * @todo Forcing a synchronization requires that we set
1163                  * modification order constraints. For instance, we can't allow
1164                  * a fixup sequence in which two separate read-acquire
1165                  * operations read from the same sequence, where the first one
1166                  * synchronizes and the other doesn't. Essentially, we can't
1167                  * allow any writes to insert themselves between 'release' and
1168                  * 'rf'
1169                  */
1170
1171                 /* Must synchronize */
1172                 if (!acquire->synchronize_with(release)) {
1173                         set_bad_synchronization();
1174                         return;
1175                 }
1176                 /* Re-check all pending release sequences */
1177                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1178                 /* Re-check act for mo_graph edges */
1179                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1180
1181                 /* propagate synchronization to later actions */
1182                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1183                 for (; (*rit) != acquire; rit++) {
1184                         ModelAction *propagate = *rit;
1185                         if (acquire->happens_before(propagate)) {
1186                                 propagate->synchronize_with(acquire);
1187                                 /* Re-check 'propagate' for mo_graph edges */
1188                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1189                         }
1190                 }
1191         } else {
1192                 /* Break release sequence with new edges:
1193                  *   release --mo--> write --mo--> rf */
1194                 mo_graph->addEdge(release, write);
1195                 mo_graph->addEdge(write, rf);
1196         }
1197
1198         /* See if we have realized a data race */
1199         checkDataRaces();
1200 }
1201
1202 /**
1203  * Initialize the current action by performing one or more of the following
1204  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1205  * in the NodeStack, manipulating backtracking sets, allocating and
1206  * initializing clock vectors, and computing the promises to fulfill.
1207  *
1208  * @param curr The current action, as passed from the user context; may be
1209  * freed/invalidated after the execution of this function, with a different
1210  * action "returned" in its place (pass-by-reference)
1211  * @return True if curr is a newly-explored action; false otherwise
1212  */
1213 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1214 {
1215         ModelAction *newcurr;
1216
1217         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1218                 newcurr = process_rmw(*curr);
1219                 delete *curr;
1220
1221                 if (newcurr->is_rmw())
1222                         compute_promises(newcurr);
1223
1224                 *curr = newcurr;
1225                 return false;
1226         }
1227
1228         (*curr)->set_seq_number(get_next_seq_num());
1229
1230         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1231         if (newcurr) {
1232                 /* First restore type and order in case of RMW operation */
1233                 if ((*curr)->is_rmwr())
1234                         newcurr->copy_typeandorder(*curr);
1235
1236                 ASSERT((*curr)->get_location() == newcurr->get_location());
1237                 newcurr->copy_from_new(*curr);
1238
1239                 /* Discard duplicate ModelAction; use action from NodeStack */
1240                 delete *curr;
1241
1242                 /* Always compute new clock vector */
1243                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1244
1245                 *curr = newcurr;
1246                 return false; /* Action was explored previously */
1247         } else {
1248                 newcurr = *curr;
1249
1250                 /* Always compute new clock vector */
1251                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1252
1253                 /* Assign most recent release fence */
1254                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1255
1256                 /*
1257                  * Perform one-time actions when pushing new ModelAction onto
1258                  * NodeStack
1259                  */
1260                 if (newcurr->is_write())
1261                         compute_promises(newcurr);
1262                 else if (newcurr->is_relseq_fixup())
1263                         compute_relseq_breakwrites(newcurr);
1264                 else if (newcurr->is_wait())
1265                         newcurr->get_node()->set_misc_max(2);
1266                 else if (newcurr->is_notify_one()) {
1267                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1268                 }
1269                 return true; /* This was a new ModelAction */
1270         }
1271 }
1272
1273 /**
1274  * @brief Establish reads-from relation between two actions
1275  *
1276  * Perform basic operations involved with establishing a concrete rf relation,
1277  * including setting the ModelAction data and checking for release sequences.
1278  *
1279  * @param act The action that is reading (must be a read)
1280  * @param rf The action from which we are reading (must be a write)
1281  *
1282  * @return True if this read established synchronization
1283  */
1284 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1285 {
1286         act->set_read_from(rf);
1287         if (rf != NULL && act->is_acquire()) {
1288                 rel_heads_list_t release_heads;
1289                 get_release_seq_heads(act, act, &release_heads);
1290                 int num_heads = release_heads.size();
1291                 for (unsigned int i = 0; i < release_heads.size(); i++)
1292                         if (!act->synchronize_with(release_heads[i])) {
1293                                 set_bad_synchronization();
1294                                 num_heads--;
1295                         }
1296                 return num_heads > 0;
1297         }
1298         return false;
1299 }
1300
1301 /**
1302  * @brief Check whether a model action is enabled.
1303  *
1304  * Checks whether a lock or join operation would be successful (i.e., whether
1305  * the lock is free, or the joined thread is already complete). If not, put
1306  * the action in a waiter list.
1307  *
1308  * @param curr is the ModelAction to check whether it is enabled.
1309  * @return a bool that indicates whether the action is enabled.
1310  */
1311 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1312         if (curr->is_lock()) {
1313                 std::mutex *lock = (std::mutex *)curr->get_location();
1314                 struct std::mutex_state *state = lock->get_state();
1315                 if (state->islocked) {
1316                         //Stick the action in the appropriate waiting queue
1317                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1318                         return false;
1319                 }
1320         } else if (curr->get_type() == THREAD_JOIN) {
1321                 Thread *blocking = (Thread *)curr->get_location();
1322                 if (!blocking->is_complete()) {
1323                         blocking->push_wait_list(curr);
1324                         return false;
1325                 }
1326         }
1327
1328         return true;
1329 }
1330
1331 /**
1332  * This is the heart of the model checker routine. It performs model-checking
1333  * actions corresponding to a given "current action." Among other processes, it
1334  * calculates reads-from relationships, updates synchronization clock vectors,
1335  * forms a memory_order constraints graph, and handles replay/backtrack
1336  * execution when running permutations of previously-observed executions.
1337  *
1338  * @param curr The current action to process
1339  * @return The ModelAction that is actually executed; may be different than
1340  * curr; may be NULL, if the current action is not enabled to run
1341  */
1342 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1343 {
1344         ASSERT(curr);
1345         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1346
1347         if (!check_action_enabled(curr)) {
1348                 /* Make the execution look like we chose to run this action
1349                  * much later, when a lock/join can succeed */
1350                 get_thread(curr)->set_pending(curr);
1351                 scheduler->sleep(get_thread(curr));
1352                 return NULL;
1353         }
1354
1355         bool newly_explored = initialize_curr_action(&curr);
1356
1357         DBG();
1358         if (DBG_ENABLED())
1359                 curr->print();
1360
1361         wake_up_sleeping_actions(curr);
1362
1363         /* Add the action to lists before any other model-checking tasks */
1364         if (!second_part_of_rmw)
1365                 add_action_to_lists(curr);
1366
1367         /* Build may_read_from set for newly-created actions */
1368         if (newly_explored && curr->is_read())
1369                 build_reads_from_past(curr);
1370
1371         /* Initialize work_queue with the "current action" work */
1372         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1373         while (!work_queue.empty() && !has_asserted()) {
1374                 WorkQueueEntry work = work_queue.front();
1375                 work_queue.pop_front();
1376
1377                 switch (work.type) {
1378                 case WORK_CHECK_CURR_ACTION: {
1379                         ModelAction *act = work.action;
1380                         bool update = false; /* update this location's release seq's */
1381                         bool update_all = false; /* update all release seq's */
1382
1383                         if (process_thread_action(curr))
1384                                 update_all = true;
1385
1386                         if (act->is_read() && process_read(act, second_part_of_rmw))
1387                                 update = true;
1388
1389                         if (act->is_write() && process_write(act))
1390                                 update = true;
1391
1392                         if (act->is_fence() && process_fence(act))
1393                                 update_all = true;
1394
1395                         if (act->is_mutex_op() && process_mutex(act))
1396                                 update_all = true;
1397
1398                         if (act->is_relseq_fixup())
1399                                 process_relseq_fixup(curr, &work_queue);
1400
1401                         if (update_all)
1402                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1403                         else if (update)
1404                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1405                         break;
1406                 }
1407                 case WORK_CHECK_RELEASE_SEQ:
1408                         resolve_release_sequences(work.location, &work_queue);
1409                         break;
1410                 case WORK_CHECK_MO_EDGES: {
1411                         /** @todo Complete verification of work_queue */
1412                         ModelAction *act = work.action;
1413                         bool updated = false;
1414
1415                         if (act->is_read()) {
1416                                 const ModelAction *rf = act->get_reads_from();
1417                                 const Promise *promise = act->get_reads_from_promise();
1418                                 if (rf) {
1419                                         if (r_modification_order(act, rf))
1420                                                 updated = true;
1421                                 } else if (promise) {
1422                                         if (r_modification_order(act, promise))
1423                                                 updated = true;
1424                                 }
1425                         }
1426                         if (act->is_write()) {
1427                                 if (w_modification_order(act))
1428                                         updated = true;
1429                         }
1430                         mo_graph->commitChanges();
1431
1432                         if (updated)
1433                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1434                         break;
1435                 }
1436                 default:
1437                         ASSERT(false);
1438                         break;
1439                 }
1440         }
1441
1442         check_curr_backtracking(curr);
1443         set_backtracking(curr);
1444         return curr;
1445 }
1446
1447 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1448 {
1449         Node *currnode = curr->get_node();
1450         Node *parnode = currnode->get_parent();
1451
1452         if ((parnode && !parnode->backtrack_empty()) ||
1453                          !currnode->misc_empty() ||
1454                          !currnode->read_from_empty() ||
1455                          !currnode->future_value_empty() ||
1456                          !currnode->promise_empty() ||
1457                          !currnode->relseq_break_empty()) {
1458                 set_latest_backtrack(curr);
1459         }
1460 }
1461
1462 bool ModelChecker::promises_expired() const
1463 {
1464         for (unsigned int i = 0; i < promises->size(); i++) {
1465                 Promise *promise = (*promises)[i];
1466                 if (promise->get_expiration() < priv->used_sequence_numbers)
1467                         return true;
1468         }
1469         return false;
1470 }
1471
1472 /**
1473  * This is the strongest feasibility check available.
1474  * @return whether the current trace (partial or complete) must be a prefix of
1475  * a feasible trace.
1476  */
1477 bool ModelChecker::isfeasibleprefix() const
1478 {
1479         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1480 }
1481
1482 /**
1483  * Print diagnostic information about an infeasible execution
1484  * @param prefix A string to prefix the output with; if NULL, then a default
1485  * message prefix will be provided
1486  */
1487 void ModelChecker::print_infeasibility(const char *prefix) const
1488 {
1489         char buf[100];
1490         char *ptr = buf;
1491         if (mo_graph->checkForCycles())
1492                 ptr += sprintf(ptr, "[mo cycle]");
1493         if (priv->failed_promise)
1494                 ptr += sprintf(ptr, "[failed promise]");
1495         if (priv->too_many_reads)
1496                 ptr += sprintf(ptr, "[too many reads]");
1497         if (priv->no_valid_reads)
1498                 ptr += sprintf(ptr, "[no valid reads-from]");
1499         if (priv->bad_synchronization)
1500                 ptr += sprintf(ptr, "[bad sw ordering]");
1501         if (promises_expired())
1502                 ptr += sprintf(ptr, "[promise expired]");
1503         if (promises->size() != 0)
1504                 ptr += sprintf(ptr, "[unresolved promise]");
1505         if (ptr != buf)
1506                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1507 }
1508
1509 /**
1510  * Returns whether the current completed trace is feasible, except for pending
1511  * release sequences.
1512  */
1513 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1514 {
1515         return !is_infeasible() && promises->size() == 0;
1516 }
1517
1518 /**
1519  * Check if the current partial trace is infeasible. Does not check any
1520  * end-of-execution flags, which might rule out the execution. Thus, this is
1521  * useful only for ruling an execution as infeasible.
1522  * @return whether the current partial trace is infeasible.
1523  */
1524 bool ModelChecker::is_infeasible() const
1525 {
1526         return mo_graph->checkForCycles() ||
1527                 priv->no_valid_reads ||
1528                 priv->failed_promise ||
1529                 priv->too_many_reads ||
1530                 priv->bad_synchronization ||
1531                 promises_expired();
1532 }
1533
1534 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1535 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1536         ModelAction *lastread = get_last_action(act->get_tid());
1537         lastread->process_rmw(act);
1538         if (act->is_rmw()) {
1539                 if (lastread->get_reads_from())
1540                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1541                 else
1542                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1543                 mo_graph->commitChanges();
1544         }
1545         return lastread;
1546 }
1547
1548 /**
1549  * Checks whether a thread has read from the same write too many times
1550  * without seeing the effects of a later write.
1551  *
1552  * Basic idea:
1553  * 1) there must be a different write that we could read from that would satisfy the modification order,
1554  * 2) we must have read from the same value in excess of maxreads times, and
1555  * 3) that other write must have been in the reads_from set for maxreads times.
1556  *
1557  * If so, we decide that the execution is no longer feasible.
1558  */
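/* Illustrative sketch (hypothetical program under test, assuming
 * params.maxreads is nonzero): a relaxed spin loop that keeps reading the
 * stale value 0, even though the store of 1 stays in its may-read-from set,
 * is eventually cut off by this check.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_relaxed);
 *   // Thread 2: while (x.load(std::memory_order_relaxed) == 0)
 *   //                   ;   // reading 0 more than maxreads times => infeasible
 */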
1559 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1560 {
1561         if (params.maxreads != 0) {
1562                 if (curr->get_node()->get_read_from_size() <= 1)
1563                         return;
1564                 //Must make sure that execution is currently feasible...  We could
1565                 //accidentally clear by rolling back
1566                 if (is_infeasible())
1567                         return;
1568                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1569                 int tid = id_to_int(curr->get_tid());
1570
1571                 /* Skip checks */
1572                 if ((int)thrd_lists->size() <= tid)
1573                         return;
1574                 action_list_t *list = &(*thrd_lists)[tid];
1575
1576                 action_list_t::reverse_iterator rit = list->rbegin();
1577                 /* Skip past curr */
1578                 for (; (*rit) != curr; rit++)
1579                         ;
1580                 /* go past curr now */
1581                 rit++;
1582
1583                 action_list_t::reverse_iterator ritcopy = rit;
1584                 //See if we have enough reads from the same value
1585                 int count = 0;
1586                 for (; count < params.maxreads; rit++, count++) {
1587                         if (rit == list->rend())
1588                                 return;
1589                         ModelAction *act = *rit;
1590                         if (!act->is_read())
1591                                 return;
1592
1593                         if (act->get_reads_from() != rf)
1594                                 return;
1595                         if (act->get_node()->get_read_from_size() <= 1)
1596                                 return;
1597                 }
1598                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1599                         /* Get write */
1600                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1601
1602                         /* Need a different write */
1603                         if (write == rf)
1604                                 continue;
1605
1606                         /* Test to see whether this is a feasible write to read from */
1607                         /** NOTE: all members of read-from set should be
1608                          *  feasible, so we no longer check it here **/
1609
1610                         rit = ritcopy;
1611
1612                         bool feasiblewrite = true;
1613                 //now we need to see if this write works for everyone
1614
1615                         for (int loop = count; loop > 0; loop--, rit++) {
1616                                 ModelAction *act = *rit;
1617                                 bool foundvalue = false;
1618                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1619                                         if (act->get_node()->get_read_from_at(j) == write) {
1620                                                 foundvalue = true;
1621                                                 break;
1622                                         }
1623                                 }
1624                                 if (!foundvalue) {
1625                                         feasiblewrite = false;
1626                                         break;
1627                                 }
1628                         }
1629                         if (feasiblewrite) {
1630                                 priv->too_many_reads = true;
1631                                 return;
1632                         }
1633                 }
1634         }
1635 }
1636
1637 /**
1638  * Updates the mo_graph with the constraints imposed from the current
1639  * read.
1640  *
1641  * Basic idea is the following: Go through each other thread and find
1642  * the last action that happened before our read.  Two cases:
1643  *
1644  * (1) The action is a write => that write must either occur before
1645  * the write we read from or be the write we read from.
1646  *
1647  * (2) The action is a read => the write that that action read from
1648  * must occur before the write we read from or be the same write.
1649  *
1650  * @param curr The current action. Must be a read.
1651  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1652  * @return True if modification order edges were added; false otherwise
1653  */
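/* Illustrative sketch of case (1) (hypothetical program under test): the store
 * x=1 is sequenced before (and therefore happens before) the load, so if the
 * load reads x=2 from another thread, this routine adds the modification order
 * edge (x=1) --mo--> (x=2).
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_relaxed);
 *   //           r = x.load(std::memory_order_relaxed);   // suppose r == 2
 *   // Thread 2: x.store(2, std::memory_order_relaxed);
 */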
1654 template <typename rf_type>
1655 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1656 {
1657         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1658         unsigned int i;
1659         bool added = false;
1660         ASSERT(curr->is_read());
1661
1662         /* Last SC fence in the current thread */
1663         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1664
1665         /* Iterate over all threads */
1666         for (i = 0; i < thrd_lists->size(); i++) {
1667                 /* Last SC fence in thread i */
1668                 ModelAction *last_sc_fence_thread_local = NULL;
1669                 if (int_to_id((int)i) != curr->get_tid())
1670                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1671
1672                 /* Last SC fence in thread i, before last SC fence in current thread */
1673                 ModelAction *last_sc_fence_thread_before = NULL;
1674                 if (last_sc_fence_local)
1675                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1676
1677                 /* Iterate over actions in thread, starting from most recent */
1678                 action_list_t *list = &(*thrd_lists)[i];
1679                 action_list_t::reverse_iterator rit;
1680                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1681                         ModelAction *act = *rit;
1682
1683                         if (act->is_write() && !act->equals(rf) && act != curr) {
1684                                 /* C++, Section 29.3 statement 5 */
1685                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1686                                                 *act < *last_sc_fence_thread_local) {
1687                                         added = mo_graph->addEdge(act, rf) || added;
1688                                         break;
1689                                 }
1690                                 /* C++, Section 29.3 statement 4 */
1691                                 else if (act->is_seqcst() && last_sc_fence_local &&
1692                                                 *act < *last_sc_fence_local) {
1693                                         added = mo_graph->addEdge(act, rf) || added;
1694                                         break;
1695                                 }
1696                                 /* C++, Section 29.3 statement 6 */
1697                                 else if (last_sc_fence_thread_before &&
1698                                                 *act < *last_sc_fence_thread_before) {
1699                                         added = mo_graph->addEdge(act, rf) || added;
1700                                         break;
1701                                 }
1702                         }
1703
1704                         /*
1705                          * Include at most one act per-thread that "happens
1706                          * before" curr. Don't consider reflexively.
1707                          */
1708                         if (act->happens_before(curr) && act != curr) {
1709                                 if (act->is_write()) {
1710                                         if (!act->equals(rf)) {
1711                                                 added = mo_graph->addEdge(act, rf) || added;
1712                                         }
1713                                 } else {
1714                                         const ModelAction *prevreadfrom = act->get_reads_from();
1715                                         //if the previous read is unresolved, keep going...
1716                                         if (prevreadfrom == NULL)
1717                                                 continue;
1718
1719                                         if (!prevreadfrom->equals(rf)) {
1720                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1721                                         }
1722                                 }
1723                                 break;
1724                         }
1725                 }
1726         }
1727
1728         /*
1729          * All compatible, thread-exclusive promises must be ordered after any
1730          * concrete loads from the same thread
1731          */
1732         for (unsigned int i = 0; i < promises->size(); i++)
1733                 if ((*promises)[i]->is_compatible_exclusive(curr))
1734                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1735
1736         return added;
1737 }
1738
1739 /**
1740  * Updates the mo_graph with the constraints imposed from the current write.
1741  *
1742  * Basic idea is the following: Go through each other thread and find
1743  * the latest action that happened before our write.  Two cases:
1744  *
1745  * (1) The action is a write => that write must occur before
1746  * the current write
1747  *
1748  * (2) The action is a read => the write that that action read from
1749  * must occur before the current write.
1750  *
1751  * This method also handles two other issues:
1752  *
1753  * (I) Sequential Consistency: Making sure that if the current write is
1754  * seq_cst, that it occurs after the previous seq_cst write.
1755  *
1756  * (II) Sending the write back to non-synchronizing reads.
1757  *
1758  * @param curr The current action. Must be a write.
1759  * @return True if modification order edges were added; false otherwise
1760  */
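/* Illustrative sketch of case (2) (hypothetical program under test): the load
 * below reads x=1 and is sequenced before the store x=2, so this routine adds
 * the modification order edge (x=1) --mo--> (x=2).
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_relaxed);
 *   // Thread 2: r = x.load(std::memory_order_relaxed);   // suppose r == 1
 *   //           x.store(2, std::memory_order_relaxed);
 */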
1761 bool ModelChecker::w_modification_order(ModelAction *curr)
1762 {
1763         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1764         unsigned int i;
1765         bool added = false;
1766         ASSERT(curr->is_write());
1767
1768         if (curr->is_seqcst()) {
1769                 /* We have to at least see the last sequentially consistent write,
1770                          so we are initialized. */
1771                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1772                 if (last_seq_cst != NULL) {
1773                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1774                 }
1775         }
1776
1777         /* Last SC fence in the current thread */
1778         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1779
1780         /* Iterate over all threads */
1781         for (i = 0; i < thrd_lists->size(); i++) {
1782                 /* Last SC fence in thread i, before last SC fence in current thread */
1783                 ModelAction *last_sc_fence_thread_before = NULL;
1784                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1785                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1786
1787                 /* Iterate over actions in thread, starting from most recent */
1788                 action_list_t *list = &(*thrd_lists)[i];
1789                 action_list_t::reverse_iterator rit;
1790                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1791                         ModelAction *act = *rit;
1792                         if (act == curr) {
1793                                 /*
1794                                  * 1) If RMW and it actually read from something, then we
1795                                  * already have all relevant edges, so just skip to next
1796                                  * thread.
1797                                  *
1798                                  * 2) If RMW and it didn't read from anything, we should add
1799                                  * whatever edge we can get to speed up convergence.
1800                                  *
1801                                  * 3) If normal write, we need to look at earlier actions, so
1802                                  * continue processing list.
1803                                  */
1804                                 if (curr->is_rmw()) {
1805                                         if (curr->get_reads_from() != NULL)
1806                                                 break;
1807                                         else
1808                                                 continue;
1809                                 } else
1810                                         continue;
1811                         }
1812
1813                         /* C++, Section 29.3 statement 7 */
1814                         if (last_sc_fence_thread_before && act->is_write() &&
1815                                         *act < *last_sc_fence_thread_before) {
1816                                 added = mo_graph->addEdge(act, curr) || added;
1817                                 break;
1818                         }
1819
1820                         /*
1821                          * Include at most one act per-thread that "happens
1822                          * before" curr
1823                          */
1824                         if (act->happens_before(curr)) {
1825                                 /*
1826                                  * Note: if act is RMW, just add edge:
1827                                  *   act --mo--> curr
1828                                  * The following edge should be handled elsewhere:
1829                                  *   readfrom(act) --mo--> act
1830                                  */
1831                                 if (act->is_write())
1832                                         added = mo_graph->addEdge(act, curr) || added;
1833                                 else if (act->is_read()) {
1834                                         //if the previous read is unresolved, just keep going
1835                                         if (act->get_reads_from() == NULL)
1836                                                 continue;
1837                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1838                                 }
1839                                 break;
1840                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1841                                                      !act->same_thread(curr)) {
1842                                 /* We have an action that:
1843                                    (1) did not happen before us
1844                                    (2) is a read and we are a write
1845                                    (3) cannot synchronize with us
1846                                    (4) is in a different thread
1847                                    =>
1848                                    that read could potentially read from our write.  Note that
1849                                    these checks are overly conservative at this point, we'll
1850                                    do more checks before actually removing the
1851                                    pendingfuturevalue.
1852
1853                                  */
1854                                 if (thin_air_constraint_may_allow(curr, act)) {
1855                                         if (!is_infeasible())
1856                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1857                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1858                                                 add_future_value(curr, act);
1859                                 }
1860                         }
1861                 }
1862         }
1863
1864         /*
1865          * All compatible, thread-exclusive promises must be ordered after any
1866          * concrete stores to the same thread, or else they can be merged with
1867          * this store later
1868          */
1869         for (unsigned int i = 0; i < promises->size(); i++)
1870                 if ((*promises)[i]->is_compatible_exclusive(curr))
1871                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1872
1873         return added;
1874 }
1875
1876 /** Arbitrary reads from the future are not allowed.  Section 29.3
1877  * part 9 places some constraints.  This method checks one result of that
1878  * constraint.  Others require compiler support. */
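/* Illustrative sketch (hypothetical program under test): two relaxed RMWs on
 * the same variable must not read from each other in a cycle.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: a = x.fetch_add(1, std::memory_order_relaxed);
 *   // Thread 2: b = x.fetch_add(1, std::memory_order_relaxed);
 *
 * If the writer RMW already (transitively) reads from the reader RMW, this
 * check returns false so the reader is not also fed the writer's value, which
 * would create an out-of-thin-air cycle. */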
1879 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1880 {
1881         if (!writer->is_rmw())
1882                 return true;
1883
1884         if (!reader->is_rmw())
1885                 return true;
1886
1887         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1888                 if (search == reader)
1889                         return false;
1890                 if (search->get_tid() == reader->get_tid() &&
1891                                 search->happens_before(reader))
1892                         break;
1893         }
1894
1895         return true;
1896 }
1897
1898 /**
1899  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1900  * some constraints. This method checks the following constraint (others
1901  * require compiler support):
1902  *
1903  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1904  */
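/* Illustrative sketch (hypothetical program under test): here X --hb--> Y, and
 * if X read from Z then coherence would require Z --mo--> Y, contradicting the
 * assumed Y --mo--> Z; so this check refuses to let X read the future value 2.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: r = x.load(std::memory_order_relaxed);   // X
 *   //           x.store(1, std::memory_order_relaxed);   // Y
 *   // Thread 2: x.store(2, std::memory_order_relaxed);   // Z, assuming Y --mo--> Z
 */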
1905 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1906 {
1907         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1908         unsigned int i;
1909         /* Iterate over all threads */
1910         for (i = 0; i < thrd_lists->size(); i++) {
1911                 const ModelAction *write_after_read = NULL;
1912
1913                 /* Iterate over actions in thread, starting from most recent */
1914                 action_list_t *list = &(*thrd_lists)[i];
1915                 action_list_t::reverse_iterator rit;
1916                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1917                         ModelAction *act = *rit;
1918
1919                         /* Don't disallow due to act == reader */
1920                         if (!reader->happens_before(act) || reader == act)
1921                                 break;
1922                         else if (act->is_write())
1923                                 write_after_read = act;
1924                         else if (act->is_read() && act->get_reads_from() != NULL)
1925                                 write_after_read = act->get_reads_from();
1926                 }
1927
1928                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1929                         return false;
1930         }
1931         return true;
1932 }
1933
1934 /**
1935  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1936  * The ModelAction under consideration is expected to be taking part in
1937  * release/acquire synchronization as an object of the "reads from" relation.
1938  * Note that this can only provide release sequence support for RMW chains
1939  * which do not read from the future, as those actions cannot be traced until
1940  * their "promise" is fulfilled. Similarly, we may not even establish the
1941  * presence of a release sequence with certainty, as some modification order
1942  * constraints may be decided further in the future. Thus, this function
1943  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1944  * and a boolean representing certainty.
1945  *
1946  * @param rf The action that might be part of a release sequence. Must be a
1947  * write.
1948  * @param release_heads A pass-by-reference style return parameter. After
1949  * execution of this function, release_heads will contain the heads of all the
1950  * relevant release sequences, if any exist with certainty
1951  * @param pending A pass-by-reference style return parameter which is only used
1952  * when returning false (i.e., uncertain). Returns information regarding
1953  * an uncertain release sequence, including any write operations that might
1954  * break the sequence.
1955  * @return true, if the ModelChecker is certain that release_heads is complete;
1956  * false otherwise
1957  */
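/* Illustrative sketch (hypothetical program under test): an RMW continues a
 * release sequence, so an acquire load that reads from the fetch_add below
 * still gets Thread 1's release store reported as its release head.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_release);      // release head
 *   // Thread 2: x.fetch_add(1, std::memory_order_relaxed);  // RMW extends the sequence
 *   // Thread 3: if (x.load(std::memory_order_acquire) == 2)
 *   //                   ...;  // synchronizes with Thread 1's store
 */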
1958 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1959                 rel_heads_list_t *release_heads,
1960                 struct release_seq *pending) const
1961 {
1962         /* Only check for release sequences if there are no cycles */
1963         if (mo_graph->checkForCycles())
1964                 return false;
1965
1966         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1967                 ASSERT(rf->is_write());
1968
1969                 if (rf->is_release())
1970                         release_heads->push_back(rf);
1971                 else if (rf->get_last_fence_release())
1972                         release_heads->push_back(rf->get_last_fence_release());
1973                 if (!rf->is_rmw())
1974                         break; /* End of RMW chain */
1975
1976                 /** @todo Need to be smarter here...  In the linux lock
1977                  * example, this will run to the beginning of the program for
1978                  * every acquire. */
1979                 /** @todo The way to be smarter here is to keep going until 1
1980                  * thread has a release preceded by an acquire and you've seen
1981                  *       both. */
1982
1983                 /* acq_rel RMW is a sufficient stopping condition */
1984                 if (rf->is_acquire() && rf->is_release())
1985                         return true; /* complete */
1986         }
1987         if (!rf) {
1988                 /* read from future: need to settle this later */
1989                 pending->rf = NULL;
1990                 return false; /* incomplete */
1991         }
1992
1993         if (rf->is_release())
1994                 return true; /* complete */
1995
1996         /* else relaxed write
1997          * - check for fence-release in the same thread (29.8, stmt. 3)
1998          * - check modification order for contiguous subsequence
1999          *   -> rf must be same thread as release */
2000
2001         const ModelAction *fence_release = rf->get_last_fence_release();
2002         /* Synchronize with a fence-release unconditionally; we don't need to
2003          * find any more "contiguous subsequence..." for it */
2004         if (fence_release)
2005                 release_heads->push_back(fence_release);
2006
2007         int tid = id_to_int(rf->get_tid());
2008         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2009         action_list_t *list = &(*thrd_lists)[tid];
2010         action_list_t::const_reverse_iterator rit;
2011
2012         /* Find rf in the thread list */
2013         rit = std::find(list->rbegin(), list->rend(), rf);
2014         ASSERT(rit != list->rend());
2015
2016         /* Find the last {write,fence}-release */
2017         for (; rit != list->rend(); rit++) {
2018                 if (fence_release && *(*rit) < *fence_release)
2019                         break;
2020                 if ((*rit)->is_release())
2021                         break;
2022         }
2023         if (rit == list->rend()) {
2024                 /* No write-release in this thread */
2025                 return true; /* complete */
2026         } else if (fence_release && *(*rit) < *fence_release) {
2027                 /* The fence-release is more recent (and so, "stronger") than
2028                  * the most recent write-release */
2029                 return true; /* complete */
2030         } /* else, need to establish contiguous release sequence */
2031         ModelAction *release = *rit;
2032
2033         ASSERT(rf->same_thread(release));
2034
2035         pending->writes.clear();
2036
2037         bool certain = true;
2038         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2039                 if (id_to_int(rf->get_tid()) == (int)i)
2040                         continue;
2041                 list = &(*thrd_lists)[i];
2042
2043                 /* Can we ensure no future writes from this thread may break
2044                  * the release seq? */
2045                 bool future_ordered = false;
2046
2047                 ModelAction *last = get_last_action(int_to_id(i));
2048                 Thread *th = get_thread(int_to_id(i));
2049                 if ((last && rf->happens_before(last)) ||
2050                                 !is_enabled(th) ||
2051                                 th->is_complete())
2052                         future_ordered = true;
2053
2054                 ASSERT(!th->is_model_thread() || future_ordered);
2055
2056                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2057                         const ModelAction *act = *rit;
2058                         /* Reach synchronization -> this thread is complete */
2059                         if (act->happens_before(release))
2060                                 break;
2061                         if (rf->happens_before(act)) {
2062                                 future_ordered = true;
2063                                 continue;
2064                         }
2065
2066                         /* Only non-RMW writes can break release sequences */
2067                         if (!act->is_write() || act->is_rmw())
2068                                 continue;
2069
2070                         /* Check modification order */
2071                         if (mo_graph->checkReachable(rf, act)) {
2072                                 /* rf --mo--> act */
2073                                 future_ordered = true;
2074                                 continue;
2075                         }
2076                         if (mo_graph->checkReachable(act, release))
2077                                 /* act --mo--> release */
2078                                 break;
2079                         if (mo_graph->checkReachable(release, act) &&
2080                                       mo_graph->checkReachable(act, rf)) {
2081                                 /* release --mo-> act --mo--> rf */
2082                                 return true; /* complete */
2083                         }
2084                         /* act may break release sequence */
2085                         pending->writes.push_back(act);
2086                         certain = false;
2087                 }
2088                 if (!future_ordered)
2089                         certain = false; /* This thread is uncertain */
2090         }
2091
2092         if (certain) {
2093                 release_heads->push_back(release);
2094                 pending->writes.clear();
2095         } else {
2096                 pending->release = release;
2097                 pending->rf = rf;
2098         }
2099         return certain;
2100 }
2101
2102 /**
2103  * An interface for getting the release sequence head(s) with which a
2104  * given ModelAction must synchronize. This function only returns a non-empty
2105  * result when it can locate a release sequence head with certainty. Otherwise,
2106  * it may mark the internal state of the ModelChecker so that it will handle
2107  * the release sequence at a later time, causing @a acquire to update its
2108  * synchronization at some later point in execution.
2109  *
2110  * @param acquire The 'acquire' action that may synchronize with a release
2111  * sequence
2112  * @param read The read action that may read from a release sequence; this may
2113  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2114  * when 'acquire' is a fence-acquire)
2115  * @param release_heads A pass-by-reference return parameter. Will be filled
2116  * with the head(s) of the release sequence(s), if they exist with certainty.
2117  * @see ModelChecker::release_seq_heads
2118  */
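/* Illustrative sketch (hypothetical program under test) of the acquire/read
 * distinction: with a fence-acquire, 'read' is an earlier relaxed load in the
 * same thread and 'acquire' is the fence that actually synchronizes.
 *
 *   r = x.load(std::memory_order_relaxed);                 // 'read'
 *   std::atomic_thread_fence(std::memory_order_acquire);   // 'acquire'
 */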
2119 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2120                 ModelAction *read, rel_heads_list_t *release_heads)
2121 {
2122         const ModelAction *rf = read->get_reads_from();
2123         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2124         sequence->acquire = acquire;
2125         sequence->read = read;
2126
2127         if (!release_seq_heads(rf, release_heads, sequence)) {
2128                 /* add act to 'lazy checking' list */
2129                 pending_rel_seqs->push_back(sequence);
2130         } else {
2131                 snapshot_free(sequence);
2132         }
2133 }
2134
2135 /**
2136  * Attempt to resolve all stashed operations that might synchronize with a
2137  * release sequence for a given location. This implements the "lazy" portion of
2138  * determining whether or not a release sequence was contiguous, since not all
2139  * modification order information is present at the time an action occurs.
2140  *
2141  * @param location The location/object that should be checked for release
2142  * sequence resolutions. A NULL value means to check all locations.
2143  * @param work_queue The work queue to which to add work items as they are
2144  * generated
2145  * @return True if any updates occurred (new synchronization, new mo_graph
2146  * edges)
2147  */
2148 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2149 {
2150         bool updated = false;
2151         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2152         while (it != pending_rel_seqs->end()) {
2153                 struct release_seq *pending = *it;
2154                 ModelAction *acquire = pending->acquire;
2155                 const ModelAction *read = pending->read;
2156
2157                 /* Only resolve sequences on the given location, if provided */
2158                 if (location && read->get_location() != location) {
2159                         it++;
2160                         continue;
2161                 }
2162
2163                 const ModelAction *rf = read->get_reads_from();
2164                 rel_heads_list_t release_heads;
2165                 bool complete;
2166                 complete = release_seq_heads(rf, &release_heads, pending);
2167                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2168                         if (!acquire->has_synchronized_with(release_heads[i])) {
2169                                 if (acquire->synchronize_with(release_heads[i]))
2170                                         updated = true;
2171                                 else
2172                                         set_bad_synchronization();
2173                         }
2174                 }
2175
2176                 if (updated) {
2177                         /* Re-check all pending release sequences */
2178                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2179                         /* Re-check read-acquire for mo_graph edges */
2180                         if (acquire->is_read())
2181                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2182
2183                         /* propagate synchronization to later actions */
2184                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2185                         for (; (*rit) != acquire; rit++) {
2186                                 ModelAction *propagate = *rit;
2187                                 if (acquire->happens_before(propagate)) {
2188                                         propagate->synchronize_with(acquire);
2189                                         /* Re-check 'propagate' for mo_graph edges */
2190                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2191                                 }
2192                         }
2193                 }
2194                 if (complete) {
2195                         it = pending_rel_seqs->erase(it);
2196                         snapshot_free(pending);
2197                 } else {
2198                         it++;
2199                 }
2200         }
2201
2202         // If we resolved promises or added synchronization, see if we have realized a data race.
2203         checkDataRaces();
2204
2205         return updated;
2206 }
2207
2208 /**
2209  * Performs various bookkeeping operations for the current ModelAction. For
2210  * instance, adds action to the per-object, per-thread action vector and to the
2211  * action trace list of all thread actions.
2212  *
2213  * @param act is the ModelAction to add.
2214  */
2215 void ModelChecker::add_action_to_lists(ModelAction *act)
2216 {
2217         int tid = id_to_int(act->get_tid());
2218         ModelAction *uninit = NULL;
2219         int uninit_id = -1;
2220         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2221         if (list->empty() && act->is_atomic_var()) {
2222                 uninit = new_uninitialized_action(act->get_location());
2223                 uninit_id = id_to_int(uninit->get_tid());
2224                 list->push_back(uninit);
2225         }
2226         list->push_back(act);
2227
2228         action_trace->push_back(act);
2229         if (uninit)
2230                 action_trace->push_front(uninit);
2231
2232         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2233         if (tid >= (int)vec->size())
2234                 vec->resize(priv->next_thread_id);
2235         (*vec)[tid].push_back(act);
2236         if (uninit)
2237                 (*vec)[uninit_id].push_front(uninit);
2238
2239         if ((int)thrd_last_action->size() <= tid)
2240                 thrd_last_action->resize(get_num_threads());
2241         (*thrd_last_action)[tid] = act;
2242         if (uninit)
2243                 (*thrd_last_action)[uninit_id] = uninit;
2244
2245         if (act->is_fence() && act->is_release()) {
2246                 if ((int)thrd_last_fence_release->size() <= tid)
2247                         thrd_last_fence_release->resize(get_num_threads());
2248                 (*thrd_last_fence_release)[tid] = act;
2249         }
2250
2251         if (act->is_wait()) {
2252                 void *mutex_loc = (void *) act->get_value();
2253                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2254
2255                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2256                 if (tid >= (int)vec->size())
2257                         vec->resize(priv->next_thread_id);
2258                 (*vec)[tid].push_back(act);
2259         }
2260 }
2261
2262 /**
2263  * @brief Get the last action performed by a particular Thread
2264  * @param tid The thread ID of the Thread in question
2265  * @return The last action in the thread
2266  */
2267 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2268 {
2269         int threadid = id_to_int(tid);
2270         if (threadid < (int)thrd_last_action->size())
2271                 return (*thrd_last_action)[id_to_int(tid)];
2272         else
2273                 return NULL;
2274 }
2275
2276 /**
2277  * @brief Get the last fence release performed by a particular Thread
2278  * @param tid The thread ID of the Thread in question
2279  * @return The last fence release in the thread, if one exists; NULL otherwise
2280  */
2281 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2282 {
2283         int threadid = id_to_int(tid);
2284         if (threadid < (int)thrd_last_fence_release->size())
2285                 return (*thrd_last_fence_release)[id_to_int(tid)];
2286         else
2287                 return NULL;
2288 }
2289
2290 /**
2291  * Gets the last memory_order_seq_cst write (in the total global sequence)
2292  * performed on a particular object (i.e., memory location), not including the
2293  * current action.
2294  * @param curr The current ModelAction; also denotes the object location to
2295  * check
2296  * @return The last seq_cst write
2297  */
2298 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2299 {
2300         void *location = curr->get_location();
2301         action_list_t *list = get_safe_ptr_action(obj_map, location);
2302         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2303         action_list_t::reverse_iterator rit;
2304         for (rit = list->rbegin(); rit != list->rend(); rit++)
2305                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2306                         return *rit;
2307         return NULL;
2308 }
2309
2310 /**
2311  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2312  * performed in a particular thread, prior to a particular fence.
2313  * @param tid The ID of the thread to check
2314  * @param before_fence The fence from which to begin the search; if NULL, then
2315  * search for the most recent fence in the thread.
2316  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2317  */
2318 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2319 {
2320         /* All fences should have NULL location */
2321         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2322         action_list_t::reverse_iterator rit = list->rbegin();
2323
2324         if (before_fence) {
2325                 for (; rit != list->rend(); rit++)
2326                         if (*rit == before_fence)
2327                                 break;
2328
2329                 ASSERT(*rit == before_fence);
2330                 rit++;
2331         }
2332
2333         for (; rit != list->rend(); rit++)
2334                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2335                         return *rit;
2336         return NULL;
2337 }
2338
2339 /**
2340  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2341  * location). This function identifies the mutex according to the current
2342  * action, which is presumed to operate on the same mutex.
2343  * @param curr The current ModelAction; also denotes the object location to
2344  * check
2345  * @return The last unlock operation
2346  */
2347 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2348 {
2349         void *location = curr->get_location();
2350         action_list_t *list = get_safe_ptr_action(obj_map, location);
2351         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2352         action_list_t::reverse_iterator rit;
2353         for (rit = list->rbegin(); rit != list->rend(); rit++)
2354                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2355                         return *rit;
2356         return NULL;
2357 }
2358
2359 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2360 {
2361         ModelAction *parent = get_last_action(tid);
2362         if (!parent)
2363                 parent = get_thread(tid)->get_creation();
2364         return parent;
2365 }
2366
2367 /**
2368  * Returns the clock vector for a given thread.
2369  * @param tid The thread whose clock vector we want
2370  * @return Desired clock vector
2371  */
2372 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2373 {
2374         return get_parent_action(tid)->get_cv();
2375 }
2376
2377 /**
2378  * Resolve a set of Promises with a current write. The set is provided in the
2379  * Node corresponding to @a write.
2380  * @param write The ModelAction that is fulfilling Promises
2381  * @return True if promises were resolved; false otherwise
2382  */
2383 bool ModelChecker::resolve_promises(ModelAction *write)
2384 {
2385         bool haveResolved = false;
2386         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2387         promise_list_t mustResolve, resolved;
2388
2389         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2390                 Promise *promise = (*promises)[promise_index];
2391                 if (write->get_node()->get_promise(i)) {
2392                         ModelAction *read = promise->get_action();
2393                         read_from(read, write);
2394                         //Make sure the promise's value matches the write's value
2395                         ASSERT(promise->is_compatible(write));
2396                         mo_graph->resolvePromise(read, write, &mustResolve);
2397
2398                         resolved.push_back(promise);
2399                         promises->erase(promises->begin() + promise_index);
2400                         actions_to_check.push_back(read);
2401
2402                         haveResolved = true;
2403                 } else
2404                         promise_index++;
2405         }
2406
2407         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2408                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2409                                 == resolved.end())
2410                         priv->failed_promise = true;
2411         }
2412         for (unsigned int i = 0; i < resolved.size(); i++)
2413                 delete resolved[i];
2414         //Check whether reading these writes has made threads unable to
2415         //resolve promises
2416
2417         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2418                 ModelAction *read = actions_to_check[i];
2419                 mo_check_promises(read, true);
2420         }
2421
2422         return haveResolved;
2423 }
2424
2425 /**
2426  * Compute the set of promises that could potentially be satisfied by this
2427  * action. Note that the set computation actually appears in the Node, not in
2428  * ModelChecker.
2429  * @param curr The ModelAction that may satisfy promises
2430  */
2431 void ModelChecker::compute_promises(ModelAction *curr)
2432 {
2433         for (unsigned int i = 0; i < promises->size(); i++) {
2434                 Promise *promise = (*promises)[i];
2435                 const ModelAction *act = promise->get_action();
2436                 if (!act->happens_before(curr) &&
2437                                 act->is_read() &&
2438                                 !act->could_synchronize_with(curr) &&
2439                                 !act->same_thread(curr) &&
2440                                 act->get_location() == curr->get_location() &&
2441                                 promise->get_value() == curr->get_value()) {
2442                         curr->get_node()->set_promise(i, act->is_rmw());
2443                 }
2444         }
2445 }
2446
2447 /** Checks promises in response to change in ClockVector Threads. */
2448 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2449 {
2450         for (unsigned int i = 0; i < promises->size(); i++) {
2451                 Promise *promise = (*promises)[i];
2452                 const ModelAction *act = promise->get_action();
2453                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2454                                 merge_cv->synchronized_since(act)) {
2455                         if (promise->eliminate_thread(tid)) {
2456                                 //Promise has failed
2457                                 priv->failed_promise = true;
2458                                 return;
2459                         }
2460                 }
2461         }
2462 }
2463
2464 void ModelChecker::check_promises_thread_disabled()
2465 {
2466         for (unsigned int i = 0; i < promises->size(); i++) {
2467                 Promise *promise = (*promises)[i];
2468                 if (promise->has_failed()) {
2469                         priv->failed_promise = true;
2470                         return;
2471                 }
2472         }
2473 }
2474
2475 /**
2476  * @brief Checks promises in response to addition to modification order for
2477  * threads.
2478  *
2479  * We test whether threads are still available for satisfying promises after an
2480  * addition to our modification order constraints. Those that are unavailable
2481  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2482  * that promise has failed.
2483  *
2484  * @param act The ModelAction which updated the modification order
2485  * @param is_read_check Should be true if act is a read and we must check for
2486  * updates to the store from which it read (there is a distinction here for
2487  * RMW's, which are both a load and a store)
2488  */
2489 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2490 {
2491         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2492
2493         for (unsigned int i = 0; i < promises->size(); i++) {
2494                 Promise *promise = (*promises)[i];
2495                 const ModelAction *pread = promise->get_action();
2496
2497                 // Is this promise on the same location?
2498                 if (!pread->same_var(write))
2499                         continue;
2500
2501                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2502                         priv->failed_promise = true;
2503                         return;
2504                 }
2505
2506                 // Don't do any lookups twice for the same thread
2507                 if (!promise->thread_is_available(act->get_tid()))
2508                         continue;
2509
2510                 if (mo_graph->checkReachable(promise, write)) {
2511                         if (mo_graph->checkPromise(write, promise)) {
2512                                 priv->failed_promise = true;
2513                                 return;
2514                         }
2515                 }
2516         }
2517 }
2518
2519 /**
2520  * Compute the set of writes that may break the current pending release
2521  * sequence. This information is extracted from previous release sequence
2522  * calculations.
2523  *
2524  * @param curr The current ModelAction. Must be a release sequence fixup
2525  * action.
2526  */
2527 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2528 {
2529         if (pending_rel_seqs->empty())
2530                 return;
2531
2532         struct release_seq *pending = pending_rel_seqs->back();
2533         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2534                 const ModelAction *write = pending->writes[i];
2535                 curr->get_node()->add_relseq_break(write);
2536         }
2537
2538         /* NULL means don't break the sequence; just synchronize */
2539         curr->get_node()->add_relseq_break(NULL);
2540 }
2541
2542 /**
2543  * Build up an initial set of all past writes that this 'read' action may read
2544  * from. This set is determined by the clock vector's "happens before"
2545  * relationship.
2546  * @param curr is the current ModelAction that we are exploring; it must be a
2547  * 'read' operation.
2548  */
2549 void ModelChecker::build_reads_from_past(ModelAction *curr)
2550 {
2551         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2552         unsigned int i;
2553         ASSERT(curr->is_read());
2554
2555         ModelAction *last_sc_write = NULL;
2556
2557         if (curr->is_seqcst())
2558                 last_sc_write = get_last_seq_cst_write(curr);
2559
2560         /* Iterate over all threads */
2561         for (i = 0; i < thrd_lists->size(); i++) {
2562                 /* Iterate over actions in thread, starting from most recent */
2563                 action_list_t *list = &(*thrd_lists)[i];
2564                 action_list_t::reverse_iterator rit;
2565                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2566                         ModelAction *act = *rit;
2567
2568                         /* Only consider 'write' actions */
2569                         if (!act->is_write() || act == curr)
2570                                 continue;
2571
2572                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2573                         bool allow_read = true;
2574
2575                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2576                                 allow_read = false;
2577                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2578                                 allow_read = false;
2579
2580                         if (allow_read) {
2581                                 /* Only add feasible reads */
2582                                 mo_graph->startChanges();
2583                                 r_modification_order(curr, act);
2584                                 if (!is_infeasible())
2585                                         curr->get_node()->add_read_from(act);
2586                                 mo_graph->rollbackChanges();
2587                         }
2588
2589                         /* Include at most one act per-thread that "happens before" curr */
2590                         if (act->happens_before(curr))
2591                                 break;
2592                 }
2593         }
2594         /* We may find no valid may-read-from set only if the execution is doomed */
2595         if (!curr->get_node()->get_read_from_size()) {
2596                 priv->no_valid_reads = true;
2597                 set_assert();
2598         }
2599
2600         if (DBG_ENABLED()) {
2601                 model_print("Reached read action:\n");
2602                 curr->print();
2603                 model_print("Printing may_read_from\n");
2604                 curr->get_node()->print_may_read_from();
2605                 model_print("End printing may_read_from\n");
2606         }
2607 }
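
/*
 * Illustrative sketch (editor's example, not from the original source): for
 * a program like
 *
 *   T1: x.store(1, memory_order_relaxed); x.store(2, memory_order_relaxed);
 *   T2: r1 = x.load(memory_order_relaxed);
 *
 * with no synchronization between T1 and T2, neither store happens before
 * the load, so build_reads_from_past() keeps both stores (plus the
 * initializing store, if any) in the load's may-read-from set.
 */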
2608
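/**
 * @brief Check whether a sleeping thread may read from a given write
 *
 * Walks backward along the reads-from chain of @a write (to handle RMWs). The
 * read is allowed if the chain reaches an uninitialized (UNINIT) store or if
 * some write in the chain is a release performed while the reading thread was
 * on the sleep set.
 *
 * @param curr The read action of the sleeping thread
 * @param write The candidate write
 * @return True if the sleeping thread may read from @a write
 */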
2609 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2610 {
2611         for ( ; write != NULL; write = write->get_reads_from()) {
2612                 /* UNINIT actions don't have a Node, and they never sleep */
2613                 if (write->is_uninitialized())
2614                         return true;
2615                 Node *prevnode = write->get_node()->get_parent();
2616
2617                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2618                 if (write->is_release() && thread_sleep)
2619                         return true;
2620                 if (!write->is_rmw())
2621                         return false;
2622         }
2623         return true;
2624 }
2625
2626 /**
2627  * @brief Create a new action representing an uninitialized atomic
2628  * @param location The memory location of the atomic object
2629  * @return A pointer to a new ModelAction
2630  */
2631 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2632 {
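        /* Placement-new into snapshot-allocated memory so the action lives in
         * (and is restored with) the snapshotted heap */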
2633         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2634         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2635         act->create_cv(NULL);
2636         return act;
2637 }
2638
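/** @brief Print a list of actions along with a simple hash of the whole trace */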
2639 static void print_list(action_list_t *list)
2640 {
2641         action_list_t::iterator it;
2642
2643         model_print("---------------------------------------------------------------------\n");
2644
2645         unsigned int hash = 0;
2646
2647         for (it = list->begin(); it != list->end(); it++) {
2648                 (*it)->print();
2649                 hash = hash^(hash<<3)^((*it)->hash());
2650         }
2651         model_print("HASH %u\n", hash);
2652         model_print("---------------------------------------------------------------------\n");
2653 }
2654
2655 #if SUPPORT_MOD_ORDER_DUMP
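/**
 * @brief Dump the modification order graph to "<filename>.dot"
 *
 * Writes the mo_graph nodes plus reads-from ("rf", red) and per-thread
 * sequenced-before ("sb", blue) edges in Graphviz format.
 */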
2656 void ModelChecker::dumpGraph(char *filename) const
2657 {
2658         char buffer[200];
2659         sprintf(buffer, "%s.dot", filename);
2660         FILE *file = fopen(buffer, "w");
2661         fprintf(file, "digraph %s {\n", filename);
2662         mo_graph->dumpNodes(file);
2663         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2664
2665         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2666                 ModelAction *action = *it;
2667                 if (action->is_read()) {
2668                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2669                         if (action->get_reads_from() != NULL)
2670                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2671                 }
2672                 if (thread_array[action->get_tid()] != NULL) {
2673                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2674                 }
2675
2676                 thread_array[action->get_tid()] = action;
2677         }
2678         fprintf(file, "}\n");
2679         model_free(thread_array);
2680         fclose(file);
2681 }
2682 #endif
2683
2684 /** @brief Prints an execution trace summary. */
2685 void ModelChecker::print_summary() const
2686 {
2687 #if SUPPORT_MOD_ORDER_DUMP
2688         char buffername[100];
2689         sprintf(buffername, "exec%04u", stats.num_total);
2690         mo_graph->dumpGraphToFile(buffername);
2691         sprintf(buffername, "graph%04u", stats.num_total);
2692         dumpGraph(buffername);
2693 #endif
2694
2695         model_print("Execution %u:", stats.num_total);
2696         if (isfeasibleprefix())
2697                 model_print("\n");
2698         else
2699                 print_infeasibility(" INFEASIBLE");
2700         print_list(action_trace);
2701         model_print("\n");
2702 }
2703
2704 /**
2705  * Add a Thread to the system for the first time. Should only be called once
2706  * per thread.
2707  * @param t The Thread to add
2708  */
2709 void ModelChecker::add_thread(Thread *t)
2710 {
2711         thread_map->put(id_to_int(t->get_id()), t);
2712         scheduler->add_thread(t);
2713 }
2714
2715 /**
2716  * Removes a thread from the scheduler.
2717  * @param t The Thread to remove
2718  */
2719 void ModelChecker::remove_thread(Thread *t)
2720 {
2721         scheduler->remove_thread(t);
2722 }
2723
2724 /**
2725  * @brief Get a Thread reference by its ID
2726  * @param tid The Thread's ID
2727  * @return A Thread reference
2728  */
2729 Thread * ModelChecker::get_thread(thread_id_t tid) const
2730 {
2731         return thread_map->get(id_to_int(tid));
2732 }
2733
2734 /**
2735  * @brief Get a reference to the Thread in which a ModelAction was executed
2736  * @param act The ModelAction
2737  * @return A Thread reference
2738  */
2739 Thread * ModelChecker::get_thread(const ModelAction *act) const
2740 {
2741         return get_thread(act->get_tid());
2742 }
2743
2744 /**
2745  * @brief Check if a Thread is currently enabled
2746  * @param t The Thread to check
2747  * @return True if the Thread is currently enabled
2748  */
2749 bool ModelChecker::is_enabled(Thread *t) const
2750 {
2751         return scheduler->is_enabled(t);
2752 }
2753
2754 /**
2755  * @brief Check if a Thread is currently enabled
2756  * @param tid The ID of the Thread to check
2757  * @return True if the Thread is currently enabled
2758  */
2759 bool ModelChecker::is_enabled(thread_id_t tid) const
2760 {
2761         return scheduler->is_enabled(tid);
2762 }
2763
2764 /**
2765  * Switch from a model-checker context to a user-thread context. This is the
2766  * complement of ModelChecker::switch_to_master and must be called from the
2767  * model-checker context
2768  *
2769  * @param thread The user-thread to switch to
2770  */
2771 void ModelChecker::switch_from_master(Thread *thread)
2772 {
2773         scheduler->set_current_thread(thread);
2774         Thread::swap(&system_context, thread);
2775 }
2776
2777 /**
2778  * Switch from a user-context to the "master thread" context (a.k.a. system
2779  * context). This switch is made with the intention of exploring a particular
2780  * model-checking action (described by a ModelAction object). Must be called
2781  * from a user-thread context.
2782  *
2783  * @param act The current action that will be explored. May be NULL only if
2784  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2785  * ModelChecker::has_asserted).
2786  * @return The value returned by the current action
2787  */
2788 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2789 {
2790         DBG();
2791         Thread *old = thread_current();
2792         ASSERT(!old->get_pending());
2793         old->set_pending(act);
2794         if (Thread::swap(old, &system_context) < 0) {
2795                 perror("swap threads");
2796                 exit(EXIT_FAILURE);
2797         }
2798         return old->get_return_value();
2799 }
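
/*
 * Illustrative sketch (editor's example, not from the original source): a
 * user-context hook would typically package its operation as a ModelAction
 * and hand control to the model checker, along the lines of
 *
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, std::memory_order_seq_cst, obj));
 *
 * The action type and constructor arguments above are only a hypothetical
 * example; see action.h for the actual constructors.
 */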
2800
2801 /**
2802  * Takes the next step in the execution, if possible.
2803  * @param curr The current step to take
2804  * @return The next Thread to run, if any; NULL if this execution
2805  * should terminate
2806  */
2807 Thread * ModelChecker::take_step(ModelAction *curr)
2808 {
2809         Thread *curr_thrd = get_thread(curr);
2810         ASSERT(curr_thrd->get_state() == THREAD_READY);
2811
2812         curr = check_current_action(curr);
2813
2814         /* Infeasible -> don't take any more steps */
2815         if (is_infeasible())
2816                 return NULL;
2817         else if (isfeasibleprefix() && have_bug_reports()) {
2818                 set_assert();
2819                 return NULL;
2820         }
2821
2822         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2823                 return NULL;
2824
2825         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2826                 scheduler->remove_thread(curr_thrd);
2827
2828         Thread *next_thrd = get_next_thread(curr);
2829
2830         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2831                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2832
2833         return next_thrd;
2834 }
2835
2836 /** Wrapper to run the user's main function, with appropriate arguments */
2837 void user_main_wrapper(void *)
2838 {
2839         user_main(model->params.argc, model->params.argv);
2840 }
2841
2842 /** @brief Run ModelChecker for the user program */
2843 void ModelChecker::run()
2844 {
2845         do {
2846                 thrd_t user_thread;
2847                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2848                 add_thread(t);
2849
2850                 do {
2851                         /*
2852                          * Stash next pending action(s) for thread(s). We
2853                          * should only need to stash one thread's action--the
2854                          * thread which just took a step--plus the first step
2855                          * for any newly-created thread
2856                          */
2857                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2858                                 thread_id_t tid = int_to_id(i);
2859                                 Thread *thr = get_thread(tid);
2860                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2861                                         switch_from_master(thr);
2862                                 }
2863                         }
2864
2865                         /* Catch assertions from prior take_step or from
2866                          * between-ModelAction bugs (e.g., data races) */
2867                         if (has_asserted())
2868                                 break;
2869
2870                         /* Consume the next action for a Thread */
2871                         ModelAction *curr = t->get_pending();
2872                         t->set_pending(NULL);
2873                         t = take_step(curr);
2874                 } while (t && !t->is_model_thread());
2875
2876                 /*
2877                  * Launch end-of-execution release sequence fixups only when
2878                  * the execution is otherwise feasible AND there are:
2879                  *
2880                  * (1) pending release sequences
2881                  * (2) pending assertions that could be invalidated by a change
2882                  * in clock vectors (i.e., data races)
2883                  * (3) no pending promises
2884                  */
2885                 while (!pending_rel_seqs->empty() &&
2886                                 is_feasible_prefix_ignore_relseq() &&
2887                                 !unrealizedraces.empty()) {
2888                         model_print("*** WARNING: release sequence fixup action "
2889                                         "(%zu pending release sequence(s)) ***\n",
2890                                         pending_rel_seqs->size());
2891                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2892                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2893                                         model_thread);
2894                         take_step(fixup);
2895                 }
2896         } while (next_execution());
2897
2898         model_print("******* Model-checking complete: *******\n");
2899         print_stats();
2900 }