schedule: simplify Scheduler::select_next_thread()
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
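/*
 * Illustrative sketch (not part of the original source): a bug_message is
 * normally created by ModelChecker::assert_bug() below and printed from
 * print_bugs(), e.g.:
 *
 *   bug_message *b = new bug_message("data race on x");
 *   b->print();   // prints "  [BUG] data race on x"
 *   delete b;
 */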
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr The current ModelAction. This action might guide the choice of
218  * next thread.
219  * @return The next thread chosen to run, if one exists; NULL if no threads
220  * remain to be executed.
221  */
222 Thread * ModelChecker::get_next_thread(ModelAction *curr)
223 {
224         thread_id_t tid;
225
226         if (curr != NULL) {
227                 /* Do not split atomic actions. */
228                 if (curr->is_rmwr())
229                         return get_thread(curr);
230                 else if (curr->get_type() == THREAD_CREATE)
231                         return curr->get_thread_operand();
232         }
233
234         /*
235          * Have we completed exploring the preselected path? Then let the
236          * scheduler decide
237          */
238         if (diverge == NULL)
239                 return scheduler->select_next_thread();
240
241         /* Else, we are trying to replay an execution */
242         ModelAction *next = node_stack->get_next()->get_action();
243
244         if (next == diverge) {
245                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
246                         earliest_diverge = diverge;
247
248                 Node *nextnode = next->get_node();
249                 Node *prevnode = nextnode->get_parent();
250                 scheduler->update_sleep_set(prevnode);
251
252                 /* Reached divergence point */
253                 if (nextnode->increment_misc()) {
254                         /* The next node will try to satisfy a different misc_index value. */
255                         tid = next->get_tid();
256                         node_stack->pop_restofstack(2);
257                 } else if (nextnode->increment_promise()) {
258                         /* The next node will try to satisfy a different set of promises. */
259                         tid = next->get_tid();
260                         node_stack->pop_restofstack(2);
261                 } else if (nextnode->increment_read_from()) {
262                         /* The next node will read from a different value. */
263                         tid = next->get_tid();
264                         node_stack->pop_restofstack(2);
265                 } else if (nextnode->increment_future_value()) {
266                         /* The next node will try to read from a different future value. */
267                         tid = next->get_tid();
268                         node_stack->pop_restofstack(2);
269                 } else if (nextnode->increment_relseq_break()) {
270                         /* The next node will try to resolve a release sequence differently */
271                         tid = next->get_tid();
272                         node_stack->pop_restofstack(2);
273                 } else {
274                         ASSERT(prevnode);
275                         /* Make a different thread execute for next step */
276                         scheduler->add_sleep(get_thread(next->get_tid()));
277                         tid = prevnode->get_next_backtrack();
278                         /* Make sure the backtracked thread isn't sleeping. */
279                         node_stack->pop_restofstack(1);
280                         if (diverge == earliest_diverge) {
281                                 earliest_diverge = prevnode->get_action();
282                         }
283                 }
284                 /* The correct sleep set is in the parent node. */
285                 execute_sleep_set();
286
287                 DEBUG("*** Divergence point ***\n");
288
289                 diverge = NULL;
290         } else {
291                 tid = next->get_tid();
292         }
293         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
294         ASSERT(tid != THREAD_ID_T_NONE);
295         return thread_map->get(id_to_int(tid));
296 }
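/*
 * Scheduler::select_next_thread() lives in schedule.cc and is not shown
 * here.  As a rough, hypothetical sketch (not the actual implementation),
 * a scheduler with no replay constraints could simply pick the next
 * enabled thread in round-robin order using the ModelChecker interface
 * seen in this file:
 *
 *   Thread * Scheduler::select_next_thread()
 *   {
 *           int nthreads = model->get_num_threads();
 *           for (int i = 0; i < nthreads; i++) {
 *                   int idx = (last_index + 1 + i) % nthreads;  // hypothetical member
 *                   thread_id_t tid = int_to_id(idx);
 *                   if (model->is_enabled(tid)) {
 *                           last_index = idx;
 *                           return model->get_thread(tid);
 *                   }
 *           }
 *           return NULL;  // no runnable thread left
 *   }
 */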
297
298 /**
299  * We need to know which actions belong to threads in the sleep set.
300  * This method marks the pending action of each sleeping thread with
301  * the sleep flag so that it can be recognized later.
302  */
303
304 void ModelChecker::execute_sleep_set()
305 {
306         for (unsigned int i = 0; i < get_num_threads(); i++) {
307                 thread_id_t tid = int_to_id(i);
308                 Thread *thr = get_thread(tid);
309                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
310                         thr->get_pending()->set_sleep_flag();
311                 }
312         }
313 }
314
315 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
316 {
317         for (unsigned int i = 0; i < get_num_threads(); i++) {
318                 Thread *thr = get_thread(int_to_id(i));
319                 if (scheduler->is_sleep_set(thr)) {
320                         ModelAction *pending_act = thr->get_pending();
321                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
322                                 //Remove this thread from sleep set
323                                 scheduler->remove_sleep(thr);
324                 }
325         }
326 }
327
328 /** @brief Alert the model-checker that an incorrectly-ordered
329  * synchronization was made */
330 void ModelChecker::set_bad_synchronization()
331 {
332         priv->bad_synchronization = true;
333 }
334
335 /**
336  * Check whether the current trace has triggered an assertion which should halt
337  * its execution.
338  *
339  * @return True, if the execution should be aborted; false otherwise
340  */
341 bool ModelChecker::has_asserted() const
342 {
343         return priv->asserted;
344 }
345
346 /**
347  * Trigger a trace assertion which should cause this execution to be halted.
348  * This can be due to a detected bug or due to an infeasibility that should
349  * halt ASAP.
350  */
351 void ModelChecker::set_assert()
352 {
353         priv->asserted = true;
354 }
355
356 /**
357  * Check if we are in a deadlock. Should only be called at the end of an
358  * execution, although it should not give false positives in the middle of an
359  * execution (there should be some ENABLED thread).
360  *
361  * @return True if program is in a deadlock; false otherwise
362  */
363 bool ModelChecker::is_deadlocked() const
364 {
365         bool blocking_threads = false;
366         for (unsigned int i = 0; i < get_num_threads(); i++) {
367                 thread_id_t tid = int_to_id(i);
368                 if (is_enabled(tid))
369                         return false;
370                 Thread *t = get_thread(tid);
371                 if (!t->is_model_thread() && t->get_pending())
372                         blocking_threads = true;
373         }
374         return blocking_threads;
375 }
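/*
 * Illustrative example (not part of this file): a user program with a
 * classic lock-order inversion that this check would flag whenever an
 * execution lets each thread grab its first mutex.  This assumes the
 * thrd_* wrappers and user_main() entry point used by the model
 * checker's test programs:
 *
 *   #include <threads.h>
 *   #include <mutex>
 *
 *   std::mutex m1, m2;
 *
 *   static void a(void *obj) { m1.lock(); m2.lock(); m2.unlock(); m1.unlock(); }
 *   static void b(void *obj) { m2.lock(); m1.lock(); m1.unlock(); m2.unlock(); }
 *
 *   int user_main(int argc, char **argv)
 *   {
 *           thrd_t t1, t2;
 *           thrd_create(&t1, (thrd_start_t)&a, NULL);
 *           thrd_create(&t2, (thrd_start_t)&b, NULL);
 *           thrd_join(t1);
 *           thrd_join(t2);
 *           return 0;
 *   }
 */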
376
377 /**
378  * Check if this is a complete execution. That is, have all threads completed
379  * execution (rather than exiting because sleep sets have forced a redundant
380  * execution).
381  *
382  * @return True if the execution is complete.
383  */
384 bool ModelChecker::is_complete_execution() const
385 {
386         for (unsigned int i = 0; i < get_num_threads(); i++)
387                 if (is_enabled(int_to_id(i)))
388                         return false;
389         return true;
390 }
391
392 /**
393  * @brief Assert a bug in the executing program.
394  *
395  * Use this function to assert any sort of bug in the user program. If the
396  * current trace is feasible (actually, a prefix of some feasible execution),
397  * then this execution will be aborted, printing the appropriate message. If
398  * the current trace is not yet feasible, the error message will be stashed and
399  * printed if the execution ever becomes feasible.
400  *
401  * @param msg Descriptive message for the bug (do not include newline char)
402  * @return True if bug is immediately-feasible
403  */
404 bool ModelChecker::assert_bug(const char *msg)
405 {
406         priv->bugs.push_back(new bug_message(msg));
407
408         if (isfeasibleprefix()) {
409                 set_assert();
410                 return true;
411         }
412         return false;
413 }
414
415 /**
416  * @brief Assert a bug in the executing program, asserted by a user thread
417  * @see ModelChecker::assert_bug
418  * @param msg Descriptive message for the bug (do not include newline char)
419  */
420 void ModelChecker::assert_user_bug(const char *msg)
421 {
422         /* If feasible bug, bail out now */
423         if (assert_bug(msg))
424                 switch_to_master(NULL);
425 }
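/*
 * Illustrative sketch (not part of this file): instrumented user code can
 * report a property violation through the global `model` pointer declared
 * above; the user-facing headers normally hide this behind an assertion
 * macro.  The invariant below is hypothetical:
 *
 *   extern ModelChecker *model;
 *
 *   void check_result(int observed)
 *   {
 *           if (observed != 42)
 *                   model->assert_user_bug("read a stale value");
 *   }
 */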
426
427 /** @return True, if any bugs have been reported for this execution */
428 bool ModelChecker::have_bug_reports() const
429 {
430         return priv->bugs.size() != 0;
431 }
432
433 /** @brief Print bug report listing for this execution (if any bugs exist) */
434 void ModelChecker::print_bugs() const
435 {
436         if (have_bug_reports()) {
437                 model_print("Bug report: %zu bug%s detected\n",
438                                 priv->bugs.size(),
439                                 priv->bugs.size() > 1 ? "s" : "");
440                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
441                         priv->bugs[i]->print();
442         }
443 }
444
445 /**
446  * @brief Record end-of-execution stats
447  *
448  * Must be run when exiting an execution. Records various stats.
449  * @see struct execution_stats
450  */
451 void ModelChecker::record_stats()
452 {
453         stats.num_total++;
454         if (!isfeasibleprefix())
455                 stats.num_infeasible++;
456         else if (have_bug_reports())
457                 stats.num_buggy_executions++;
458         else if (is_complete_execution())
459                 stats.num_complete++;
460         else
461                 stats.num_redundant++;
462 }
463
464 /** @brief Print execution stats */
465 void ModelChecker::print_stats() const
466 {
467         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
468         model_print("Number of redundant executions: %d\n", stats.num_redundant);
469         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
470         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
471         model_print("Total executions: %d\n", stats.num_total);
472         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
473 }
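/*
 * With hypothetical counts, the format strings above produce output of
 * the form:
 *
 *   Number of complete, bug-free executions: 12
 *   Number of redundant executions: 3
 *   Number of buggy executions: 1
 *   Number of infeasible executions: 20
 *   Total executions: 36
 *   Total nodes created: 214
 */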
474
475 /**
476  * @brief End-of-execution print
477  * @param printbugs Should any existing bugs be printed?
478  */
479 void ModelChecker::print_execution(bool printbugs) const
480 {
481         print_program_output();
482
483         if (DBG_ENABLED() || params.verbose) {
484                 model_print("Earliest divergence point since last feasible execution:\n");
485                 if (earliest_diverge)
486                         earliest_diverge->print();
487                 else
488                         model_print("(Not set)\n");
489
490                 model_print("\n");
491                 print_stats();
492         }
493
494         /* Don't print invalid bugs */
495         if (printbugs)
496                 print_bugs();
497
498         model_print("\n");
499         print_summary();
500 }
501
502 /**
503  * Queries the model-checker for more executions to explore and, if one
504  * exists, resets the model-checker state to execute a new execution.
505  *
506  * @return If there are more executions to explore, return true. Otherwise,
507  * return false.
508  */
509 bool ModelChecker::next_execution()
510 {
511         DBG();
512         /* Is this execution a feasible execution that's worth bug-checking? */
513         bool complete = isfeasibleprefix() && (is_complete_execution() ||
514                         have_bug_reports());
515
516         /* End-of-execution bug checks */
517         if (complete) {
518                 if (is_deadlocked())
519                         assert_bug("Deadlock detected");
520
521                 checkDataRaces();
522         }
523
524         record_stats();
525
526         /* Output */
527         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
528                 print_execution(complete);
529         else
530                 clear_program_output();
531
532         if (complete)
533                 earliest_diverge = NULL;
534
535         if ((diverge = get_next_backtrack()) == NULL)
536                 return false;
537
538         if (DBG_ENABLED()) {
539                 model_print("Next execution will diverge at:\n");
540                 diverge->print();
541         }
542
543         reset_to_initial_state();
544         return true;
545 }
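/*
 * The top-level driver (elsewhere in the model checker, not shown here)
 * conceptually wraps this in a loop of the following shape -- a sketch,
 * not the actual code:
 *
 *   do {
 *           // run the user program to completion under the scheduler,
 *           // feeding each step through check_current_action()
 *   } while (model->next_execution());
 */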
546
547 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
548 {
549         switch (act->get_type()) {
550         case ATOMIC_FENCE:
551         case ATOMIC_READ:
552         case ATOMIC_WRITE:
553         case ATOMIC_RMW: {
554                 /* Optimization: relaxed operations don't need backtracking */
555                 if (act->is_relaxed())
556                         return NULL;
557                 /* linear search: from most recent to oldest */
558                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
559                 action_list_t::reverse_iterator rit;
560                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
561                         ModelAction *prev = *rit;
562                         if (prev->could_synchronize_with(act))
563                                 return prev;
564                 }
565                 break;
566         }
567         case ATOMIC_LOCK:
568         case ATOMIC_TRYLOCK: {
569                 /* linear search: from most recent to oldest */
570                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
571                 action_list_t::reverse_iterator rit;
572                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
573                         ModelAction *prev = *rit;
574                         if (act->is_conflicting_lock(prev))
575                                 return prev;
576                 }
577                 break;
578         }
579         case ATOMIC_UNLOCK: {
580                 /* linear search: from most recent to oldest */
581                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
582                 action_list_t::reverse_iterator rit;
583                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
584                         ModelAction *prev = *rit;
585                         if (!act->same_thread(prev) && prev->is_failed_trylock())
586                                 return prev;
587                 }
588                 break;
589         }
590         case ATOMIC_WAIT: {
591                 /* linear search: from most recent to oldest */
592                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
593                 action_list_t::reverse_iterator rit;
594                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
595                         ModelAction *prev = *rit;
596                         if (!act->same_thread(prev) && prev->is_failed_trylock())
597                                 return prev;
598                         if (!act->same_thread(prev) && prev->is_notify())
599                                 return prev;
600                 }
601                 break;
602         }
603
604         case ATOMIC_NOTIFY_ALL:
605         case ATOMIC_NOTIFY_ONE: {
606                 /* linear search: from most recent to oldest */
607                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
608                 action_list_t::reverse_iterator rit;
609                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
610                         ModelAction *prev = *rit;
611                         if (!act->same_thread(prev) && prev->is_wait())
612                                 return prev;
613                 }
614                 break;
615         }
616         default:
617                 break;
618         }
619         return NULL;
620 }
621
622 /** This method finds conflicting actions against which the given
623  * ModelAction should be reordered (i.e., backtracking points).
624  *
625  * @param act The ModelAction to find backtracking points for.
626  */
627 void ModelChecker::set_backtracking(ModelAction *act)
628 {
629         Thread *t = get_thread(act);
630         ModelAction *prev = get_last_conflict(act);
631         if (prev == NULL)
632                 return;
633
634         Node *node = prev->get_node()->get_parent();
635
636         int low_tid, high_tid;
637         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
638                 low_tid = id_to_int(act->get_tid());
639                 high_tid = low_tid + 1;
640         } else {
641                 low_tid = 0;
642                 high_tid = get_num_threads();
643         }
644
645         for (int i = low_tid; i < high_tid; i++) {
646                 thread_id_t tid = int_to_id(i);
647
648                 /* Make sure this thread can be enabled here. */
649                 if (i >= node->get_num_threads())
650                         break;
651
652                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
653                 if (node->enabled_status(tid) != THREAD_ENABLED)
654                         continue;
655
656                 /* Check if this has been explored already */
657                 if (node->has_been_explored(tid))
658                         continue;
659
660                 /* See if fairness allows */
661                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
662                         bool unfair = false;
663                         for (int t = 0; t < node->get_num_threads(); t++) {
664                                 thread_id_t tother = int_to_id(t);
665                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
666                                         unfair = true;
667                                         break;
668                                 }
669                         }
670                         if (unfair)
671                                 continue;
672                 }
673                 /* Cache the latest backtracking point */
674                 set_latest_backtrack(prev);
675
676                 /* If this is a new backtracking point, mark the tree */
677                 if (!node->set_backtrack(tid))
678                         continue;
679                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
680                                         id_to_int(prev->get_tid()),
681                                         id_to_int(t->get_id()));
682                 if (DBG_ENABLED()) {
683                         prev->print();
684                         act->print();
685                 }
686         }
687 }
688
689 /**
690  * @brief Cache a backtracking point as the "most recent", if eligible
691  *
692  * Note that this does not prepare the NodeStack for this backtracking
693  * operation; it only caches the action on a per-execution basis
694  *
695  * @param act The operation at which we should explore a different next action
696  * (i.e., backtracking point)
697  * @return True, if this action is now the most recent backtracking point;
698  * false otherwise
699  */
700 bool ModelChecker::set_latest_backtrack(ModelAction *act)
701 {
702         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
703                 priv->next_backtrack = act;
704                 return true;
705         }
706         return false;
707 }
708
709 /**
710  * Returns the last backtracking point. The model checker will explore a different
711  * path for this point in the next execution.
712  * @return The ModelAction at which the next execution should diverge.
713  */
714 ModelAction * ModelChecker::get_next_backtrack()
715 {
716         ModelAction *next = priv->next_backtrack;
717         priv->next_backtrack = NULL;
718         return next;
719 }
720
721 /**
722  * Processes a read or rmw model action.
723  * @param curr is the read model action to process.
724  * @param second_part_of_rmw True if this is the second action of an RMW.
725  * @return True if processing this read updates the mo_graph.
726  */
727 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
728 {
729         uint64_t value = VALUE_NONE;
730         bool updated = false;
731         while (true) {
732                 const ModelAction *reads_from = curr->get_node()->get_read_from();
733                 if (reads_from != NULL) {
734                         mo_graph->startChanges();
735
736                         value = reads_from->get_value();
737                         bool r_status = false;
738
739                         if (!second_part_of_rmw) {
740                                 check_recency(curr, reads_from);
741                                 r_status = r_modification_order(curr, reads_from);
742                         }
743
744                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
745                                 mo_graph->rollbackChanges();
746                                 priv->too_many_reads = false;
747                                 continue;
748                         }
749
750                         read_from(curr, reads_from);
751                         mo_graph->commitChanges();
752                         mo_check_promises(curr, true);
753
754                         updated |= r_status;
755                 } else if (!second_part_of_rmw) {
756                         /* Read from future value */
757                         struct future_value fv = curr->get_node()->get_future_value();
758                         Promise *promise = new Promise(curr, fv);
759                         value = fv.value;
760                         curr->set_read_from_promise(promise);
761                         promises->push_back(promise);
762                         mo_graph->startChanges();
763                         updated = r_modification_order(curr, promise);
764                         mo_graph->commitChanges();
765                 }
766                 get_thread(curr)->set_return_value(value);
767                 return updated;
768         }
769 }
770
771 /**
772  * Processes a lock, trylock, unlock, wait, or notify model action.
773  * @param curr is the mutex operation to process.
774  *
775  * The trylock operation checks whether the lock is already held.  If
776  * not, it falls through to the normal lock case; if so, it fails and
777  * returns 0.
778  *
779  * The lock operation has already been checked to be enabled, so it
780  * just grabs the lock and synchronizes with the previous unlock.
781  *
782  * The unlock operation has to re-enable all of the threads that are
783  * waiting on the lock.
784  *
785  * @return True if synchronization was updated; false otherwise
786  */
787 bool ModelChecker::process_mutex(ModelAction *curr)
788 {
789         std::mutex *mutex = NULL;
790         struct std::mutex_state *state = NULL;
791
792         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
793                 mutex = (std::mutex *)curr->get_location();
794                 state = mutex->get_state();
795         } else if (curr->is_wait()) {
796                 mutex = (std::mutex *)curr->get_value();
797                 state = mutex->get_state();
798         }
799
800         switch (curr->get_type()) {
801         case ATOMIC_TRYLOCK: {
802                 bool success = !state->islocked;
803                 curr->set_try_lock(success);
804                 if (!success) {
805                         get_thread(curr)->set_return_value(0);
806                         break;
807                 }
808                 get_thread(curr)->set_return_value(1);
809         }
810                 //otherwise fall through to the lock case
811         case ATOMIC_LOCK: {
812                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
813                         assert_bug("Lock access before initialization");
814                 state->islocked = true;
815                 ModelAction *unlock = get_last_unlock(curr);
816                 //synchronize with the previous unlock statement
817                 if (unlock != NULL) {
818                         curr->synchronize_with(unlock);
819                         return true;
820                 }
821                 break;
822         }
823         case ATOMIC_UNLOCK: {
824                 //unlock the lock
825                 state->islocked = false;
826                 //wake up the other threads
827                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
828                 //activate all the waiting threads
829                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
830                         scheduler->wake(get_thread(*rit));
831                 }
832                 waiters->clear();
833                 break;
834         }
835         case ATOMIC_WAIT: {
836                 //unlock the lock
837                 state->islocked = false;
838                 //wake up the other threads
839                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
840                 //activate all the waiting threads
841                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
842                         scheduler->wake(get_thread(*rit));
843                 }
844                 waiters->clear();
845                 //check whether we should go to sleep or not...simulate spurious failures
846                 if (curr->get_node()->get_misc() == 0) {
847                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
848                         //disable us
849                         scheduler->sleep(get_thread(curr));
850                 }
851                 break;
852         }
853         case ATOMIC_NOTIFY_ALL: {
854                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
855                 //activate all the waiting threads
856                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
857                         scheduler->wake(get_thread(*rit));
858                 }
859                 waiters->clear();
860                 break;
861         }
862         case ATOMIC_NOTIFY_ONE: {
863                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
864                 int wakeupthread = curr->get_node()->get_misc();
865                 action_list_t::iterator it = waiters->begin();
866                 advance(it, wakeupthread);
867                 scheduler->wake(get_thread(*it));
868                 waiters->erase(it);
869                 break;
870         }
871
872         default:
873                 ASSERT(0);
874         }
875         return false;
876 }
877
878 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
879 {
880         /* Do more ambitious checks now that mo is more complete */
881         if (mo_may_allow(writer, reader)) {
882                 Node *node = reader->get_node();
883
884                 /* Find an ancestor thread which exists at the time of the reader */
885                 Thread *write_thread = get_thread(writer);
886                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
887                         write_thread = write_thread->get_parent();
888
889                 struct future_value fv = {
890                         writer->get_value(),
891                         writer->get_seq_number() + params.maxfuturedelay,
892                         write_thread->get_id(),
893                 };
894                 if (node->add_future_value(fv))
895                         set_latest_backtrack(reader);
896         }
897 }
898
899 /**
900  * Process a write ModelAction
901  * @param curr The ModelAction to process
902  * @return True if the mo_graph was updated or promises were resolved
903  */
904 bool ModelChecker::process_write(ModelAction *curr)
905 {
906         bool updated_mod_order = w_modification_order(curr);
907         bool updated_promises = resolve_promises(curr);
908
909         if (promises->size() == 0) {
910                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
911                         struct PendingFutureValue pfv = (*futurevalues)[i];
912                         add_future_value(pfv.writer, pfv.act);
913                 }
914                 futurevalues->clear();
915         }
916
917         mo_graph->commitChanges();
918         mo_check_promises(curr, false);
919
920         get_thread(curr)->set_return_value(VALUE_NONE);
921         return updated_mod_order || updated_promises;
922 }
923
924 /**
925  * Process a fence ModelAction
926  * @param curr The ModelAction to process
927  * @return True if synchronization was updated
928  */
929 bool ModelChecker::process_fence(ModelAction *curr)
930 {
931         /*
932          * fence-relaxed: no-op
933  * fence-release: only log the occurrence (not in this function), for
934          *   use in later synchronization
935          * fence-acquire (this function): search for hypothetical release
936          *   sequences
937          */
938         bool updated = false;
939         if (curr->is_acquire()) {
940                 action_list_t *list = action_trace;
941                 action_list_t::reverse_iterator rit;
942                 /* Find X : is_read(X) && X --sb-> curr */
943                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
944                         ModelAction *act = *rit;
945                         if (act == curr)
946                                 continue;
947                         if (act->get_tid() != curr->get_tid())
948                                 continue;
949                         /* Stop at the beginning of the thread */
950                         if (act->is_thread_start())
951                                 break;
952                         /* Stop once we reach a prior fence-acquire */
953                         if (act->is_fence() && act->is_acquire())
954                                 break;
955                         if (!act->is_read())
956                                 continue;
957                         /* read-acquire will find its own release sequences */
958                         if (act->is_acquire())
959                                 continue;
960
961                         /* Establish hypothetical release sequences */
962                         rel_heads_list_t release_heads;
963                         get_release_seq_heads(curr, act, &release_heads);
964                         for (unsigned int i = 0; i < release_heads.size(); i++)
965                                 if (!curr->synchronize_with(release_heads[i]))
966                                         set_bad_synchronization();
967                         if (release_heads.size() != 0)
968                                 updated = true;
969                 }
970         }
971         return updated;
972 }
973
974 /**
975  * @brief Process the current action for thread-related activity
976  *
977  * Performs current-action processing for a THREAD_* ModelAction. Processing
978  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
979  * synchronization, etc.  This function is a no-op for non-THREAD actions
980  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
981  *
982  * @param curr The current action
983  * @return True if synchronization was updated or a thread completed
984  */
985 bool ModelChecker::process_thread_action(ModelAction *curr)
986 {
987         bool updated = false;
988
989         switch (curr->get_type()) {
990         case THREAD_CREATE: {
991                 thrd_t *thrd = (thrd_t *)curr->get_location();
992                 struct thread_params *params = (struct thread_params *)curr->get_value();
993                 Thread *th = new Thread(thrd, params->func, params->arg);
994                 add_thread(th);
995                 th->set_creation(curr);
996                 /* Promises can be satisfied by children */
997                 for (unsigned int i = 0; i < promises->size(); i++) {
998                         Promise *promise = (*promises)[i];
999                         if (promise->thread_is_available(curr->get_tid()))
1000                                 promise->add_thread(th->get_id());
1001                 }
1002                 break;
1003         }
1004         case THREAD_JOIN: {
1005                 Thread *blocking = curr->get_thread_operand();
1006                 ModelAction *act = get_last_action(blocking->get_id());
1007                 curr->synchronize_with(act);
1008                 updated = true; /* trigger rel-seq checks */
1009                 break;
1010         }
1011         case THREAD_FINISH: {
1012                 Thread *th = get_thread(curr);
1013                 while (!th->wait_list_empty()) {
1014                         ModelAction *act = th->pop_wait_list();
1015                         scheduler->wake(get_thread(act));
1016                 }
1017                 th->complete();
1018                 /* Completed thread can't satisfy promises */
1019                 for (unsigned int i = 0; i < promises->size(); i++) {
1020                         Promise *promise = (*promises)[i];
1021                         if (promise->thread_is_available(th->get_id()))
1022                                 if (promise->eliminate_thread(th->get_id()))
1023                                         priv->failed_promise = true;
1024                 }
1025                 updated = true; /* trigger rel-seq checks */
1026                 break;
1027         }
1028         case THREAD_START: {
1029                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1030                 break;
1031         }
1032         default:
1033                 break;
1034         }
1035
1036         return updated;
1037 }
1038
1039 /**
1040  * @brief Process the current action for release sequence fixup activity
1041  *
1042  * Performs model-checker release sequence fixups for the current action,
1043  * forcing a single pending release sequence to break (with a given, potential
1044  * "loose" write) or to complete (i.e., synchronize). If a pending release
1045  * sequence forms a complete release sequence, then we must perform the fixup
1046  * synchronization, mo_graph additions, etc.
1047  *
1048  * @param curr The current action; must be a release sequence fixup action
1049  * @param work_queue The work queue to which to add work items as they are
1050  * generated
1051  */
1052 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1053 {
1054         const ModelAction *write = curr->get_node()->get_relseq_break();
1055         struct release_seq *sequence = pending_rel_seqs->back();
1056         pending_rel_seqs->pop_back();
1057         ASSERT(sequence);
1058         ModelAction *acquire = sequence->acquire;
1059         const ModelAction *rf = sequence->rf;
1060         const ModelAction *release = sequence->release;
1061         ASSERT(acquire);
1062         ASSERT(release);
1063         ASSERT(rf);
1064         ASSERT(release->same_thread(rf));
1065
1066         if (write == NULL) {
1067                 /**
1068                  * @todo Forcing a synchronization requires that we set
1069                  * modification order constraints. For instance, we can't allow
1070                  * a fixup sequence in which two separate read-acquire
1071                  * operations read from the same sequence, where the first one
1072                  * synchronizes and the other doesn't. Essentially, we can't
1073                  * allow any writes to insert themselves between 'release' and
1074                  * 'rf'
1075                  */
1076
1077                 /* Must synchronize */
1078                 if (!acquire->synchronize_with(release)) {
1079                         set_bad_synchronization();
1080                         return;
1081                 }
1082                 /* Re-check all pending release sequences */
1083                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1084                 /* Re-check act for mo_graph edges */
1085                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1086
1087                 /* propagate synchronization to later actions */
1088                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1089                 for (; (*rit) != acquire; rit++) {
1090                         ModelAction *propagate = *rit;
1091                         if (acquire->happens_before(propagate)) {
1092                                 propagate->synchronize_with(acquire);
1093                                 /* Re-check 'propagate' for mo_graph edges */
1094                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1095                         }
1096                 }
1097         } else {
1098                 /* Break release sequence with new edges:
1099                  *   release --mo--> write --mo--> rf */
1100                 mo_graph->addEdge(release, write);
1101                 mo_graph->addEdge(write, rf);
1102         }
1103
1104         /* See if we have realized a data race */
1105         checkDataRaces();
1106 }
1107
1108 /**
1109  * Initialize the current action by performing one or more of the following
1110  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1111  * in the NodeStack, manipulating backtracking sets, allocating and
1112  * initializing clock vectors, and computing the promises to fulfill.
1113  *
1114  * @param curr The current action, as passed from the user context; may be
1115  * freed/invalidated after the execution of this function, with a different
1116  * action "returned" in its place (pass-by-reference)
1117  * @return True if curr is a newly-explored action; false otherwise
1118  */
1119 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1120 {
1121         ModelAction *newcurr;
1122
1123         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1124                 newcurr = process_rmw(*curr);
1125                 delete *curr;
1126
1127                 if (newcurr->is_rmw())
1128                         compute_promises(newcurr);
1129
1130                 *curr = newcurr;
1131                 return false;
1132         }
1133
1134         (*curr)->set_seq_number(get_next_seq_num());
1135
1136         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1137         if (newcurr) {
1138                 /* First restore type and order in case of RMW operation */
1139                 if ((*curr)->is_rmwr())
1140                         newcurr->copy_typeandorder(*curr);
1141
1142                 ASSERT((*curr)->get_location() == newcurr->get_location());
1143                 newcurr->copy_from_new(*curr);
1144
1145                 /* Discard duplicate ModelAction; use action from NodeStack */
1146                 delete *curr;
1147
1148                 /* Always compute new clock vector */
1149                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1150
1151                 *curr = newcurr;
1152                 return false; /* Action was explored previously */
1153         } else {
1154                 newcurr = *curr;
1155
1156                 /* Always compute new clock vector */
1157                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1158
1159                 /* Assign most recent release fence */
1160                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1161
1162                 /*
1163                  * Perform one-time actions when pushing new ModelAction onto
1164                  * NodeStack
1165                  */
1166                 if (newcurr->is_write())
1167                         compute_promises(newcurr);
1168                 else if (newcurr->is_relseq_fixup())
1169                         compute_relseq_breakwrites(newcurr);
1170                 else if (newcurr->is_wait())
1171                         newcurr->get_node()->set_misc_max(2);
1172                 else if (newcurr->is_notify_one()) {
1173                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1174                 }
1175                 return true; /* This was a new ModelAction */
1176         }
1177 }
1178
1179 /**
1180  * @brief Establish reads-from relation between two actions
1181  *
1182  * Perform basic operations involved with establishing a concrete rf relation,
1183  * including setting the ModelAction data and checking for release sequences.
1184  *
1185  * @param act The action that is reading (must be a read)
1186  * @param rf The action from which we are reading (must be a write)
1187  *
1188  * @return True if this read established synchronization
1189  */
1190 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1191 {
1192         act->set_read_from(rf);
1193         if (rf != NULL && act->is_acquire()) {
1194                 rel_heads_list_t release_heads;
1195                 get_release_seq_heads(act, act, &release_heads);
1196                 int num_heads = release_heads.size();
1197                 for (unsigned int i = 0; i < release_heads.size(); i++)
1198                         if (!act->synchronize_with(release_heads[i])) {
1199                                 set_bad_synchronization();
1200                                 num_heads--;
1201                         }
1202                 return num_heads > 0;
1203         }
1204         return false;
1205 }
1206
1207 /**
1208  * @brief Check whether a model action is enabled.
1209  *
1210  * Checks whether a lock or join operation would be successful (i.e., is the
1211  * lock already locked, or is the joined thread already complete). If it would
1212  * not succeed, the action is placed in a waiter list.
1213  *
1214  * @param curr is the ModelAction to check whether it is enabled.
1215  * @return a bool that indicates whether the action is enabled.
1216  */
1217 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1218         if (curr->is_lock()) {
1219                 std::mutex *lock = (std::mutex *)curr->get_location();
1220                 struct std::mutex_state *state = lock->get_state();
1221                 if (state->islocked) {
1222                         //Stick the action in the appropriate waiting queue
1223                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1224                         return false;
1225                 }
1226         } else if (curr->get_type() == THREAD_JOIN) {
1227                 Thread *blocking = (Thread *)curr->get_location();
1228                 if (!blocking->is_complete()) {
1229                         blocking->push_wait_list(curr);
1230                         return false;
1231                 }
1232         }
1233
1234         return true;
1235 }
1236
1237 /**
1238  * This is the heart of the model checker routine. It performs model-checking
1239  * actions corresponding to a given "current action." Among other tasks, it
1240  * calculates reads-from relationships, updates synchronization clock vectors,
1241  * forms a memory_order constraints graph, and handles replay/backtrack
1242  * execution when running permutations of previously-observed executions.
1243  *
1244  * @param curr The current action to process
1245  * @return The ModelAction that is actually executed; may be different than
1246  * curr; may be NULL, if the current action is not enabled to run
1247  */
1248 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1249 {
1250         ASSERT(curr);
1251         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1252
1253         if (!check_action_enabled(curr)) {
1254                 /* Make the execution look like we chose to run this action
1255                  * much later, when a lock/join can succeed */
1256                 get_thread(curr)->set_pending(curr);
1257                 scheduler->sleep(get_thread(curr));
1258                 return NULL;
1259         }
1260
1261         bool newly_explored = initialize_curr_action(&curr);
1262
1263         DBG();
1264         if (DBG_ENABLED())
1265                 curr->print();
1266
1267         wake_up_sleeping_actions(curr);
1268
1269         /* Add the action to lists before any other model-checking tasks */
1270         if (!second_part_of_rmw)
1271                 add_action_to_lists(curr);
1272
1273         /* Build may_read_from set for newly-created actions */
1274         if (newly_explored && curr->is_read())
1275                 build_reads_from_past(curr);
1276
1277         /* Initialize work_queue with the "current action" work */
1278         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1279         while (!work_queue.empty() && !has_asserted()) {
1280                 WorkQueueEntry work = work_queue.front();
1281                 work_queue.pop_front();
1282
1283                 switch (work.type) {
1284                 case WORK_CHECK_CURR_ACTION: {
1285                         ModelAction *act = work.action;
1286                         bool update = false; /* update this location's release seq's */
1287                         bool update_all = false; /* update all release seq's */
1288
1289                         if (process_thread_action(curr))
1290                                 update_all = true;
1291
1292                         if (act->is_read() && process_read(act, second_part_of_rmw))
1293                                 update = true;
1294
1295                         if (act->is_write() && process_write(act))
1296                                 update = true;
1297
1298                         if (act->is_fence() && process_fence(act))
1299                                 update_all = true;
1300
1301                         if (act->is_mutex_op() && process_mutex(act))
1302                                 update_all = true;
1303
1304                         if (act->is_relseq_fixup())
1305                                 process_relseq_fixup(curr, &work_queue);
1306
1307                         if (update_all)
1308                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1309                         else if (update)
1310                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1311                         break;
1312                 }
1313                 case WORK_CHECK_RELEASE_SEQ:
1314                         resolve_release_sequences(work.location, &work_queue);
1315                         break;
1316                 case WORK_CHECK_MO_EDGES: {
1317                         /** @todo Complete verification of work_queue */
1318                         ModelAction *act = work.action;
1319                         bool updated = false;
1320
1321                         if (act->is_read()) {
1322                                 const ModelAction *rf = act->get_reads_from();
1323                                 const Promise *promise = act->get_reads_from_promise();
1324                                 if (rf) {
1325                                         if (r_modification_order(act, rf))
1326                                                 updated = true;
1327                                 } else if (promise) {
1328                                         if (r_modification_order(act, promise))
1329                                                 updated = true;
1330                                 }
1331                         }
1332                         if (act->is_write()) {
1333                                 if (w_modification_order(act))
1334                                         updated = true;
1335                         }
1336                         mo_graph->commitChanges();
1337
1338                         if (updated)
1339                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1340                         break;
1341                 }
1342                 default:
1343                         ASSERT(false);
1344                         break;
1345                 }
1346         }
1347
1348         check_curr_backtracking(curr);
1349         set_backtracking(curr);
1350         return curr;
1351 }
1352
1353 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1354 {
1355         Node *currnode = curr->get_node();
1356         Node *parnode = currnode->get_parent();
1357
1358         if ((parnode && !parnode->backtrack_empty()) ||
1359                          !currnode->misc_empty() ||
1360                          !currnode->read_from_empty() ||
1361                          !currnode->future_value_empty() ||
1362                          !currnode->promise_empty() ||
1363                          !currnode->relseq_break_empty()) {
1364                 set_latest_backtrack(curr);
1365         }
1366 }
1367
1368 bool ModelChecker::promises_expired() const
1369 {
1370         for (unsigned int i = 0; i < promises->size(); i++) {
1371                 Promise *promise = (*promises)[i];
1372                 if (promise->get_expiration() < priv->used_sequence_numbers)
1373                         return true;
1374         }
1375         return false;
1376 }
1377
1378 /**
1379  * This is the strongest feasibility check available.
1380  * @return whether the current trace (partial or complete) must be a prefix of
1381  * a feasible trace.
1382  */
1383 bool ModelChecker::isfeasibleprefix() const
1384 {
1385         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1386 }
1387
1388 /**
1389  * Print diagnostic information about an infeasible execution
1390  * @param prefix A string to prefix the output with; if NULL, then a default
1391  * message prefix will be provided
1392  */
1393 void ModelChecker::print_infeasibility(const char *prefix) const
1394 {
1395         char buf[100];
1396         char *ptr = buf;
1397         if (mo_graph->checkForCycles())
1398                 ptr += sprintf(ptr, "[mo cycle]");
1399         if (priv->failed_promise)
1400                 ptr += sprintf(ptr, "[failed promise]");
1401         if (priv->too_many_reads)
1402                 ptr += sprintf(ptr, "[too many reads]");
1403         if (priv->no_valid_reads)
1404                 ptr += sprintf(ptr, "[no valid reads-from]");
1405         if (priv->bad_synchronization)
1406                 ptr += sprintf(ptr, "[bad sw ordering]");
1407         if (promises_expired())
1408                 ptr += sprintf(ptr, "[promise expired]");
1409         if (promises->size() != 0)
1410                 ptr += sprintf(ptr, "[unresolved promise]");
1411         if (ptr != buf)
1412                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1413 }
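/*
 * For illustration, an execution with a modification-order cycle and an
 * unresolved promise would be reported (under the default prefix) roughly as:
 *   Infeasible: [mo cycle][unresolved promise]
 */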
1414
1415 /**
1416  * Returns whether the current completed trace is feasible, except for pending
1417  * release sequences.
1418  */
1419 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1420 {
1421         return !is_infeasible() && promises->size() == 0;
1422 }
1423
1424 /**
1425  * Check if the current partial trace is infeasible. Does not check any
1426  * end-of-execution flags, which might rule out the execution. Thus, this is
1427  * useful only for ruling an execution as infeasible.
1428  * @return whether the current partial trace is infeasible.
1429  */
1430 bool ModelChecker::is_infeasible() const
1431 {
1432         return mo_graph->checkForCycles() ||
1433                 priv->no_valid_reads ||
1434                 priv->failed_promise ||
1435                 priv->too_many_reads ||
1436                 priv->bad_synchronization ||
1437                 promises_expired();
1438 }
1439
1440 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1441 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1442         ModelAction *lastread = get_last_action(act->get_tid());
1443         lastread->process_rmw(act);
1444         if (act->is_rmw()) {
1445                 if (lastread->get_reads_from())
1446                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1447                 else
1448                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1449                 mo_graph->commitChanges();
1450         }
1451         return lastread;
1452 }
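/*
 * Rough intuition for the edge added above: a successful RMW must be
 * modification-ordered immediately after the store it reads from, so
 * addRMWEdge() records that constraint (reads-from --mo--> lastread) in the
 * mo_graph as soon as the RMWR is closed out.
 */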
1453
1454 /**
1455  * Checks whether a thread has read from the same write too many times
1456  * without seeing the effects of a later write.
1457  *
1458  * Basic idea:
1459  * 1) there must be a different write that we could read from that would satisfy the modification order,
1460  * 2) we must have read from the same value in excess of maxreads times, and
1461  * 3) that other write must have been in the reads_from set for maxreads times.
1462  *
1463  * If so, we decide that the execution is no longer feasible.
1464  */
1465 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1466 {
1467         if (params.maxreads != 0) {
1468                 if (curr->get_node()->get_read_from_size() <= 1)
1469                         return;
1470                 //Must make sure that the execution is currently feasible... we could
1471                 //accidentally clear the infeasibility by rolling back
1472                 if (is_infeasible())
1473                         return;
1474                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1475                 int tid = id_to_int(curr->get_tid());
1476
1477                 /* Skip checks */
1478                 if ((int)thrd_lists->size() <= tid)
1479                         return;
1480                 action_list_t *list = &(*thrd_lists)[tid];
1481
1482                 action_list_t::reverse_iterator rit = list->rbegin();
1483                 /* Find curr */
1484                 for (; (*rit) != curr; rit++)
1485                         ;
1486                 /* Step past curr */
1487                 rit++;
1488
1489                 action_list_t::reverse_iterator ritcopy = rit;
1490                 //See if we have enough reads from the same write
1491                 int count = 0;
1492                 for (; count < params.maxreads; rit++, count++) {
1493                         if (rit == list->rend())
1494                                 return;
1495                         ModelAction *act = *rit;
1496                         if (!act->is_read())
1497                                 return;
1498
1499                         if (act->get_reads_from() != rf)
1500                                 return;
1501                         if (act->get_node()->get_read_from_size() <= 1)
1502                                 return;
1503                 }
1504                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1505                         /* Get write */
1506                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1507
1508                         /* Need a different write */
1509                         if (write == rf)
1510                                 continue;
1511
1512                         /* Test to see whether this is a feasible write to read from */
1513                         /** NOTE: all members of the read-from set should be
1514                          *  feasible, so we no longer check that here */
1515
1516                         rit = ritcopy;
1517
1518                         bool feasiblewrite = true;
1519                         //now we need to see if this write works for everyone
1520
1521                         for (int loop = count; loop > 0; loop--, rit++) {
1522                                 ModelAction *act = *rit;
1523                                 bool foundvalue = false;
1524                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1525                                         if (act->get_node()->get_read_from_at(j) == write) {
1526                                                 foundvalue = true;
1527                                                 break;
1528                                         }
1529                                 }
1530                                 if (!foundvalue) {
1531                                         feasiblewrite = false;
1532                                         break;
1533                                 }
1534                         }
1535                         if (feasiblewrite) {
1536                                 priv->too_many_reads = true;
1537                                 return;
1538                         }
1539                 }
1540         }
1541 }
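/*
 * A small, hypothetical illustration of the check above: with maxreads set to
 * 2, suppose a thread performs three consecutive loads of x that all read from
 * the same store W, while a different store W' to x appears in the
 * may-read-from set of each of those loads. Every load could have read from W'
 * instead, so the execution is flagged with too_many_reads rather than being
 * explored further.
 */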
1542
1543 /**
1544  * Updates the mo_graph with the constraints imposed from the current
1545  * read.
1546  *
1547  * Basic idea is the following: Go through each other thread and find
1548  * the last action that happened before our read.  Two cases:
1549  *
1550  * (1) The action is a write => that write must either occur before
1551  * the write we read from or be the write we read from.
1552  *
1553  * (2) The action is a read => the write that that action read from
1554  * must occur before the write we read from or be the same write.
1555  *
1556  * @param curr The current action. Must be a read.
1557  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1558  * @return True if modification order edges were added; false otherwise
1559  */
1560 template <typename rf_type>
1561 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1562 {
1563         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1564         unsigned int i;
1565         bool added = false;
1566         ASSERT(curr->is_read());
1567
1568         /* Last SC fence in the current thread */
1569         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1570
1571         /* Iterate over all threads */
1572         for (i = 0; i < thrd_lists->size(); i++) {
1573                 /* Last SC fence in thread i */
1574                 ModelAction *last_sc_fence_thread_local = NULL;
1575                 if (int_to_id((int)i) != curr->get_tid())
1576                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1577
1578                 /* Last SC fence in thread i, before last SC fence in current thread */
1579                 ModelAction *last_sc_fence_thread_before = NULL;
1580                 if (last_sc_fence_local)
1581                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1582
1583                 /* Iterate over actions in thread, starting from most recent */
1584                 action_list_t *list = &(*thrd_lists)[i];
1585                 action_list_t::reverse_iterator rit;
1586                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1587                         ModelAction *act = *rit;
1588
1589                         if (act->is_write() && !act->equals(rf) && act != curr) {
1590                                 /* C++, Section 29.3 statement 5 */
1591                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1592                                                 *act < *last_sc_fence_thread_local) {
1593                                         added = mo_graph->addEdge(act, rf) || added;
1594                                         break;
1595                                 }
1596                                 /* C++, Section 29.3 statement 4 */
1597                                 else if (act->is_seqcst() && last_sc_fence_local &&
1598                                                 *act < *last_sc_fence_local) {
1599                                         added = mo_graph->addEdge(act, rf) || added;
1600                                         break;
1601                                 }
1602                                 /* C++, Section 29.3 statement 6 */
1603                                 else if (last_sc_fence_thread_before &&
1604                                                 *act < *last_sc_fence_thread_before) {
1605                                         added = mo_graph->addEdge(act, rf) || added;
1606                                         break;
1607                                 }
1608                         }
1609
1610                         /*
1611                          * Include at most one act per-thread that "happens
1612                          * before" curr. Don't consider reflexively.
1613                          */
1614                         if (act->happens_before(curr) && act != curr) {
1615                                 if (act->is_write()) {
1616                                         if (!act->equals(rf)) {
1617                                                 added = mo_graph->addEdge(act, rf) || added;
1618                                         }
1619                                 } else {
1620                                         const ModelAction *prevreadfrom = act->get_reads_from();
1621                                         //if the previous read is unresolved, keep going...
1622                                         if (prevreadfrom == NULL)
1623                                                 continue;
1624
1625                                         if (!prevreadfrom->equals(rf)) {
1626                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1627                                         }
1628                                 }
1629                                 break;
1630                         }
1631                 }
1632         }
1633
1634         /*
1635          * All compatible, thread-exclusive promises must be ordered after any
1636          * concrete loads from the same thread
1637          */
1638         for (unsigned int i = 0; i < promises->size(); i++)
1639                 if ((*promises)[i]->is_compatible_exclusive(curr))
1640                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1641
1642         return added;
1643 }
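/*
 * A rough sketch of case (1) above, with made-up names: if a store W1 to x
 * happens-before the current read R of x, and R reads from a different store
 * W2, then W1 must precede W2 in modification order, so the edge
 * W1 --mo--> W2 is added. Case (2) is analogous: if a read R1 of x
 * happens-before R and R1 read from W1, the same W1 --mo--> W2 edge applies.
 */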
1644
1645 /**
1646  * Updates the mo_graph with the constraints imposed from the current write.
1647  *
1648  * Basic idea is the following: Go through each other thread and find
1649  * the latest action that happened before our write.  Two cases:
1650  *
1651  * (1) The action is a write => that write must occur before
1652  * the current write
1653  *
1654  * (2) The action is a read => the write that that action read from
1655  * must occur before the current write.
1656  *
1657  * This method also handles two other issues:
1658  *
1659  * (I) Sequential Consistency: Making sure that if the current write is
1660  * seq_cst, that it occurs after the previous seq_cst write.
1661  *
1662  * (II) Sending the write back to non-synchronizing reads.
1663  *
1664  * @param curr The current action. Must be a write.
1665  * @return True if modification order edges were added; false otherwise
1666  */
1667 bool ModelChecker::w_modification_order(ModelAction *curr)
1668 {
1669         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1670         unsigned int i;
1671         bool added = false;
1672         ASSERT(curr->is_write());
1673
1674         if (curr->is_seqcst()) {
1675                 /* We have to at least see the last sequentially consistent write,
1676                          so we are initialized. */
1677                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1678                 if (last_seq_cst != NULL) {
1679                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1680                 }
1681         }
1682
1683         /* Last SC fence in the current thread */
1684         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1685
1686         /* Iterate over all threads */
1687         for (i = 0; i < thrd_lists->size(); i++) {
1688                 /* Last SC fence in thread i, before last SC fence in current thread */
1689                 ModelAction *last_sc_fence_thread_before = NULL;
1690                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1691                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1692
1693                 /* Iterate over actions in thread, starting from most recent */
1694                 action_list_t *list = &(*thrd_lists)[i];
1695                 action_list_t::reverse_iterator rit;
1696                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1697                         ModelAction *act = *rit;
1698                         if (act == curr) {
1699                                 /*
1700                                  * 1) If RMW and it actually read from something, then we
1701                                  * already have all relevant edges, so just skip to next
1702                                  * thread.
1703                                  *
1704                                  * 2) If RMW and it didn't read from anything, we should take
1705                                  * whatever edge we can get to speed up convergence.
1706                                  *
1707                                  * 3) If normal write, we need to look at earlier actions, so
1708                                  * continue processing list.
1709                                  */
1710                                 if (curr->is_rmw()) {
1711                                         if (curr->get_reads_from() != NULL)
1712                                                 break;
1713                                         else
1714                                                 continue;
1715                                 } else
1716                                         continue;
1717                         }
1718
1719                         /* C++, Section 29.3 statement 7 */
1720                         if (last_sc_fence_thread_before && act->is_write() &&
1721                                         *act < *last_sc_fence_thread_before) {
1722                                 added = mo_graph->addEdge(act, curr) || added;
1723                                 break;
1724                         }
1725
1726                         /*
1727                          * Include at most one act per-thread that "happens
1728                          * before" curr
1729                          */
1730                         if (act->happens_before(curr)) {
1731                                 /*
1732                                  * Note: if act is RMW, just add edge:
1733                                  *   act --mo--> curr
1734                                  * The following edge should be handled elsewhere:
1735                                  *   readfrom(act) --mo--> act
1736                                  */
1737                                 if (act->is_write())
1738                                         added = mo_graph->addEdge(act, curr) || added;
1739                                 else if (act->is_read()) {
1740                                         //if the previous read is unresolved, just keep going
1741                                         if (act->get_reads_from() == NULL)
1742                                                 continue;
1743                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1744                                 }
1745                                 break;
1746                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1747                                                      !act->same_thread(curr)) {
1748                                 /* We have an action that:
1749                                    (1) did not happen before us
1750                                    (2) is a read and we are a write
1751                                    (3) cannot synchronize with us
1752                                    (4) is in a different thread
1753                                    =>
1754                                    that read could potentially read from our write.  Note that
1755                                    these checks are overly conservative at this point, we'll
1756                                    do more checks before actually removing the
1757                                    pendingfuturevalue.
1758
1759                                  */
1760                                 if (thin_air_constraint_may_allow(curr, act)) {
1761                                         if (!is_infeasible())
1762                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1763                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1764                                                 add_future_value(curr, act);
1765                                 }
1766                         }
1767                 }
1768         }
1769
1770         /*
1771          * All compatible, thread-exclusive promises must be ordered after any
1772          * concrete stores to the same thread, or else they can be merged with
1773          * this store later
1774          */
1775         for (unsigned int i = 0; i < promises->size(); i++)
1776                 if ((*promises)[i]->is_compatible_exclusive(curr))
1777                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1778
1779         return added;
1780 }
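/*
 * A rough sketch of the per-thread scan above, with made-up names: for a
 * current store W to x, the most recent action A in another thread that
 * happens-before W contributes one edge, either A --mo--> W when A is a store
 * to x, or readfrom(A) --mo--> W when A is a read of x. Seq_cst stores
 * additionally pick up an edge from the previous seq_cst store to x.
 */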
1781
1782 /** Arbitrary reads from the future are not allowed.  Section 29.3
1783  * part 9 places some constraints.  This method checks one result of that
1784  * constraint.  Others require compiler support. */
1785 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1786 {
1787         if (!writer->is_rmw())
1788                 return true;
1789
1790         if (!reader->is_rmw())
1791                 return true;
1792
1793         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1794                 if (search == reader)
1795                         return false;
1796                 if (search->get_tid() == reader->get_tid() &&
1797                                 search->happens_before(reader))
1798                         break;
1799         }
1800
1801         return true;
1802 }
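/*
 * Intuition for the check above, with made-up names: if writer and reader are
 * both RMWs on x and walking writer's reads-from chain backwards reaches
 * reader, then letting reader take writer's value would close a cycle of RMWs
 * feeding each other values "out of thin air", so the pairing is disallowed.
 */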
1803
1804 /**
1805  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1806  * some constraints. This method checks the following constraint (others
1807  * require compiler support):
1808  *
1809  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1810  */
1811 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1812 {
1813         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1814         unsigned int i;
1815         /* Iterate over all threads */
1816         for (i = 0; i < thrd_lists->size(); i++) {
1817                 const ModelAction *write_after_read = NULL;
1818
1819                 /* Iterate over actions in thread, starting from most recent */
1820                 action_list_t *list = &(*thrd_lists)[i];
1821                 action_list_t::reverse_iterator rit;
1822                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1823                         ModelAction *act = *rit;
1824
1825                         /* Don't disallow due to act == reader */
1826                         if (!reader->happens_before(act) || reader == act)
1827                                 break;
1828                         else if (act->is_write())
1829                                 write_after_read = act;
1830                         else if (act->is_read() && act->get_reads_from() != NULL)
1831                                 write_after_read = act->get_reads_from();
1832                 }
1833
1834                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1835                         return false;
1836         }
1837         return true;
1838 }
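/*
 * A small, hypothetical instance of the rule above: if the prospective reader
 * X happens-before a store Y to the same location, and Y can reach the
 * prospective writer Z in the current mo_graph, then X reading from Z would
 * violate X --hb-> Y --mo-> Z, so mo_may_allow() returns false.
 */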
1839
1840 /**
1841  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1842  * The ModelAction under consideration is expected to be taking part in
1843  * release/acquire synchronization as an object of the "reads from" relation.
1844  * Note that this can only provide release sequence support for RMW chains
1845  * which do not read from the future, as those actions cannot be traced until
1846  * their "promise" is fulfilled. Similarly, we may not even establish the
1847  * presence of a release sequence with certainty, as some modification order
1848  * constraints may be decided further in the future. Thus, this function
1849  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1850  * and a boolean representing certainty.
1851  *
1852  * @param rf The action that might be part of a release sequence. Must be a
1853  * write.
1854  * @param release_heads A pass-by-reference style return parameter. After
1855  * execution of this function, release_heads will contain the heads of all the
1856  * relevant release sequences, if any exist with certainty
1857  * @param pending A pass-by-reference style return parameter which is only used
1858  * when returning false (i.e., uncertain). Returns the relevant information regarding
1859  * an uncertain release sequence, including any write operations that might
1860  * break the sequence.
1861  * @return true, if the ModelChecker is certain that release_heads is complete;
1862  * false otherwise
1863  */
1864 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1865                 rel_heads_list_t *release_heads,
1866                 struct release_seq *pending) const
1867 {
1868         /* Only check for release sequences if there are no cycles */
1869         if (mo_graph->checkForCycles())
1870                 return false;
1871
1872         while (rf) {
1873                 ASSERT(rf->is_write());
1874
1875                 if (rf->is_release())
1876                         release_heads->push_back(rf);
1877                 else if (rf->get_last_fence_release())
1878                         release_heads->push_back(rf->get_last_fence_release());
1879                 if (!rf->is_rmw())
1880                         break; /* End of RMW chain */
1881
1882                 /** @todo Need to be smarter here...  In the linux lock
1883                  * example, this will run to the beginning of the program for
1884                  * every acquire. */
1885                 /** @todo The way to be smarter here is to keep going until 1
1886                  * thread has a release preceded by an acquire and you've seen
1887                  *       both. */
1888
1889                 /* acq_rel RMW is a sufficient stopping condition */
1890                 if (rf->is_acquire() && rf->is_release())
1891                         return true; /* complete */
1892
1893                 rf = rf->get_reads_from();
1894         }
1895         if (!rf) {
1896                 /* read from future: need to settle this later */
1897                 pending->rf = NULL;
1898                 return false; /* incomplete */
1899         }
1900
1901         if (rf->is_release())
1902                 return true; /* complete */
1903
1904         /* else relaxed write
1905          * - check for fence-release in the same thread (29.8, stmt. 3)
1906          * - check modification order for contiguous subsequence
1907          *   -> rf must be same thread as release */
1908
1909         const ModelAction *fence_release = rf->get_last_fence_release();
1910         /* Synchronize with a fence-release unconditionally; we don't need to
1911          * find any more "contiguous subsequence..." for it */
1912         if (fence_release)
1913                 release_heads->push_back(fence_release);
1914
1915         int tid = id_to_int(rf->get_tid());
1916         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1917         action_list_t *list = &(*thrd_lists)[tid];
1918         action_list_t::const_reverse_iterator rit;
1919
1920         /* Find rf in the thread list */
1921         rit = std::find(list->rbegin(), list->rend(), rf);
1922         ASSERT(rit != list->rend());
1923
1924         /* Find the last {write,fence}-release */
1925         for (; rit != list->rend(); rit++) {
1926                 if (fence_release && *(*rit) < *fence_release)
1927                         break;
1928                 if ((*rit)->is_release())
1929                         break;
1930         }
1931         if (rit == list->rend()) {
1932                 /* No write-release in this thread */
1933                 return true; /* complete */
1934         } else if (fence_release && *(*rit) < *fence_release) {
1935                 /* The fence-release is more recent (and so, "stronger") than
1936                  * the most recent write-release */
1937                 return true; /* complete */
1938         } /* else, need to establish contiguous release sequence */
1939         ModelAction *release = *rit;
1940
1941         ASSERT(rf->same_thread(release));
1942
1943         pending->writes.clear();
1944
1945         bool certain = true;
1946         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1947                 if (id_to_int(rf->get_tid()) == (int)i)
1948                         continue;
1949                 list = &(*thrd_lists)[i];
1950
1951                 /* Can we ensure no future writes from this thread may break
1952                  * the release seq? */
1953                 bool future_ordered = false;
1954
1955                 ModelAction *last = get_last_action(int_to_id(i));
1956                 Thread *th = get_thread(int_to_id(i));
1957                 if ((last && rf->happens_before(last)) ||
1958                                 !is_enabled(th) ||
1959                                 th->is_complete())
1960                         future_ordered = true;
1961
1962                 ASSERT(!th->is_model_thread() || future_ordered);
1963
1964                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1965                         const ModelAction *act = *rit;
1966                         /* Reach synchronization -> this thread is complete */
1967                         if (act->happens_before(release))
1968                                 break;
1969                         if (rf->happens_before(act)) {
1970                                 future_ordered = true;
1971                                 continue;
1972                         }
1973
1974                         /* Only non-RMW writes can break release sequences */
1975                         if (!act->is_write() || act->is_rmw())
1976                                 continue;
1977
1978                         /* Check modification order */
1979                         if (mo_graph->checkReachable(rf, act)) {
1980                                 /* rf --mo--> act */
1981                                 future_ordered = true;
1982                                 continue;
1983                         }
1984                         if (mo_graph->checkReachable(act, release))
1985                                 /* act --mo--> release */
1986                                 break;
1987                         if (mo_graph->checkReachable(release, act) &&
1988                                       mo_graph->checkReachable(act, rf)) {
1989                                 /* release --mo-> act --mo--> rf */
1990                                 return true; /* complete */
1991                         }
1992                         /* act may break release sequence */
1993                         pending->writes.push_back(act);
1994                         certain = false;
1995                 }
1996                 if (!future_ordered)
1997                         certain = false; /* This thread is uncertain */
1998         }
1999
2000         if (certain) {
2001                 release_heads->push_back(release);
2002                 pending->writes.clear();
2003         } else {
2004                 pending->release = release;
2005                 pending->rf = rf;
2006         }
2007         return certain;
2008 }
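/*
 * For intuition, a textbook release sequence, written with standard C++11
 * atomics (thread names are made up):
 *
 *   std::atomic<int> x(0);
 *   // T1:
 *   x.store(1, std::memory_order_release);     // release head
 *   // T2:
 *   x.fetch_add(1, std::memory_order_relaxed); // RMW continues the sequence
 *   // T3:
 *   x.load(std::memory_order_acquire);         // reading T2's RMW still
 *                                               // synchronizes with T1's store
 *
 * The walk above follows the RMW chain back toward such a head; the harder,
 * "uncertain" cases arise for plain relaxed stores, where a contiguous
 * same-thread subsequence in modification order must be established instead.
 */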
2009
2010 /**
2011  * An interface for getting the release sequence head(s) with which a
2012  * given ModelAction must synchronize. This function only returns a non-empty
2013  * result when it can locate a release sequence head with certainty. Otherwise,
2014  * it may mark the internal state of the ModelChecker so that it will handle
2015  * the release sequence at a later time, causing @a acquire to update its
2016  * synchronization at some later point in execution.
2017  *
2018  * @param acquire The 'acquire' action that may synchronize with a release
2019  * sequence
2020  * @param read The read action that may read from a release sequence; this may
2021  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2022  * when 'acquire' is a fence-acquire)
2023  * @param release_heads A pass-by-reference return parameter. Will be filled
2024  * with the head(s) of the release sequence(s), if they exist with certainty.
2025  * @see ModelChecker::release_seq_heads
2026  */
2027 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2028                 ModelAction *read, rel_heads_list_t *release_heads)
2029 {
2030         const ModelAction *rf = read->get_reads_from();
2031         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2032         sequence->acquire = acquire;
2033         sequence->read = read;
2034
2035         if (!release_seq_heads(rf, release_heads, sequence)) {
2036                 /* add act to 'lazy checking' list */
2037                 pending_rel_seqs->push_back(sequence);
2038         } else {
2039                 snapshot_free(sequence);
2040         }
2041 }
2042
2043 /**
2044  * Attempt to resolve all stashed operations that might synchronize with a
2045  * release sequence for a given location. This implements the "lazy" portion of
2046  * determining whether or not a release sequence was contiguous, since not all
2047  * modification order information is present at the time an action occurs.
2048  *
2049  * @param location The location/object that should be checked for release
2050  * sequence resolutions. A NULL value means to check all locations.
2051  * @param work_queue The work queue to which to add work items as they are
2052  * generated
2053  * @return True if any updates occurred (new synchronization, new mo_graph
2054  * edges)
2055  */
2056 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2057 {
2058         bool updated = false;
2059         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2060         while (it != pending_rel_seqs->end()) {
2061                 struct release_seq *pending = *it;
2062                 ModelAction *acquire = pending->acquire;
2063                 const ModelAction *read = pending->read;
2064
2065                 /* Only resolve sequences on the given location, if provided */
2066                 if (location && read->get_location() != location) {
2067                         it++;
2068                         continue;
2069                 }
2070
2071                 const ModelAction *rf = read->get_reads_from();
2072                 rel_heads_list_t release_heads;
2073                 bool complete;
2074                 complete = release_seq_heads(rf, &release_heads, pending);
2075                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2076                         if (!acquire->has_synchronized_with(release_heads[i])) {
2077                                 if (acquire->synchronize_with(release_heads[i]))
2078                                         updated = true;
2079                                 else
2080                                         set_bad_synchronization();
2081                         }
2082                 }
2083
2084                 if (updated) {
2085                         /* Re-check all pending release sequences */
2086                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2087                         /* Re-check read-acquire for mo_graph edges */
2088                         if (acquire->is_read())
2089                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2090
2091                         /* propagate synchronization to later actions */
2092                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2093                         for (; (*rit) != acquire; rit++) {
2094                                 ModelAction *propagate = *rit;
2095                                 if (acquire->happens_before(propagate)) {
2096                                         propagate->synchronize_with(acquire);
2097                                         /* Re-check 'propagate' for mo_graph edges */
2098                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2099                                 }
2100                         }
2101                 }
2102                 if (complete) {
2103                         it = pending_rel_seqs->erase(it);
2104                         snapshot_free(pending);
2105                 } else {
2106                         it++;
2107                 }
2108         }
2109
2110         // If we resolved any new synchronization, check whether we have realized a data race.
2111         checkDataRaces();
2112
2113         return updated;
2114 }
2115
2116 /**
2117  * Performs various bookkeeping operations for the current ModelAction. For
2118  * instance, adds action to the per-object, per-thread action vector and to the
2119  * action trace list of all thread actions.
2120  *
2121  * @param act is the ModelAction to add.
2122  */
2123 void ModelChecker::add_action_to_lists(ModelAction *act)
2124 {
2125         int tid = id_to_int(act->get_tid());
2126         ModelAction *uninit = NULL;
2127         int uninit_id = -1;
2128         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2129         if (list->empty() && act->is_atomic_var()) {
2130                 uninit = new_uninitialized_action(act->get_location());
2131                 uninit_id = id_to_int(uninit->get_tid());
2132                 list->push_back(uninit);
2133         }
2134         list->push_back(act);
2135
2136         action_trace->push_back(act);
2137         if (uninit)
2138                 action_trace->push_front(uninit);
2139
2140         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2141         if (tid >= (int)vec->size())
2142                 vec->resize(priv->next_thread_id);
2143         (*vec)[tid].push_back(act);
2144         if (uninit)
2145                 (*vec)[uninit_id].push_front(uninit);
2146
2147         if ((int)thrd_last_action->size() <= tid)
2148                 thrd_last_action->resize(get_num_threads());
2149         (*thrd_last_action)[tid] = act;
2150         if (uninit)
2151                 (*thrd_last_action)[uninit_id] = uninit;
2152
2153         if (act->is_fence() && act->is_release()) {
2154                 if ((int)thrd_last_fence_release->size() <= tid)
2155                         thrd_last_fence_release->resize(get_num_threads());
2156                 (*thrd_last_fence_release)[tid] = act;
2157         }
2158
2159         if (act->is_wait()) {
2160                 void *mutex_loc = (void *) act->get_value();
2161                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2162
2163                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2164                 if (tid >= (int)vec->size())
2165                         vec->resize(priv->next_thread_id);
2166                 (*vec)[tid].push_back(act);
2167         }
2168 }
2169
2170 /**
2171  * @brief Get the last action performed by a particular Thread
2172  * @param tid The thread ID of the Thread in question
2173  * @return The last action in the thread
2174  */
2175 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2176 {
2177         int threadid = id_to_int(tid);
2178         if (threadid < (int)thrd_last_action->size())
2179                 return (*thrd_last_action)[id_to_int(tid)];
2180         else
2181                 return NULL;
2182 }
2183
2184 /**
2185  * @brief Get the last fence release performed by a particular Thread
2186  * @param tid The thread ID of the Thread in question
2187  * @return The last fence release in the thread, if one exists; NULL otherwise
2188  */
2189 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2190 {
2191         int threadid = id_to_int(tid);
2192         if (threadid < (int)thrd_last_fence_release->size())
2193                 return (*thrd_last_fence_release)[id_to_int(tid)];
2194         else
2195                 return NULL;
2196 }
2197
2198 /**
2199  * Gets the last memory_order_seq_cst write (in the total global sequence)
2200  * performed on a particular object (i.e., memory location), not including the
2201  * current action.
2202  * @param curr The current ModelAction; also denotes the object location to
2203  * check
2204  * @return The last seq_cst write
2205  */
2206 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2207 {
2208         void *location = curr->get_location();
2209         action_list_t *list = get_safe_ptr_action(obj_map, location);
2210         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2211         action_list_t::reverse_iterator rit;
2212         for (rit = list->rbegin(); rit != list->rend(); rit++)
2213                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2214                         return *rit;
2215         return NULL;
2216 }
2217
2218 /**
2219  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2220  * performed in a particular thread, prior to a particular fence.
2221  * @param tid The ID of the thread to check
2222  * @param before_fence The fence from which to begin the search; if NULL, then
2223  * search for the most recent fence in the thread.
2224  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2225  */
2226 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2227 {
2228         /* All fences should have NULL location */
2229         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2230         action_list_t::reverse_iterator rit = list->rbegin();
2231
2232         if (before_fence) {
2233                 for (; rit != list->rend(); rit++)
2234                         if (*rit == before_fence)
2235                                 break;
2236
2237                 ASSERT(*rit == before_fence);
2238                 rit++;
2239         }
2240
2241         for (; rit != list->rend(); rit++)
2242                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2243                         return *rit;
2244         return NULL;
2245 }
2246
2247 /**
2248  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2249  * location). This function identifies the mutex according to the current
2250  * action, which is presumed to operate on the same mutex.
2251  * @param curr The current ModelAction; also denotes the object location to
2252  * check
2253  * @return The last unlock operation
2254  */
2255 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2256 {
2257         void *location = curr->get_location();
2258         action_list_t *list = get_safe_ptr_action(obj_map, location);
2259         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2260         action_list_t::reverse_iterator rit;
2261         for (rit = list->rbegin(); rit != list->rend(); rit++)
2262                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2263                         return *rit;
2264         return NULL;
2265 }
2266
2267 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2268 {
2269         ModelAction *parent = get_last_action(tid);
2270         if (!parent)
2271                 parent = get_thread(tid)->get_creation();
2272         return parent;
2273 }
2274
2275 /**
2276  * Returns the clock vector for a given thread.
2277  * @param tid The thread whose clock vector we want
2278  * @return Desired clock vector
2279  */
2280 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2281 {
2282         return get_parent_action(tid)->get_cv();
2283 }
2284
2285 /**
2286  * Resolve a set of Promises with a current write. The set is provided in the
2287  * Node corresponding to @a write.
2288  * @param write The ModelAction that is fulfilling Promises
2289  * @return True if promises were resolved; false otherwise
2290  */
2291 bool ModelChecker::resolve_promises(ModelAction *write)
2292 {
2293         bool haveResolved = false;
2294         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2295         promise_list_t mustResolve, resolved;
2296
2297         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2298                 Promise *promise = (*promises)[promise_index];
2299                 if (write->get_node()->get_promise(i)) {
2300                         ModelAction *read = promise->get_action();
2301                         read_from(read, write);
2302                         //Make sure the promise's value matches the write's value
2303                         ASSERT(promise->is_compatible(write));
2304                         mo_graph->resolvePromise(read, write, &mustResolve);
2305
2306                         resolved.push_back(promise);
2307                         promises->erase(promises->begin() + promise_index);
2308                         actions_to_check.push_back(read);
2309
2310                         haveResolved = true;
2311                 } else
2312                         promise_index++;
2313         }
2314
2315         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2316                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2317                                 == resolved.end())
2318                         priv->failed_promise = true;
2319         }
2320         for (unsigned int i = 0; i < resolved.size(); i++)
2321                 delete resolved[i];
2322         //Check whether reading these writes has made threads unable to
2323         //resolve promises
2324
2325         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2326                 ModelAction *read = actions_to_check[i];
2327                 mo_check_promises(read, true);
2328         }
2329
2330         return haveResolved;
2331 }
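/*
 * For intuition, a promise (see promise.h) stands for a load that has
 * speculatively returned a value whose store has not yet appeared in the
 * trace; e.g., a load of x returning 1 on the promise that some other thread
 * will later store 1 to x. resolve_promises() is what pairs such loads with
 * the matching store once it finally executes.
 */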
2332
2333 /**
2334  * Compute the set of promises that could potentially be satisfied by this
2335  * action. Note that the set computation actually appears in the Node, not in
2336  * ModelChecker.
2337  * @param curr The ModelAction that may satisfy promises
2338  */
2339 void ModelChecker::compute_promises(ModelAction *curr)
2340 {
2341         for (unsigned int i = 0; i < promises->size(); i++) {
2342                 Promise *promise = (*promises)[i];
2343                 const ModelAction *act = promise->get_action();
2344                 if (!act->happens_before(curr) &&
2345                                 act->is_read() &&
2346                                 !act->could_synchronize_with(curr) &&
2347                                 !act->same_thread(curr) &&
2348                                 act->get_location() == curr->get_location() &&
2349                                 promise->get_value() == curr->get_value()) {
2350                         curr->get_node()->set_promise(i, act->is_rmw());
2351                 }
2352         }
2353 }
2354
2355 /** Checks promises in response to a change in a thread's ClockVector. */
2356 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2357 {
2358         for (unsigned int i = 0; i < promises->size(); i++) {
2359                 Promise *promise = (*promises)[i];
2360                 const ModelAction *act = promise->get_action();
2361                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2362                                 merge_cv->synchronized_since(act)) {
2363                         if (promise->eliminate_thread(tid)) {
2364                                 //Promise has failed
2365                                 priv->failed_promise = true;
2366                                 return;
2367                         }
2368                 }
2369         }
2370 }
2371
2372 void ModelChecker::check_promises_thread_disabled()
2373 {
2374         for (unsigned int i = 0; i < promises->size(); i++) {
2375                 Promise *promise = (*promises)[i];
2376                 if (promise->has_failed()) {
2377                         priv->failed_promise = true;
2378                         return;
2379                 }
2380         }
2381 }
2382
2383 /**
2384  * @brief Checks promises in response to addition to modification order for
2385  * threads.
2386  *
2387  * We test whether threads are still available for satisfying promises after an
2388  * addition to our modification order constraints. Those that are unavailable
2389  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2390  * that promise has failed.
2391  *
2392  * @param act The ModelAction which updated the modification order
2393  * @param is_read_check Should be true if act is a read and we must check for
2394  * updates to the store from which it read (there is a distinction here for
2395  * RMW's, which are both a load and a store)
2396  */
2397 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2398 {
2399         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2400
2401         for (unsigned int i = 0; i < promises->size(); i++) {
2402                 Promise *promise = (*promises)[i];
2403                 const ModelAction *pread = promise->get_action();
2404
2405                 // Is this promise on the same location?
2406                 if (!pread->same_var(write))
2407                         continue;
2408
2409                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2410                         priv->failed_promise = true;
2411                         return;
2412                 }
2413
2414                 // Don't do any lookups twice for the same thread
2415                 if (!promise->thread_is_available(act->get_tid()))
2416                         continue;
2417
2418                 if (mo_graph->checkReachable(promise, write)) {
2419                         if (mo_graph->checkPromise(write, promise)) {
2420                                 priv->failed_promise = true;
2421                                 return;
2422                         }
2423                 }
2424         }
2425 }
2426
2427 /**
2428  * Compute the set of writes that may break the current pending release
2429  * sequence. This information is extracted from previous release sequence
2430  * calculations.
2431  *
2432  * @param curr The current ModelAction. Must be a release sequence fixup
2433  * action.
2434  */
2435 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2436 {
2437         if (pending_rel_seqs->empty())
2438                 return;
2439
2440         struct release_seq *pending = pending_rel_seqs->back();
2441         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2442                 const ModelAction *write = pending->writes[i];
2443                 curr->get_node()->add_relseq_break(write);
2444         }
2445
2446         /* NULL means don't break the sequence; just synchronize */
2447         curr->get_node()->add_relseq_break(NULL);
2448 }
2449
2450 /**
2451  * Build up an initial set of all past writes that this 'read' action may read
2452  * from. This set is determined by the clock vector's "happens before"
2453  * relationship.
2454  * @param curr is the current ModelAction that we are exploring; it must be a
2455  * 'read' operation.
2456  */
2457 void ModelChecker::build_reads_from_past(ModelAction *curr)
2458 {
2459         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2460         unsigned int i;
2461         ASSERT(curr->is_read());
2462
2463         ModelAction *last_sc_write = NULL;
2464
2465         if (curr->is_seqcst())
2466                 last_sc_write = get_last_seq_cst_write(curr);
2467
2468         /* Iterate over all threads */
2469         for (i = 0; i < thrd_lists->size(); i++) {
2470                 /* Iterate over actions in thread, starting from most recent */
2471                 action_list_t *list = &(*thrd_lists)[i];
2472                 action_list_t::reverse_iterator rit;
2473                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2474                         ModelAction *act = *rit;
2475
2476                         /* Only consider 'write' actions */
2477                         if (!act->is_write() || act == curr)
2478                                 continue;
2479
2480                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2481                         bool allow_read = true;
2482
2483                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2484                                 allow_read = false;
2485                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2486                                 allow_read = false;
2487
2488                         if (allow_read) {
2489                                 /* Only add feasible reads */
2490                                 mo_graph->startChanges();
2491                                 r_modification_order(curr, act);
2492                                 if (!is_infeasible())
2493                                         curr->get_node()->add_read_from(act);
2494                                 mo_graph->rollbackChanges();
2495                         }
2496
2497                         /* Include at most one act per-thread that "happens before" curr */
2498                         if (act->happens_before(curr))
2499                                 break;
2500                 }
2501         }
2502         /* We may find no valid may-read-from only if the execution is doomed */
2503         if (!curr->get_node()->get_read_from_size()) {
2504                 priv->no_valid_reads = true;
2505                 set_assert();
2506         }
2507
2508         if (DBG_ENABLED()) {
2509                 model_print("Reached read action:\n");
2510                 curr->print();
2511                 model_print("Printing may_read_from\n");
2512                 curr->get_node()->print_may_read_from();
2513                 model_print("End printing may_read_from\n");
2514         }
2515 }
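/*
 * A rough, hypothetical example of the resulting may-read-from set: for a load
 * of x, each thread contributes its stores to x from its most recent one back
 * through (and including) the latest store that happens-before the load; older
 * stores in that thread are hidden behind that happens-before store.
 */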
2516
2517 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2518 {
2519         while (true) {
2520                 /* UNINIT actions don't have a Node, and they never sleep */
2521                 if (write->is_uninitialized())
2522                         return true;
2523                 Node *prevnode = write->get_node()->get_parent();
2524
2525                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2526                 if (write->is_release() && thread_sleep)
2527                         return true;
2528                 if (!write->is_rmw()) {
2529                         return false;
2530                 }
2531                 if (write->get_reads_from() == NULL)
2532                         return true;
2533                 write = write->get_reads_from();
2534         }
2535 }
2536
2537 /**
2538  * @brief Create a new action representing an uninitialized atomic
2539  * @param location The memory location of the atomic object
2540  * @return A pointer to a new ModelAction
2541  */
2542 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2543 {
2544         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2545         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2546         act->create_cv(NULL);
2547         return act;
2548 }
2549
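/**
 * @brief Print a list of ModelActions
 *
 * Prints each action in the list along with a simple rolling hash of the
 * trace, so that executions can be compared at a glance.
 *
 * @param list The action list to print
 */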
2550 static void print_list(action_list_t *list)
2551 {
2552         action_list_t::iterator it;
2553
2554         model_print("---------------------------------------------------------------------\n");
2555
2556         unsigned int hash = 0;
2557
2558         for (it = list->begin(); it != list->end(); it++) {
2559                 (*it)->print();
2560                 hash = hash^(hash<<3)^((*it)->hash());
2561         }
2562         model_print("HASH %u\n", hash);
2563         model_print("---------------------------------------------------------------------\n");
2564 }
2565
2566 #if SUPPORT_MOD_ORDER_DUMP
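/**
 * @brief Dump the current execution to a Graphviz (.dot) file
 *
 * Writes the modification order graph plus reads-from (rf) and
 * sequenced-before (sb) edges for the current action trace.
 *
 * @param filename Base name of the output file (".dot" is appended)
 */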
2567 void ModelChecker::dumpGraph(char *filename) const
2568 {
2569         char buffer[200];
2570         sprintf(buffer, "%s.dot", filename);
2571         FILE *file = fopen(buffer, "w");
2572         fprintf(file, "digraph %s {\n", filename);
2573         mo_graph->dumpNodes(file);
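        /* Track the most recent action in each thread, for drawing sequenced-before (sb) edges */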
2574         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2575
2576         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2577                 ModelAction *action = *it;
2578                 if (action->is_read()) {
2579                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2580                         if (action->get_reads_from() != NULL)
2581                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2582                 }
2583                 if (thread_array[action->get_tid()] != NULL) {
2584                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2585                 }
2586
2587                 thread_array[action->get_tid()] = action;
2588         }
2589         fprintf(file, "}\n");
2590         model_free(thread_array);
2591         fclose(file);
2592 }
2593 #endif
2594
2595 /** @brief Prints an execution trace summary. */
2596 void ModelChecker::print_summary() const
2597 {
2598 #if SUPPORT_MOD_ORDER_DUMP
2599         char buffername[100];
2600         sprintf(buffername, "exec%04u", stats.num_total);
2601         mo_graph->dumpGraphToFile(buffername);
2602         sprintf(buffername, "graph%04u", stats.num_total);
2603         dumpGraph(buffername);
2604 #endif
2605
2606         model_print("Execution %d:", stats.num_total);
2607         if (isfeasibleprefix())
2608                 model_print("\n");
2609         else
2610                 print_infeasibility(" INFEASIBLE");
2611         print_list(action_trace);
2612         model_print("\n");
2613 }
2614
2615 /**
2616  * Add a Thread to the system for the first time. Should only be called once
2617  * per thread.
2618  * @param t The Thread to add
2619  */
2620 void ModelChecker::add_thread(Thread *t)
2621 {
2622         thread_map->put(id_to_int(t->get_id()), t);
2623         scheduler->add_thread(t);
2624 }
2625
2626 /**
2627  * Removes a thread from the scheduler.
2628  * @param t The Thread to remove
2629  */
2630 void ModelChecker::remove_thread(Thread *t)
2631 {
2632         scheduler->remove_thread(t);
2633 }
2634
2635 /**
2636  * @brief Get a Thread reference by its ID
2637  * @param tid The Thread's ID
2638  * @return A Thread reference
2639  */
2640 Thread * ModelChecker::get_thread(thread_id_t tid) const
2641 {
2642         return thread_map->get(id_to_int(tid));
2643 }
2644
2645 /**
2646  * @brief Get a reference to the Thread in which a ModelAction was executed
2647  * @param act The ModelAction
2648  * @return A Thread reference
2649  */
2650 Thread * ModelChecker::get_thread(const ModelAction *act) const
2651 {
2652         return get_thread(act->get_tid());
2653 }
2654
2655 /**
2656  * @brief Check if a Thread is currently enabled
2657  * @param t The Thread to check
2658  * @return True if the Thread is currently enabled
2659  */
2660 bool ModelChecker::is_enabled(Thread *t) const
2661 {
2662         return scheduler->is_enabled(t);
2663 }
2664
2665 /**
2666  * @brief Check if a Thread is currently enabled
2667  * @param tid The ID of the Thread to check
2668  * @return True if the Thread is currently enabled
2669  */
2670 bool ModelChecker::is_enabled(thread_id_t tid) const
2671 {
2672         return scheduler->is_enabled(tid);
2673 }
2674
2675 /**
2676  * Switch from a model-checker context to a user-thread context. This is the
2677  * complement of ModelChecker::switch_to_master and must be called from the
2678  * model-checker context.
2679  *
2680  * @param thread The user-thread to switch to
2681  */
2682 void ModelChecker::switch_from_master(Thread *thread)
2683 {
2684         scheduler->set_current_thread(thread);
2685         Thread::swap(&system_context, thread);
2686 }
2687
2688 /**
2689  * Switch from a user-context to the "master thread" context (a.k.a. system
2690  * context). This switch is made with the intention of exploring a particular
2691  * model-checking action (described by a ModelAction object). Must be called
2692  * from a user-thread context.
2693  *
2694  * @param act The current action that will be explored. May be NULL only if
2695  * trace is exiting via an assertion (see ModelChecker::set_assert and
2696  * ModelChecker::has_asserted).
2697  * @return The value returned by the current action
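 *
 * For example (illustrative only; the names here are hypothetical), an
 * atomic-store wrapper might hand control to the model checker with:
 *   model->switch_to_master(new ModelAction(ATOMIC_WRITE, order, obj, val));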
2698  */
2699 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2700 {
2701         DBG();
2702         Thread *old = thread_current();
2703         ASSERT(!old->get_pending());
2704         old->set_pending(act);
2705         if (Thread::swap(old, &system_context) < 0) {
2706                 perror("swap threads");
2707                 exit(EXIT_FAILURE);
2708         }
2709         return old->get_return_value();
2710 }
2711
2712 /**
2713  * Takes the next step in the execution, if possible.
2714  * @param curr The current step to take
2715  * @return The next Thread to run, if any; NULL if this execution
2716  * should terminate
2717  */
2718 Thread * ModelChecker::take_step(ModelAction *curr)
2719 {
2720         Thread *curr_thrd = get_thread(curr);
2721         ASSERT(curr_thrd->get_state() == THREAD_READY);
2722
2723         curr = check_current_action(curr);
2724
2725         /* Infeasible -> don't take any more steps */
2726         if (is_infeasible())
2727                 return NULL;
2728         else if (isfeasibleprefix() && have_bug_reports()) {
2729                 set_assert();
2730                 return NULL;
2731         }
2732
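        /* Honor the user-provided bound on execution length, if any */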
2733         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2734                 return NULL;
2735
2736         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2737                 scheduler->remove_thread(curr_thrd);
2738
2739         Thread *next_thrd = get_next_thread(curr);
2740
2741         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2742                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2743
2744         return next_thrd;
2745 }
2746
2747 /** Wrapper to run the user's main function, with appropriate arguments */
2748 void user_main_wrapper(void *)
2749 {
2750         user_main(model->params.argc, model->params.argv);
2751 }
2752
2753 /** @brief Run ModelChecker for the user program */
2754 void ModelChecker::run()
2755 {
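        /* Explore one execution per iteration, until next_execution() finds nothing left to explore */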
2756         do {
2757                 thrd_t user_thread;
2758                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2759                 add_thread(t);
2760
2761                 do {
2762                         /*
2763                          * Stash next pending action(s) for thread(s). We
2764                          * should only need to stash one thread's action (the
2765                          * thread which just took a step), plus the first step
2766                          * for any newly-created thread.
2767                          */
2768                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2769                                 thread_id_t tid = int_to_id(i);
2770                                 Thread *thr = get_thread(tid);
2771                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2772                                         switch_from_master(thr);
2773                                 }
2774                         }
2775
2776                         /* Catch assertions from prior take_step or from
2777                          * between-ModelAction bugs (e.g., data races) */
2778                         if (has_asserted())
2779                                 break;
2780
2781                         /* Consume the next action for a Thread */
2782                         ModelAction *curr = t->get_pending();
2783                         t->set_pending(NULL);
2784                         t = take_step(curr);
2785                 } while (t && !t->is_model_thread());
2786
2787                 /*
2788                  * Launch end-of-execution release sequence fixups only when
2789                  * the execution is otherwise feasible AND:
2790                  *
2791                  * (1) there are pending release sequences
2792                  * (2) there are pending assertions that could be invalidated
2793                  * by a change in clock vectors (i.e., data races)
2794                  * (3) there are no pending promises
2795                  */
2796                 while (!pending_rel_seqs->empty() &&
2797                                 is_feasible_prefix_ignore_relseq() &&
2798                                 !unrealizedraces.empty()) {
2799                         model_print("*** WARNING: release sequence fixup action "
2800                                         "(%zu pending release sequence(s)) ***\n",
2801                                         pending_rel_seqs->size());
2802                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2803                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2804                                         model_thread);
2805                         take_step(fixup);
2806                 }
2807         } while (next_execution());
2808
2809         print_stats();
2810 }