70732868bd3114825f3ccc8c2db51bb57e339a92
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
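/** @brief Container for a single bug-report message; allocated via the
 *  snapshotting allocator (see SNAPSHOTALLOC / snapshot_malloc) */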
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
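/**
 * @brief Get the action list for a memory location, creating it if needed
 * @param hash The HashTable mapping locations to action lists
 * @param ptr The memory location to look up
 * @return The (possibly newly-allocated) action list for ptr
 */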
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
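/** @brief Same as get_safe_ptr_action(), but for per-thread vectors of action lists */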
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
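/** @return the Node at the head of the NodeStack (the current program point) */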
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case this will return
215  * NULL.
216  * @param curr The current ModelAction. This action might guide the choice of
217  * next thread.
218  * @return The next thread to run. If the model-checker has no preference, NULL.
219  */
220 Thread * ModelChecker::get_next_thread(ModelAction *curr)
221 {
222         thread_id_t tid;
223
224         if (curr != NULL) {
225                 /* Do not split atomic actions. */
226                 if (curr->is_rmwr())
227                         return get_thread(curr);
228                 else if (curr->get_type() == THREAD_CREATE)
229                         return curr->get_thread_operand();
230         }
231
232         /* Have we completed exploring the preselected path? */
233         if (diverge == NULL)
234                 return NULL;
235
236         /* Else, we are trying to replay an execution */
237         ModelAction *next = node_stack->get_next()->get_action();
238
239         if (next == diverge) {
240                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
241                         earliest_diverge = diverge;
242
243                 Node *nextnode = next->get_node();
244                 Node *prevnode = nextnode->get_parent();
245                 scheduler->update_sleep_set(prevnode);
246
247                 /* Reached divergence point */
248                 if (nextnode->increment_misc()) {
249                         /* The next node will try to satisfy a different misc_index value. */
250                         tid = next->get_tid();
251                         node_stack->pop_restofstack(2);
252                 } else if (nextnode->increment_promise()) {
253                         /* The next node will try to satisfy a different set of promises. */
254                         tid = next->get_tid();
255                         node_stack->pop_restofstack(2);
256                 } else if (nextnode->increment_read_from()) {
257                         /* The next node will read from a different value. */
258                         tid = next->get_tid();
259                         node_stack->pop_restofstack(2);
260                 } else if (nextnode->increment_future_value()) {
261                         /* The next node will try to read from a different future value. */
262                         tid = next->get_tid();
263                         node_stack->pop_restofstack(2);
264                 } else if (nextnode->increment_relseq_break()) {
265                         /* The next node will try to resolve a release sequence differently */
266                         tid = next->get_tid();
267                         node_stack->pop_restofstack(2);
268                 } else {
269                         ASSERT(prevnode);
270                         /* Make a different thread execute for next step */
271                         scheduler->add_sleep(get_thread(next->get_tid()));
272                         tid = prevnode->get_next_backtrack();
273                         /* Make sure the backtracked thread isn't sleeping. */
274                         node_stack->pop_restofstack(1);
275                         if (diverge == earliest_diverge) {
276                                 earliest_diverge = prevnode->get_action();
277                         }
278                 }
279                 /* The correct sleep set is in the parent node. */
280                 execute_sleep_set();
281
282                 DEBUG("*** Divergence point ***\n");
283
284                 diverge = NULL;
285         } else {
286                 tid = next->get_tid();
287         }
288         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
289         ASSERT(tid != THREAD_ID_T_NONE);
290         return thread_map->get(id_to_int(tid));
291 }
292
293 /**
294  * We need to know what the next actions of all threads in the sleep
295  * set will be.  This method computes them and stores them as the
296  * corresponding threads' pending actions.
297  */
298
299 void ModelChecker::execute_sleep_set()
300 {
301         for (unsigned int i = 0; i < get_num_threads(); i++) {
302                 thread_id_t tid = int_to_id(i);
303                 Thread *thr = get_thread(tid);
304                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
305                         thr->get_pending()->set_sleep_flag();
306                 }
307         }
308 }
309
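/**
 * @brief Remove sleeping threads from the sleep set when appropriate
 *
 * A sleeping thread is woken if the current action (excluding the read part
 * of an RMW) could synchronize with that thread's pending action.
 * @param curr The current ModelAction
 */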
310 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
311 {
312         for (unsigned int i = 0; i < get_num_threads(); i++) {
313                 Thread *thr = get_thread(int_to_id(i));
314                 if (scheduler->is_sleep_set(thr)) {
315                         ModelAction *pending_act = thr->get_pending();
316                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
317                                 //Remove this thread from sleep set
318                                 scheduler->remove_sleep(thr);
319                 }
320         }
321 }
322
323 /** @brief Alert the model-checker that an incorrectly-ordered
324  * synchronization was made */
325 void ModelChecker::set_bad_synchronization()
326 {
327         priv->bad_synchronization = true;
328 }
329
330 /**
331  * Check whether the current trace has triggered an assertion which should halt
332  * its execution.
333  *
334  * @return True, if the execution should be aborted; false otherwise
335  */
336 bool ModelChecker::has_asserted() const
337 {
338         return priv->asserted;
339 }
340
341 /**
342  * Trigger a trace assertion which should cause this execution to be halted.
343  * This can be due to a detected bug or due to an infeasibility that should
344  * halt ASAP.
345  */
346 void ModelChecker::set_assert()
347 {
348         priv->asserted = true;
349 }
350
351 /**
352  * Check if we are in a deadlock. Should only be called at the end of an
353  * execution, although it should not give false positives in the middle of an
354  * execution (there should be some ENABLED thread).
355  *
356  * @return True if program is in a deadlock; false otherwise
357  */
358 bool ModelChecker::is_deadlocked() const
359 {
360         bool blocking_threads = false;
361         for (unsigned int i = 0; i < get_num_threads(); i++) {
362                 thread_id_t tid = int_to_id(i);
363                 if (is_enabled(tid))
364                         return false;
365                 Thread *t = get_thread(tid);
366                 if (!t->is_model_thread() && t->get_pending())
367                         blocking_threads = true;
368         }
369         return blocking_threads;
370 }
371
372 /**
373  * Check if this is a complete execution. That is, have all threads completed
374  * execution (rather than exiting because sleep sets have forced a redundant
375  * execution).
376  *
377  * @return True if the execution is complete.
378  */
379 bool ModelChecker::is_complete_execution() const
380 {
381         for (unsigned int i = 0; i < get_num_threads(); i++)
382                 if (is_enabled(int_to_id(i)))
383                         return false;
384         return true;
385 }
386
387 /**
388  * @brief Assert a bug in the executing program.
389  *
390  * Use this function to assert any sort of bug in the user program. If the
391  * current trace is feasible (actually, a prefix of some feasible execution),
392  * then this execution will be aborted, printing the appropriate message. If
393  * the current trace is not yet feasible, the error message will be stashed and
394  * printed if the execution ever becomes feasible.
395  *
396  * @param msg Descriptive message for the bug (do not include newline char)
397  * @return True if the bug is immediately feasible
398  */
399 bool ModelChecker::assert_bug(const char *msg)
400 {
401         priv->bugs.push_back(new bug_message(msg));
402
403         if (isfeasibleprefix()) {
404                 set_assert();
405                 return true;
406         }
407         return false;
408 }
409
410 /**
411  * @brief Assert a bug in the executing program, asserted by a user thread
412  * @see ModelChecker::assert_bug
413  * @param msg Descriptive message for the bug (do not include newline char)
414  */
415 void ModelChecker::assert_user_bug(const char *msg)
416 {
417         /* If feasible bug, bail out now */
418         if (assert_bug(msg))
419                 switch_to_master(NULL);
420 }
421
422 /** @return True, if any bugs have been reported for this execution */
423 bool ModelChecker::have_bug_reports() const
424 {
425         return priv->bugs.size() != 0;
426 }
427
428 /** @brief Print bug report listing for this execution (if any bugs exist) */
429 void ModelChecker::print_bugs() const
430 {
431         if (have_bug_reports()) {
432                 model_print("Bug report: %zu bug%s detected\n",
433                                 priv->bugs.size(),
434                                 priv->bugs.size() > 1 ? "s" : "");
435                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
436                         priv->bugs[i]->print();
437         }
438 }
439
440 /**
441  * @brief Record end-of-execution stats
442  *
443  * Must be run when exiting an execution. Records various stats.
444  * @see struct execution_stats
445  */
446 void ModelChecker::record_stats()
447 {
448         stats.num_total++;
449         if (!isfeasibleprefix())
450                 stats.num_infeasible++;
451         else if (have_bug_reports())
452                 stats.num_buggy_executions++;
453         else if (is_complete_execution())
454                 stats.num_complete++;
455         else
456                 stats.num_redundant++;
457 }
458
459 /** @brief Print execution stats */
460 void ModelChecker::print_stats() const
461 {
462         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
463         model_print("Number of redundant executions: %d\n", stats.num_redundant);
464         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
465         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
466         model_print("Total executions: %d\n", stats.num_total);
467         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
468 }
469
470 /**
471  * @brief End-of-execution print
472  * @param printbugs Should any existing bugs be printed?
473  */
474 void ModelChecker::print_execution(bool printbugs) const
475 {
476         print_program_output();
477
478         if (DBG_ENABLED() || params.verbose) {
479                 model_print("Earliest divergence point since last feasible execution:\n");
480                 if (earliest_diverge)
481                         earliest_diverge->print();
482                 else
483                         model_print("(Not set)\n");
484
485                 model_print("\n");
486                 print_stats();
487         }
488
489         /* Don't print invalid bugs */
490         if (printbugs)
491                 print_bugs();
492
493         model_print("\n");
494         print_summary();
495 }
496
497 /**
498  * Queries the model-checker for more executions to explore and, if one
499  * exists, resets the model-checker state to execute a new execution.
500  *
501  * @return If there are more executions to explore, return true. Otherwise,
502  * return false.
503  */
504 bool ModelChecker::next_execution()
505 {
506         DBG();
507         /* Is this execution a feasible execution that's worth bug-checking? */
508         bool complete = isfeasibleprefix() && (is_complete_execution() ||
509                         have_bug_reports());
510
511         /* End-of-execution bug checks */
512         if (complete) {
513                 if (is_deadlocked())
514                         assert_bug("Deadlock detected");
515
516                 checkDataRaces();
517         }
518
519         record_stats();
520
521         /* Output */
522         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
523                 print_execution(complete);
524         else
525                 clear_program_output();
526
527         if (complete)
528                 earliest_diverge = NULL;
529
530         if ((diverge = get_next_backtrack()) == NULL)
531                 return false;
532
533         if (DBG_ENABLED()) {
534                 model_print("Next execution will diverge at:\n");
535                 diverge->print();
536         }
537
538         reset_to_initial_state();
539         return true;
540 }
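/*
 * Informal usage sketch (not part of this file): a top-level driver typically
 * runs the user program to completion and then asks for another execution in
 * a loop, e.g.:
 *
 *   do {
 *           run_one_execution();   // hypothetical helper: run user threads to completion
 *   } while (model->next_execution());
 */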
541
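/**
 * @brief Find the most recent prior action that conflicts with the given action
 *
 * Used when computing backtracking points. The notion of "conflict" depends on
 * the action type (e.g., synchronizable atomics, conflicting locks, failed
 * trylocks, waits and notifies).
 * @param act The action to check against prior actions at the same location
 * @return The most recent conflicting action, or NULL if none exists
 */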
542 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
543 {
544         switch (act->get_type()) {
545         case ATOMIC_FENCE:
546         case ATOMIC_READ:
547         case ATOMIC_WRITE:
548         case ATOMIC_RMW: {
549                 /* Optimization: relaxed operations don't need backtracking */
550                 if (act->is_relaxed())
551                         return NULL;
552                 /* linear search: from most recent to oldest */
553                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
554                 action_list_t::reverse_iterator rit;
555                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
556                         ModelAction *prev = *rit;
557                         if (prev->could_synchronize_with(act))
558                                 return prev;
559                 }
560                 break;
561         }
562         case ATOMIC_LOCK:
563         case ATOMIC_TRYLOCK: {
564                 /* linear search: from most recent to oldest */
565                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
566                 action_list_t::reverse_iterator rit;
567                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
568                         ModelAction *prev = *rit;
569                         if (act->is_conflicting_lock(prev))
570                                 return prev;
571                 }
572                 break;
573         }
574         case ATOMIC_UNLOCK: {
575                 /* linear search: from most recent to oldest */
576                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
577                 action_list_t::reverse_iterator rit;
578                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
579                         ModelAction *prev = *rit;
580                         if (!act->same_thread(prev) && prev->is_failed_trylock())
581                                 return prev;
582                 }
583                 break;
584         }
585         case ATOMIC_WAIT: {
586                 /* linear search: from most recent to oldest */
587                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
588                 action_list_t::reverse_iterator rit;
589                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
590                         ModelAction *prev = *rit;
591                         if (!act->same_thread(prev) && prev->is_failed_trylock())
592                                 return prev;
593                         if (!act->same_thread(prev) && prev->is_notify())
594                                 return prev;
595                 }
596                 break;
597         }
598
599         case ATOMIC_NOTIFY_ALL:
600         case ATOMIC_NOTIFY_ONE: {
601                 /* linear search: from most recent to oldest */
602                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
603                 action_list_t::reverse_iterator rit;
604                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
605                         ModelAction *prev = *rit;
606                         if (!act->same_thread(prev) && prev->is_wait())
607                                 return prev;
608                 }
609                 break;
610         }
611         default:
612                 break;
613         }
614         return NULL;
615 }
616
617 /** This method finds backtracking points: prior conflicting actions
618  * against which we should try to reorder the parameter ModelAction.
619  *
620  * @param act The ModelAction to find backtracking points for.
621  */
622 void ModelChecker::set_backtracking(ModelAction *act)
623 {
624         Thread *t = get_thread(act);
625         ModelAction *prev = get_last_conflict(act);
626         if (prev == NULL)
627                 return;
628
629         Node *node = prev->get_node()->get_parent();
630
631         int low_tid, high_tid;
632         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
633                 low_tid = id_to_int(act->get_tid());
634                 high_tid = low_tid + 1;
635         } else {
636                 low_tid = 0;
637                 high_tid = get_num_threads();
638         }
639
640         for (int i = low_tid; i < high_tid; i++) {
641                 thread_id_t tid = int_to_id(i);
642
643                 /* Make sure this thread can be enabled here. */
644                 if (i >= node->get_num_threads())
645                         break;
646
647                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
648                 if (node->enabled_status(tid) != THREAD_ENABLED)
649                         continue;
650
651                 /* Check if this has been explored already */
652                 if (node->has_been_explored(tid))
653                         continue;
654
655                 /* See if fairness allows */
656                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
657                         bool unfair = false;
658                         for (int t = 0; t < node->get_num_threads(); t++) {
659                                 thread_id_t tother = int_to_id(t);
660                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
661                                         unfair = true;
662                                         break;
663                                 }
664                         }
665                         if (unfair)
666                                 continue;
667                 }
668                 /* Cache the latest backtracking point */
669                 set_latest_backtrack(prev);
670
671                 /* If this is a new backtracking point, mark the tree */
672                 if (!node->set_backtrack(tid))
673                         continue;
674                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
675                                         id_to_int(prev->get_tid()),
676                                         id_to_int(t->get_id()));
677                 if (DBG_ENABLED()) {
678                         prev->print();
679                         act->print();
680                 }
681         }
682 }
683
684 /**
685  * @brief Cache a backtracking point as the "most recent", if eligible
686  *
687  * Note that this does not prepare the NodeStack for this backtracking
688  * operation; it only caches the action on a per-execution basis
689  *
690  * @param act The operation at which we should explore a different next action
691  * (i.e., backtracking point)
692  * @return True, if this action is now the most recent backtracking point;
693  * false otherwise
694  */
695 bool ModelChecker::set_latest_backtrack(ModelAction *act)
696 {
697         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
698                 priv->next_backtrack = act;
699                 return true;
700         }
701         return false;
702 }
703
704 /**
705  * Returns the last backtracking point. The model checker will explore a different
706  * path for this point in the next execution.
707  * @return The ModelAction at which the next execution should diverge.
708  */
709 ModelAction * ModelChecker::get_next_backtrack()
710 {
711         ModelAction *next = priv->next_backtrack;
712         priv->next_backtrack = NULL;
713         return next;
714 }
715
716 /**
717  * Processes a read or rmw model action.
718  * @param curr is the read model action to process.
719  * @param second_part_of_rmw A boolean that is true if this is the second action of an RMW.
720  * @return True if processing this read updates the mo_graph.
721  */
722 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
723 {
724         uint64_t value = VALUE_NONE;
725         bool updated = false;
726         while (true) {
727                 const ModelAction *reads_from = curr->get_node()->get_read_from();
728                 if (reads_from != NULL) {
729                         mo_graph->startChanges();
730
731                         value = reads_from->get_value();
732                         bool r_status = false;
733
734                         if (!second_part_of_rmw) {
735                                 check_recency(curr, reads_from);
736                                 r_status = r_modification_order(curr, reads_from);
737                         }
738
739                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
740                                 mo_graph->rollbackChanges();
741                                 priv->too_many_reads = false;
742                                 continue;
743                         }
744
745                         read_from(curr, reads_from);
746                         mo_graph->commitChanges();
747                         mo_check_promises(curr, true);
748
749                         updated |= r_status;
750                 } else if (!second_part_of_rmw) {
751                         /* Read from future value */
752                         struct future_value fv = curr->get_node()->get_future_value();
753                         Promise *promise = new Promise(curr, fv);
754                         value = fv.value;
755                         curr->set_read_from_promise(promise);
756                         promises->push_back(promise);
757                         mo_graph->startChanges();
758                         updated = r_modification_order(curr, promise);
759                         mo_graph->commitChanges();
760                 }
761                 get_thread(curr)->set_return_value(value);
762                 return updated;
763         }
764 }
765
766 /**
767  * Processes a lock, trylock, unlock, wait, or notify model action.
768  * @param curr is the mutex-related model action to process.
769  *
770  * The trylock operation checks whether the lock is taken. If not, it
771  * falls through to the normal lock operation case. If so, it fails,
772  * returning 0 to the calling thread.
773  *
774  * The lock operation has already been checked that it is enabled, so
775  * it just grabs the lock and synchronizes with the previous unlock.
776  *
777  * The unlock operation has to re-enable all of the threads that are
778  * waiting on the lock.
779  *
780  * @return True if synchronization was updated; false otherwise
781  */
782 bool ModelChecker::process_mutex(ModelAction *curr)
783 {
784         std::mutex *mutex = NULL;
785         struct std::mutex_state *state = NULL;
786
787         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
788                 mutex = (std::mutex *)curr->get_location();
789                 state = mutex->get_state();
790         } else if (curr->is_wait()) {
791                 mutex = (std::mutex *)curr->get_value();
792                 state = mutex->get_state();
793         }
794
795         switch (curr->get_type()) {
796         case ATOMIC_TRYLOCK: {
797                 bool success = !state->islocked;
798                 curr->set_try_lock(success);
799                 if (!success) {
800                         get_thread(curr)->set_return_value(0);
801                         break;
802                 }
803                 get_thread(curr)->set_return_value(1);
804         }
805                 //otherwise fall into the lock case
806         case ATOMIC_LOCK: {
807                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
808                         assert_bug("Lock access before initialization");
809                 state->islocked = true;
810                 ModelAction *unlock = get_last_unlock(curr);
811                 //synchronize with the previous unlock statement
812                 if (unlock != NULL) {
813                         curr->synchronize_with(unlock);
814                         return true;
815                 }
816                 break;
817         }
818         case ATOMIC_UNLOCK: {
819                 //unlock the lock
820                 state->islocked = false;
821                 //wake up the other threads
822                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
823                 //activate all the waiting threads
824                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
825                         scheduler->wake(get_thread(*rit));
826                 }
827                 waiters->clear();
828                 break;
829         }
830         case ATOMIC_WAIT: {
831                 //unlock the lock
832                 state->islocked = false;
833                 //wake up the other threads
834                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
835                 //activate all the waiting threads
836                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
837                         scheduler->wake(get_thread(*rit));
838                 }
839                 waiters->clear();
840                 //check whether we should go to sleep or not...simulate spurious failures
841                 if (curr->get_node()->get_misc() == 0) {
842                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
843                         //disable us
844                         scheduler->sleep(get_thread(curr));
845                 }
846                 break;
847         }
848         case ATOMIC_NOTIFY_ALL: {
849                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
850                 //activate all the waiting threads
851                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
852                         scheduler->wake(get_thread(*rit));
853                 }
854                 waiters->clear();
855                 break;
856         }
857         case ATOMIC_NOTIFY_ONE: {
858                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
859                 int wakeupthread = curr->get_node()->get_misc();
860                 action_list_t::iterator it = waiters->begin();
861                 advance(it, wakeupthread);
862                 scheduler->wake(get_thread(*it));
863                 waiters->erase(it);
864                 break;
865         }
866
867         default:
868                 ASSERT(0);
869         }
870         return false;
871 }
872
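/**
 * @brief Offer a write's value as a "future value" for a pending reader
 *
 * If the modification order may allow it, record the writer's value (with an
 * expiration based on params.maxfuturedelay) in the reader's Node and set a
 * backtracking point so the read can be re-explored with this value.
 * @param writer The write action supplying the value
 * @param reader The read action that may later read this value
 */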
873 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
874 {
875         /* Do more ambitious checks now that mo is more complete */
876         if (mo_may_allow(writer, reader)) {
877                 Node *node = reader->get_node();
878
879                 /* Find an ancestor thread which exists at the time of the reader */
880                 Thread *write_thread = get_thread(writer);
881                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
882                         write_thread = write_thread->get_parent();
883
884                 struct future_value fv = {
885                         writer->get_value(),
886                         writer->get_seq_number() + params.maxfuturedelay,
887                         write_thread->get_id(),
888                 };
889                 if (node->add_future_value(fv))
890                         set_latest_backtrack(reader);
891         }
892 }
893
894 /**
895  * Process a write ModelAction
896  * @param curr The ModelAction to process
897  * @return True if the mo_graph was updated or promises were resolved
898  */
899 bool ModelChecker::process_write(ModelAction *curr)
900 {
901         bool updated_mod_order = w_modification_order(curr);
902         bool updated_promises = resolve_promises(curr);
903
904         if (promises->size() == 0) {
905                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
906                         struct PendingFutureValue pfv = (*futurevalues)[i];
907                         add_future_value(pfv.writer, pfv.act);
908                 }
909                 futurevalues->clear();
910         }
911
912         mo_graph->commitChanges();
913         mo_check_promises(curr, false);
914
915         get_thread(curr)->set_return_value(VALUE_NONE);
916         return updated_mod_order || updated_promises;
917 }
918
919 /**
920  * Process a fence ModelAction
921  * @param curr The ModelAction to process
922  * @return True if synchronization was updated
923  */
924 bool ModelChecker::process_fence(ModelAction *curr)
925 {
926         /*
927          * fence-relaxed: no-op
928          * fence-release: only log the occurrence (not in this function), for
929          *   use in later synchronization
930          * fence-acquire (this function): search for hypothetical release
931          *   sequences
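         *
         * Informal example (sketch): given
         *   T1: x.store(1, memory_order_release);
         *   T2: r = x.load(memory_order_relaxed);
         *       atomic_thread_fence(memory_order_acquire);
         * the acquire fence in T2 walks backward to the relaxed load and, if
         * that load read from T1's release store, synchronizes T2 with T1.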
932          */
933         bool updated = false;
934         if (curr->is_acquire()) {
935                 action_list_t *list = action_trace;
936                 action_list_t::reverse_iterator rit;
937                 /* Find X : is_read(X) && X --sb-> curr */
938                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
939                         ModelAction *act = *rit;
940                         if (act == curr)
941                                 continue;
942                         if (act->get_tid() != curr->get_tid())
943                                 continue;
944                         /* Stop at the beginning of the thread */
945                         if (act->is_thread_start())
946                                 break;
947                         /* Stop once we reach a prior fence-acquire */
948                         if (act->is_fence() && act->is_acquire())
949                                 break;
950                         if (!act->is_read())
951                                 continue;
952                         /* read-acquire will find its own release sequences */
953                         if (act->is_acquire())
954                                 continue;
955
956                         /* Establish hypothetical release sequences */
957                         rel_heads_list_t release_heads;
958                         get_release_seq_heads(curr, act, &release_heads);
959                         for (unsigned int i = 0; i < release_heads.size(); i++)
960                                 if (!curr->synchronize_with(release_heads[i]))
961                                         set_bad_synchronization();
962                         if (release_heads.size() != 0)
963                                 updated = true;
964                 }
965         }
966         return updated;
967 }
968
969 /**
970  * @brief Process the current action for thread-related activity
971  *
972  * Performs current-action processing for a THREAD_* ModelAction. Processing
973  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
974  * synchronization, etc.  This function is a no-op for non-THREAD actions
975  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
976  *
977  * @param curr The current action
978  * @return True if synchronization was updated or a thread completed
979  */
980 bool ModelChecker::process_thread_action(ModelAction *curr)
981 {
982         bool updated = false;
983
984         switch (curr->get_type()) {
985         case THREAD_CREATE: {
986                 thrd_t *thrd = (thrd_t *)curr->get_location();
987                 struct thread_params *params = (struct thread_params *)curr->get_value();
988                 Thread *th = new Thread(thrd, params->func, params->arg);
989                 add_thread(th);
990                 th->set_creation(curr);
991                 /* Promises can be satisfied by children */
992                 for (unsigned int i = 0; i < promises->size(); i++) {
993                         Promise *promise = (*promises)[i];
994                         if (promise->thread_is_available(curr->get_tid()))
995                                 promise->add_thread(th->get_id());
996                 }
997                 break;
998         }
999         case THREAD_JOIN: {
1000                 Thread *blocking = curr->get_thread_operand();
1001                 ModelAction *act = get_last_action(blocking->get_id());
1002                 curr->synchronize_with(act);
1003                 updated = true; /* trigger rel-seq checks */
1004                 break;
1005         }
1006         case THREAD_FINISH: {
1007                 Thread *th = get_thread(curr);
1008                 while (!th->wait_list_empty()) {
1009                         ModelAction *act = th->pop_wait_list();
1010                         scheduler->wake(get_thread(act));
1011                 }
1012                 th->complete();
1013                 /* Completed thread can't satisfy promises */
1014                 for (unsigned int i = 0; i < promises->size(); i++) {
1015                         Promise *promise = (*promises)[i];
1016                         if (promise->thread_is_available(th->get_id()))
1017                                 if (promise->eliminate_thread(th->get_id()))
1018                                         priv->failed_promise = true;
1019                 }
1020                 updated = true; /* trigger rel-seq checks */
1021                 break;
1022         }
1023         case THREAD_START: {
1024                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1025                 break;
1026         }
1027         default:
1028                 break;
1029         }
1030
1031         return updated;
1032 }
1033
1034 /**
1035  * @brief Process the current action for release sequence fixup activity
1036  *
1037  * Performs model-checker release sequence fixups for the current action,
1038  * forcing a single pending release sequence to break (with a given, potential
1039  * "loose" write) or to complete (i.e., synchronize). If a pending release
1040  * sequence forms a complete release sequence, then we must perform the fixup
1041  * synchronization, mo_graph additions, etc.
1042  *
1043  * @param curr The current action; must be a release sequence fixup action
1044  * @param work_queue The work queue to which to add work items as they are
1045  * generated
1046  */
1047 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1048 {
1049         const ModelAction *write = curr->get_node()->get_relseq_break();
1050         struct release_seq *sequence = pending_rel_seqs->back();
1051         pending_rel_seqs->pop_back();
1052         ASSERT(sequence);
1053         ModelAction *acquire = sequence->acquire;
1054         const ModelAction *rf = sequence->rf;
1055         const ModelAction *release = sequence->release;
1056         ASSERT(acquire);
1057         ASSERT(release);
1058         ASSERT(rf);
1059         ASSERT(release->same_thread(rf));
1060
1061         if (write == NULL) {
1062                 /**
1063                  * @todo Forcing a synchronization requires that we set
1064                  * modification order constraints. For instance, we can't allow
1065                  * a fixup sequence in which two separate read-acquire
1066                  * operations read from the same sequence, where the first one
1067                  * synchronizes and the other doesn't. Essentially, we can't
1068                  * allow any writes to insert themselves between 'release' and
1069                  * 'rf'
1070                  */
1071
1072                 /* Must synchronize */
1073                 if (!acquire->synchronize_with(release)) {
1074                         set_bad_synchronization();
1075                         return;
1076                 }
1077                 /* Re-check all pending release sequences */
1078                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1079                 /* Re-check act for mo_graph edges */
1080                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1081
1082                 /* propagate synchronization to later actions */
1083                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1084                 for (; (*rit) != acquire; rit++) {
1085                         ModelAction *propagate = *rit;
1086                         if (acquire->happens_before(propagate)) {
1087                                 propagate->synchronize_with(acquire);
1088                                 /* Re-check 'propagate' for mo_graph edges */
1089                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1090                         }
1091                 }
1092         } else {
1093                 /* Break release sequence with new edges:
1094                  *   release --mo--> write --mo--> rf */
1095                 mo_graph->addEdge(release, write);
1096                 mo_graph->addEdge(write, rf);
1097         }
1098
1099         /* See if we have realized a data race */
1100         checkDataRaces();
1101 }
1102
1103 /**
1104  * Initialize the current action by performing one or more of the following
1105  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1106  * in the NodeStack, manipulating backtracking sets, allocating and
1107  * initializing clock vectors, and computing the promises to fulfill.
1108  *
1109  * @param curr The current action, as passed from the user context; may be
1110  * freed/invalidated after the execution of this function, with a different
1111  * action "returned" in its place (pass-by-reference)
1112  * @return True if curr is a newly-explored action; false otherwise
1113  */
1114 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1115 {
1116         ModelAction *newcurr;
1117
1118         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1119                 newcurr = process_rmw(*curr);
1120                 delete *curr;
1121
1122                 if (newcurr->is_rmw())
1123                         compute_promises(newcurr);
1124
1125                 *curr = newcurr;
1126                 return false;
1127         }
1128
1129         (*curr)->set_seq_number(get_next_seq_num());
1130
1131         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1132         if (newcurr) {
1133                 /* First restore type and order in case of RMW operation */
1134                 if ((*curr)->is_rmwr())
1135                         newcurr->copy_typeandorder(*curr);
1136
1137                 ASSERT((*curr)->get_location() == newcurr->get_location());
1138                 newcurr->copy_from_new(*curr);
1139
1140                 /* Discard duplicate ModelAction; use action from NodeStack */
1141                 delete *curr;
1142
1143                 /* Always compute new clock vector */
1144                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1145
1146                 *curr = newcurr;
1147                 return false; /* Action was explored previously */
1148         } else {
1149                 newcurr = *curr;
1150
1151                 /* Always compute new clock vector */
1152                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1153
1154                 /* Assign most recent release fence */
1155                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1156
1157                 /*
1158                  * Perform one-time actions when pushing new ModelAction onto
1159                  * NodeStack
1160                  */
1161                 if (newcurr->is_write())
1162                         compute_promises(newcurr);
1163                 else if (newcurr->is_relseq_fixup())
1164                         compute_relseq_breakwrites(newcurr);
1165                 else if (newcurr->is_wait())
1166                         newcurr->get_node()->set_misc_max(2);
1167                 else if (newcurr->is_notify_one()) {
1168                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1169                 }
1170                 return true; /* This was a new ModelAction */
1171         }
1172 }
1173
1174 /**
1175  * @brief Establish reads-from relation between two actions
1176  *
1177  * Perform basic operations involved with establishing a concrete rf relation,
1178  * including setting the ModelAction data and checking for release sequences.
1179  *
1180  * @param act The action that is reading (must be a read)
1181  * @param rf The action from which we are reading (must be a write)
1182  *
1183  * @return True if this read established synchronization
1184  */
1185 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1186 {
1187         act->set_read_from(rf);
1188         if (rf != NULL && act->is_acquire()) {
1189                 rel_heads_list_t release_heads;
1190                 get_release_seq_heads(act, act, &release_heads);
1191                 int num_heads = release_heads.size();
1192                 for (unsigned int i = 0; i < release_heads.size(); i++)
1193                         if (!act->synchronize_with(release_heads[i])) {
1194                                 set_bad_synchronization();
1195                                 num_heads--;
1196                         }
1197                 return num_heads > 0;
1198         }
1199         return false;
1200 }
1201
1202 /**
1203  * @brief Check whether a model action is enabled.
1204  *
1205  * Checks whether a lock or join operation would be successful (i.e., is the
1206  * lock already locked, or is the joined thread already complete). If not, put
1207  * the action in a waiter list.
1208  *
1209  * @param curr The ModelAction to check for enablement.
1210  * @return a bool that indicates whether the action is enabled.
1211  */
1212 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1213         if (curr->is_lock()) {
1214                 std::mutex *lock = (std::mutex *)curr->get_location();
1215                 struct std::mutex_state *state = lock->get_state();
1216                 if (state->islocked) {
1217                         //Stick the action in the appropriate waiting queue
1218                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1219                         return false;
1220                 }
1221         } else if (curr->get_type() == THREAD_JOIN) {
1222                 Thread *blocking = (Thread *)curr->get_location();
1223                 if (!blocking->is_complete()) {
1224                         blocking->push_wait_list(curr);
1225                         return false;
1226                 }
1227         }
1228
1229         return true;
1230 }
1231
1232 /**
1233  * This is the heart of the model checker routine. It performs model-checking
1234  * actions corresponding to a given "current action." Among other processes, it
1235  * calculates reads-from relationships, updates synchronization clock vectors,
1236  * forms a memory_order constraints graph, and handles replay/backtrack
1237  * execution when running permutations of previously-observed executions.
1238  *
1239  * @param curr The current action to process
1240  * @return The ModelAction that is actually executed; may be different than
1241  * curr; may be NULL, if the current action is not enabled to run
1242  */
1243 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1244 {
1245         ASSERT(curr);
1246         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1247
1248         if (!check_action_enabled(curr)) {
1249                 /* Make the execution look like we chose to run this action
1250                  * much later, when a lock/join can succeed */
1251                 get_thread(curr)->set_pending(curr);
1252                 scheduler->sleep(get_thread(curr));
1253                 return NULL;
1254         }
1255
1256         bool newly_explored = initialize_curr_action(&curr);
1257
1258         DBG();
1259         if (DBG_ENABLED())
1260                 curr->print();
1261
1262         wake_up_sleeping_actions(curr);
1263
1264         /* Add the action to lists before any other model-checking tasks */
1265         if (!second_part_of_rmw)
1266                 add_action_to_lists(curr);
1267
1268         /* Build may_read_from set for newly-created actions */
1269         if (newly_explored && curr->is_read())
1270                 build_reads_from_past(curr);
1271
1272         /* Initialize work_queue with the "current action" work */
1273         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1274         while (!work_queue.empty() && !has_asserted()) {
1275                 WorkQueueEntry work = work_queue.front();
1276                 work_queue.pop_front();
1277
1278                 switch (work.type) {
1279                 case WORK_CHECK_CURR_ACTION: {
1280                         ModelAction *act = work.action;
1281                         bool update = false; /* update this location's release seq's */
1282                         bool update_all = false; /* update all release seq's */
1283
1284                         if (process_thread_action(curr))
1285                                 update_all = true;
1286
1287                         if (act->is_read() && process_read(act, second_part_of_rmw))
1288                                 update = true;
1289
1290                         if (act->is_write() && process_write(act))
1291                                 update = true;
1292
1293                         if (act->is_fence() && process_fence(act))
1294                                 update_all = true;
1295
1296                         if (act->is_mutex_op() && process_mutex(act))
1297                                 update_all = true;
1298
1299                         if (act->is_relseq_fixup())
1300                                 process_relseq_fixup(curr, &work_queue);
1301
1302                         if (update_all)
1303                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1304                         else if (update)
1305                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1306                         break;
1307                 }
1308                 case WORK_CHECK_RELEASE_SEQ:
1309                         resolve_release_sequences(work.location, &work_queue);
1310                         break;
1311                 case WORK_CHECK_MO_EDGES: {
1312                         /** @todo Complete verification of work_queue */
1313                         ModelAction *act = work.action;
1314                         bool updated = false;
1315
1316                         if (act->is_read()) {
1317                                 const ModelAction *rf = act->get_reads_from();
1318                                 const Promise *promise = act->get_reads_from_promise();
1319                                 if (rf) {
1320                                         if (r_modification_order(act, rf))
1321                                                 updated = true;
1322                                 } else if (promise) {
1323                                         if (r_modification_order(act, promise))
1324                                                 updated = true;
1325                                 }
1326                         }
1327                         if (act->is_write()) {
1328                                 if (w_modification_order(act))
1329                                         updated = true;
1330                         }
1331                         mo_graph->commitChanges();
1332
1333                         if (updated)
1334                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1335                         break;
1336                 }
1337                 default:
1338                         ASSERT(false);
1339                         break;
1340                 }
1341         }
1342
1343         check_curr_backtracking(curr);
1344         set_backtracking(curr);
1345         return curr;
1346 }
1347
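/**
 * @brief Record the current action as the latest backtracking point if its
 * node (or the parent node) still has unexplored alternatives: thread
 * backtracks, misc values, reads-from, future values, promises, or
 * release-sequence breaks
 */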
1348 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1349 {
1350         Node *currnode = curr->get_node();
1351         Node *parnode = currnode->get_parent();
1352
1353         if ((parnode && !parnode->backtrack_empty()) ||
1354                          !currnode->misc_empty() ||
1355                          !currnode->read_from_empty() ||
1356                          !currnode->future_value_empty() ||
1357                          !currnode->promise_empty() ||
1358                          !currnode->relseq_break_empty()) {
1359                 set_latest_backtrack(curr);
1360         }
1361 }
1362
1363 bool ModelChecker::promises_expired() const
1364 {
1365         for (unsigned int i = 0; i < promises->size(); i++) {
1366                 Promise *promise = (*promises)[i];
1367                 if (promise->get_expiration() < priv->used_sequence_numbers)
1368                         return true;
1369         }
1370         return false;
1371 }
1372
1373 /**
1374  * This is the strongest feasibility check available.
1375  * @return whether the current trace (partial or complete) must be a prefix of
1376  * a feasible trace.
1377  */
1378 bool ModelChecker::isfeasibleprefix() const
1379 {
1380         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1381 }
1382
1383 /**
1384  * Print diagnostic information about an infeasible execution
1385  * @param prefix A string to prefix the output with; if NULL, then a default
1386  * message prefix will be provided
1387  */
1388 void ModelChecker::print_infeasibility(const char *prefix) const
1389 {
1390         char buf[100];
1391         char *ptr = buf;
1392         if (mo_graph->checkForCycles())
1393                 ptr += sprintf(ptr, "[mo cycle]");
1394         if (priv->failed_promise)
1395                 ptr += sprintf(ptr, "[failed promise]");
1396         if (priv->too_many_reads)
1397                 ptr += sprintf(ptr, "[too many reads]");
1398         if (priv->no_valid_reads)
1399                 ptr += sprintf(ptr, "[no valid reads-from]");
1400         if (priv->bad_synchronization)
1401                 ptr += sprintf(ptr, "[bad sw ordering]");
1402         if (promises_expired())
1403                 ptr += sprintf(ptr, "[promise expired]");
1404         if (promises->size() != 0)
1405                 ptr += sprintf(ptr, "[unresolved promise]");
1406         if (ptr != buf)
1407                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1408 }
1409
1410 /**
1411  * Returns whether the current completed trace is feasible, except for pending
1412  * release sequences.
1413  */
1414 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1415 {
1416         return !is_infeasible() && promises->size() == 0;
1417 }
1418
1419 /**
1420  * Check if the current partial trace is infeasible. Does not check any
1421  * end-of-execution flags, which might rule out the execution. Thus, this is
1422  * useful only for ruling an execution as infeasible.
1423  * @return whether the current partial trace is infeasible.
1424  */
1425 bool ModelChecker::is_infeasible() const
1426 {
1427         return mo_graph->checkForCycles() ||
1428                 priv->no_valid_reads ||
1429                 priv->failed_promise ||
1430                 priv->too_many_reads ||
1431                 priv->bad_synchronization ||
1432                 promises_expired();
1433 }
1434
1435 /** Close out an RMWR by converting the previous RMWR into a RMW or READ. */
1436 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1437         ModelAction *lastread = get_last_action(act->get_tid());
1438         lastread->process_rmw(act);
1439         if (act->is_rmw()) {
1440                 if (lastread->get_reads_from())
1441                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1442                 else
1443                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1444                 mo_graph->commitChanges();
1445         }
1446         return lastread;
1447 }
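
/* Illustrative sketch, not part of the original source: how a C++11 RMW is
 * decomposed before process_rmw() closes it out.
 *
 *   std::atomic<int> x(0);
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * is explored as a read part (the RMWR) followed by a second action that
 * converts it into a full RMW (or into a plain READ when, e.g., a
 * compare_exchange fails). The addRMWEdge() call above is intended to keep
 * the RMW adjacent to the store it read from in modification order
 * (RMW atomicity).
 */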
1448
1449 /**
1450  * Checks whether a thread has read from the same write too many times
1451  * without seeing the effects of a later write.
1452  *
1453  * Basic idea:
1454  * 1) there must be a different write that we could read from that would satisfy the modification order,
1455  * 2) we must have read from the same value in excess of maxreads times, and
1456  * 3) that other write must have been in the reads_from set for maxreads times.
1457  *
1458  * If so, we decide that the execution is no longer feasible.
1459  */
1460 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1461 {
1462         if (params.maxreads != 0) {
1463                 if (curr->get_node()->get_read_from_size() <= 1)
1464                         return;
1465                 //Must make sure that the execution is currently feasible... We could
1466                 //accidentally clear the infeasibility by rolling back
1467                 if (is_infeasible())
1468                         return;
1469                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1470                 int tid = id_to_int(curr->get_tid());
1471
1472                 /* Skip checks */
1473                 if ((int)thrd_lists->size() <= tid)
1474                         return;
1475                 action_list_t *list = &(*thrd_lists)[tid];
1476
1477                 action_list_t::reverse_iterator rit = list->rbegin();
1478                 /* Skip past curr */
1479                 for (; (*rit) != curr; rit++)
1480                         ;
1481                 /* go past curr now */
1482                 rit++;
1483
1484                 action_list_t::reverse_iterator ritcopy = rit;
1485                 //See if we have enough reads from the same value
1486                 int count = 0;
1487                 for (; count < params.maxreads; rit++, count++) {
1488                         if (rit == list->rend())
1489                                 return;
1490                         ModelAction *act = *rit;
1491                         if (!act->is_read())
1492                                 return;
1493
1494                         if (act->get_reads_from() != rf)
1495                                 return;
1496                         if (act->get_node()->get_read_from_size() <= 1)
1497                                 return;
1498                 }
1499                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1500                         /* Get write */
1501                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1502
1503                         /* Need a different write */
1504                         if (write == rf)
1505                                 continue;
1506
1507                         /* Test to see whether this is a feasible write to read from */
1508                         /** NOTE: all members of the read-from set should be
1509                          *  feasible, so we no longer check that here */
1510
1511                         rit = ritcopy;
1512
1513                         bool feasiblewrite = true;
1514                         //now we need to see if this write works for everyone
1515
1516                         for (int loop = count; loop > 0; loop--, rit++) {
1517                                 ModelAction *act = *rit;
1518                                 bool foundvalue = false;
1519                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1520                                         if (act->get_node()->get_read_from_at(j) == write) {
1521                                                 foundvalue = true;
1522                                                 break;
1523                                         }
1524                                 }
1525                                 if (!foundvalue) {
1526                                         feasiblewrite = false;
1527                                         break;
1528                                 }
1529                         }
1530                         if (feasiblewrite) {
1531                                 priv->too_many_reads = true;
1532                                 return;
1533                         }
1534                 }
1535         }
1536 }
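
/* Illustrative sketch, not part of the original source: the pattern this
 * recency check prunes when params.maxreads is non-zero.
 *
 *   std::atomic<int> flag(0);
 *   // Thread 1:
 *   flag.store(1, std::memory_order_relaxed);
 *   // Thread 2:
 *   while (flag.load(std::memory_order_relaxed) == 0)
 *           ;   // keeps re-reading the initial value
 *
 * Once Thread 2 has read the same old store more than maxreads times while
 * the store of 1 remains a possible reads-from choice for every one of those
 * loads, too_many_reads is set and the execution is treated as infeasible.
 */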
1537
1538 /**
1539  * Updates the mo_graph with the constraints imposed from the current
1540  * read.
1541  *
1542  * Basic idea is the following: Go through each other thread and find
1543  * the last action that happened before our read.  Two cases:
1544  *
1545  * (1) The action is a write => that write must either occur before
1546  * the write we read from or be the write we read from.
1547  *
1548  * (2) The action is a read => the write that that action read from
1549  * must occur before the write we read from or be the same write.
1550  *
1551  * @param curr The current action. Must be a read.
1552  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1553  * @return True if modification order edges were added; false otherwise
1554  */
1555 template <typename rf_type>
1556 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1557 {
1558         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1559         unsigned int i;
1560         bool added = false;
1561         ASSERT(curr->is_read());
1562
1563         /* Last SC fence in the current thread */
1564         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1565
1566         /* Iterate over all threads */
1567         for (i = 0; i < thrd_lists->size(); i++) {
1568                 /* Last SC fence in thread i */
1569                 ModelAction *last_sc_fence_thread_local = NULL;
1570                 if (int_to_id((int)i) != curr->get_tid())
1571                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1572
1573                 /* Last SC fence in thread i, before last SC fence in current thread */
1574                 ModelAction *last_sc_fence_thread_before = NULL;
1575                 if (last_sc_fence_local)
1576                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1577
1578                 /* Iterate over actions in thread, starting from most recent */
1579                 action_list_t *list = &(*thrd_lists)[i];
1580                 action_list_t::reverse_iterator rit;
1581                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1582                         ModelAction *act = *rit;
1583
1584                         if (act->is_write() && !act->equals(rf) && act != curr) {
1585                                 /* C++, Section 29.3 statement 5 */
1586                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1587                                                 *act < *last_sc_fence_thread_local) {
1588                                         added = mo_graph->addEdge(act, rf) || added;
1589                                         break;
1590                                 }
1591                                 /* C++, Section 29.3 statement 4 */
1592                                 else if (act->is_seqcst() && last_sc_fence_local &&
1593                                                 *act < *last_sc_fence_local) {
1594                                         added = mo_graph->addEdge(act, rf) || added;
1595                                         break;
1596                                 }
1597                                 /* C++, Section 29.3 statement 6 */
1598                                 else if (last_sc_fence_thread_before &&
1599                                                 *act < *last_sc_fence_thread_before) {
1600                                         added = mo_graph->addEdge(act, rf) || added;
1601                                         break;
1602                                 }
1603                         }
1604
1605                         /*
1606                          * Include at most one act per-thread that "happens
1607                          * before" curr. Don't consider curr reflexively.
1608                          */
1609                         if (act->happens_before(curr) && act != curr) {
1610                                 if (act->is_write()) {
1611                                         if (!act->equals(rf)) {
1612                                                 added = mo_graph->addEdge(act, rf) || added;
1613                                         }
1614                                 } else {
1615                                         const ModelAction *prevreadfrom = act->get_reads_from();
1616                                         //if the previous read is unresolved, keep going...
1617                                         if (prevreadfrom == NULL)
1618                                                 continue;
1619
1620                                         if (!prevreadfrom->equals(rf)) {
1621                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1622                                         }
1623                                 }
1624                                 break;
1625                         }
1626                 }
1627         }
1628
1629         /*
1630          * All compatible, thread-exclusive promises must be ordered after any
1631          * concrete loads from the same thread
1632          */
1633         for (unsigned int i = 0; i < promises->size(); i++)
1634                 if ((*promises)[i]->is_compatible_exclusive(curr))
1635                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1636
1637         return added;
1638 }
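
/* Illustrative sketch, not part of the original source, of case (2) above
 * (read-read coherence on one location):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);
 *   // Thread 2:
 *   int r1 = x.load(std::memory_order_relaxed);   // reads 1
 *   int r2 = x.load(std::memory_order_relaxed);   // curr, reads 2
 *
 * The first load is sequenced before (hence happens before) curr and read
 * from x = 1, so r_modification_order() adds the edge
 * store(1) --mo--> store(2), i.e. addEdge(prevreadfrom, rf).
 */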
1639
1640 /**
1641  * Updates the mo_graph with the constraints imposed from the current write.
1642  *
1643  * Basic idea is the following: Go through each other thread and find
1644  * the latest action that happened before our write.  Two cases:
1645  *
1646  * (1) The action is a write => that write must occur before
1647  * the current write
1648  *
1649  * (2) The action is a read => the write that that action read from
1650  * must occur before the current write.
1651  *
1652  * This method also handles two other issues:
1653  *
1654  * (I) Sequential Consistency: Making sure that if the current write is
1655  * seq_cst, that it occurs after the previous seq_cst write.
1656  *
1657  * (II) Sending the write back to non-synchronizing reads.
1658  *
1659  * @param curr The current action. Must be a write.
1660  * @return True if modification order edges were added; false otherwise
1661  */
1662 bool ModelChecker::w_modification_order(ModelAction *curr)
1663 {
1664         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1665         unsigned int i;
1666         bool added = false;
1667         ASSERT(curr->is_write());
1668
1669         if (curr->is_seqcst()) {
1670                 /* We must see at least the last sequentially consistent write,
1671                  * so that we are initialized. */
1672                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1673                 if (last_seq_cst != NULL) {
1674                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1675                 }
1676         }
1677
1678         /* Last SC fence in the current thread */
1679         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1680
1681         /* Iterate over all threads */
1682         for (i = 0; i < thrd_lists->size(); i++) {
1683                 /* Last SC fence in thread i, before last SC fence in current thread */
1684                 ModelAction *last_sc_fence_thread_before = NULL;
1685                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1686                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1687
1688                 /* Iterate over actions in thread, starting from most recent */
1689                 action_list_t *list = &(*thrd_lists)[i];
1690                 action_list_t::reverse_iterator rit;
1691                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1692                         ModelAction *act = *rit;
1693                         if (act == curr) {
1694                                 /*
1695                                  * 1) If RMW and it actually read from something, then we
1696                                  * already have all relevant edges, so just skip to next
1697                                  * thread.
1698                                  *
1699                                  * 2) If RMW and it didn't read from anything, we should add
1700                                  * whatever edge we can get to speed up convergence.
1701                                  *
1702                                  * 3) If normal write, we need to look at earlier actions, so
1703                                  * continue processing list.
1704                                  */
1705                                 if (curr->is_rmw()) {
1706                                         if (curr->get_reads_from() != NULL)
1707                                                 break;
1708                                         else
1709                                                 continue;
1710                                 } else
1711                                         continue;
1712                         }
1713
1714                         /* C++, Section 29.3 statement 7 */
1715                         if (last_sc_fence_thread_before && act->is_write() &&
1716                                         *act < *last_sc_fence_thread_before) {
1717                                 added = mo_graph->addEdge(act, curr) || added;
1718                                 break;
1719                         }
1720
1721                         /*
1722                          * Include at most one act per-thread that "happens
1723                          * before" curr
1724                          */
1725                         if (act->happens_before(curr)) {
1726                                 /*
1727                                  * Note: if act is RMW, just add edge:
1728                                  *   act --mo--> curr
1729                                  * The following edge should be handled elsewhere:
1730                                  *   readfrom(act) --mo--> act
1731                                  */
1732                                 if (act->is_write())
1733                                         added = mo_graph->addEdge(act, curr) || added;
1734                                 else if (act->is_read()) {
1735                                         //if previous read accessed a null, just keep going
1736                                         if (act->get_reads_from() == NULL)
1737                                                 continue;
1738                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1739                                 }
1740                                 break;
1741                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1742                                                      !act->same_thread(curr)) {
1743                                 /* We have an action that:
1744                                    (1) did not happen before us
1745                                    (2) is a read and we are a write
1746                                    (3) cannot synchronize with us
1747                                    (4) is in a different thread
1748                                    =>
1749                                    that read could potentially read from our write.  Note that
1750                                    these checks are overly conservative at this point, we'll
1751                                    do more checks before actually removing the
1752                                    pendingfuturevalue.
1753
1754                                  */
1755                                 if (thin_air_constraint_may_allow(curr, act)) {
1756                                         if (!is_infeasible())
1757                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1758                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1759                                                 add_future_value(curr, act);
1760                                 }
1761                         }
1762                 }
1763         }
1764
1765         /*
1766          * All compatible, thread-exclusive promises must be ordered after any
1767          * concrete stores to the same thread, or else they can be merged with
1768          * this store later
1769          */
1770         for (unsigned int i = 0; i < promises->size(); i++)
1771                 if ((*promises)[i]->is_compatible_exclusive(curr))
1772                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1773
1774         return added;
1775 }
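
/* Illustrative sketch, not part of the original source, of case (1) above
 * (write-write coherence on one location):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);   // curr
 *
 * The store of 1 is sequenced before (hence happens before) curr, so
 * w_modification_order() adds the edge store(1) --mo--> store(2): the
 * modification order must be consistent with happens-before on the same
 * location.
 */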
1776
1777 /** Arbitrary reads from the future are not allowed.  Section 29.3
1778  * part 9 places some constraints.  This method checks one result of that
1779  * constraint; others require compiler support. */
1780 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1781 {
1782         if (!writer->is_rmw())
1783                 return true;
1784
1785         if (!reader->is_rmw())
1786                 return true;
1787
1788         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1789                 if (search == reader)
1790                         return false;
1791                 if (search->get_tid() == reader->get_tid() &&
1792                                 search->happens_before(reader))
1793                         break;
1794         }
1795
1796         return true;
1797 }
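
/* Illustrative sketch, not part of the original source: the RMW cycle this
 * check rejects before a future value is stashed.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.fetch_add(1, std::memory_order_relaxed);
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * Letting each RMW read from the other would make each increment's value
 * appear "out of thin air"; walking writer->get_reads_from() back through
 * the chain and finding reader detects exactly that cycle.
 */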
1798
1799 /**
1800  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1801  * some constraints. This method checks the following constraint (others
1802  * require compiler support):
1803  *
1804  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1805  */
1806 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1807 {
1808         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1809         unsigned int i;
1810         /* Iterate over all threads */
1811         for (i = 0; i < thrd_lists->size(); i++) {
1812                 const ModelAction *write_after_read = NULL;
1813
1814                 /* Iterate over actions in thread, starting from most recent */
1815                 action_list_t *list = &(*thrd_lists)[i];
1816                 action_list_t::reverse_iterator rit;
1817                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1818                         ModelAction *act = *rit;
1819
1820                         /* Don't disallow due to act == reader */
1821                         if (!reader->happens_before(act) || reader == act)
1822                                 break;
1823                         else if (act->is_write())
1824                                 write_after_read = act;
1825                         else if (act->is_read() && act->get_reads_from() != NULL)
1826                                 write_after_read = act->get_reads_from();
1827                 }
1828
1829                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1830                         return false;
1831         }
1832         return true;
1833 }
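
/* Illustrative sketch, not part of the original source, of the constraint
 * "if X --hb--> Y --mo--> Z, then X should not read from Z":
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r1 = x.load(std::memory_order_relaxed);   // X
 *   x.store(1, std::memory_order_relaxed);        // Y
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);        // Z
 *
 * X is sequenced before Y, so if the graph already orders Y --mo--> Z,
 * mo_may_allow() returns false and Z is not offered to X as a future value.
 */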
1834
1835 /**
1836  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1837  * The ModelAction under consideration is expected to be taking part in
1838  * release/acquire synchronization as an object of the "reads from" relation.
1839  * Note that this can only provide release sequence support for RMW chains
1840  * which do not read from the future, as those actions cannot be traced until
1841  * their "promise" is fulfilled. Similarly, we may not even establish the
1842  * presence of a release sequence with certainty, as some modification order
1843  * constraints may be decided further in the future. Thus, this function
1844  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1845  * and a boolean representing certainty.
1846  *
1847  * @param rf The action that might be part of a release sequence. Must be a
1848  * write.
1849  * @param release_heads A pass-by-reference style return parameter. After
1850  * execution of this function, release_heads will contain the heads of all the
1851  * relevant release sequences, if any exist with certainty
1852  * @param pending A pass-by-reference style return parameter which is only used
1853  * when returning false (i.e., uncertain). Returns most information regarding
1854  * an uncertain release sequence, including any write operations that might
1855  * break the sequence.
1856  * @return true, if the ModelChecker is certain that release_heads is complete;
1857  * false otherwise
1858  */
1859 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1860                 rel_heads_list_t *release_heads,
1861                 struct release_seq *pending) const
1862 {
1863         /* Only check for release sequences if there are no cycles */
1864         if (mo_graph->checkForCycles())
1865                 return false;
1866
1867         while (rf) {
1868                 ASSERT(rf->is_write());
1869
1870                 if (rf->is_release())
1871                         release_heads->push_back(rf);
1872                 else if (rf->get_last_fence_release())
1873                         release_heads->push_back(rf->get_last_fence_release());
1874                 if (!rf->is_rmw())
1875                         break; /* End of RMW chain */
1876
1877                 /** @todo Need to be smarter here...  In the linux lock
1878                  * example, this will run to the beginning of the program for
1879                  * every acquire. */
1880                 /** @todo The way to be smarter here is to keep going until 1
1881                  * thread has a release preceded by an acquire and you've seen
1882                  * both. */
1883
1884                 /* acq_rel RMW is a sufficient stopping condition */
1885                 if (rf->is_acquire() && rf->is_release())
1886                         return true; /* complete */
1887
1888                 rf = rf->get_reads_from();
1889         }
1890         if (!rf) {
1891                 /* read from future: need to settle this later */
1892                 pending->rf = NULL;
1893                 return false; /* incomplete */
1894         }
1895
1896         if (rf->is_release())
1897                 return true; /* complete */
1898
1899         /* else relaxed write
1900          * - check for fence-release in the same thread (29.8, stmt. 3)
1901          * - check modification order for contiguous subsequence
1902          *   -> rf must be same thread as release */
1903
1904         const ModelAction *fence_release = rf->get_last_fence_release();
1905         /* Synchronize with a fence-release unconditionally; we don't need to
1906          * find any more "contiguous subsequence..." for it */
1907         if (fence_release)
1908                 release_heads->push_back(fence_release);
1909
1910         int tid = id_to_int(rf->get_tid());
1911         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1912         action_list_t *list = &(*thrd_lists)[tid];
1913         action_list_t::const_reverse_iterator rit;
1914
1915         /* Find rf in the thread list */
1916         rit = std::find(list->rbegin(), list->rend(), rf);
1917         ASSERT(rit != list->rend());
1918
1919         /* Find the last {write,fence}-release */
1920         for (; rit != list->rend(); rit++) {
1921                 if (fence_release && *(*rit) < *fence_release)
1922                         break;
1923                 if ((*rit)->is_release())
1924                         break;
1925         }
1926         if (rit == list->rend()) {
1927                 /* No write-release in this thread */
1928                 return true; /* complete */
1929         } else if (fence_release && *(*rit) < *fence_release) {
1930                 /* The fence-release is more recent (and so, "stronger") than
1931                  * the most recent write-release */
1932                 return true; /* complete */
1933         } /* else, need to establish contiguous release sequence */
1934         ModelAction *release = *rit;
1935
1936         ASSERT(rf->same_thread(release));
1937
1938         pending->writes.clear();
1939
1940         bool certain = true;
1941         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1942                 if (id_to_int(rf->get_tid()) == (int)i)
1943                         continue;
1944                 list = &(*thrd_lists)[i];
1945
1946                 /* Can we ensure no future writes from this thread may break
1947                  * the release seq? */
1948                 bool future_ordered = false;
1949
1950                 ModelAction *last = get_last_action(int_to_id(i));
1951                 Thread *th = get_thread(int_to_id(i));
1952                 if ((last && rf->happens_before(last)) ||
1953                                 !is_enabled(th) ||
1954                                 th->is_complete())
1955                         future_ordered = true;
1956
1957                 ASSERT(!th->is_model_thread() || future_ordered);
1958
1959                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1960                         const ModelAction *act = *rit;
1961                         /* Reach synchronization -> this thread is complete */
1962                         if (act->happens_before(release))
1963                                 break;
1964                         if (rf->happens_before(act)) {
1965                                 future_ordered = true;
1966                                 continue;
1967                         }
1968
1969                         /* Only non-RMW writes can break release sequences */
1970                         if (!act->is_write() || act->is_rmw())
1971                                 continue;
1972
1973                         /* Check modification order */
1974                         if (mo_graph->checkReachable(rf, act)) {
1975                                 /* rf --mo--> act */
1976                                 future_ordered = true;
1977                                 continue;
1978                         }
1979                         if (mo_graph->checkReachable(act, release))
1980                                 /* act --mo--> release */
1981                                 break;
1982                         if (mo_graph->checkReachable(release, act) &&
1983                                       mo_graph->checkReachable(act, rf)) {
1984                                 /* release --mo-> act --mo--> rf */
1985                                 return true; /* complete */
1986                         }
1987                         /* act may break release sequence */
1988                         pending->writes.push_back(act);
1989                         certain = false;
1990                 }
1991                 if (!future_ordered)
1992                         certain = false; /* This thread is uncertain */
1993         }
1994
1995         if (certain) {
1996                 release_heads->push_back(release);
1997                 pending->writes.clear();
1998         } else {
1999                 pending->release = release;
2000                 pending->rf = rf;
2001         }
2002         return certain;
2003 }
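
/* Illustrative sketch, not part of the original source: an RMW chain that
 * this function walks back to a release head.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);       // release head
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);   // reads the fetch_add's result
 *
 * Starting from the fetch_add, the loop follows get_reads_from() back to the
 * store-release, pushes it onto release_heads and returns true: Thread 3
 * synchronizes with Thread 1 even though it read from a relaxed RMW.
 */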
2004
2005 /**
2006  * An interface for getting the release sequence head(s) with which a
2007  * given ModelAction must synchronize. This function only returns a non-empty
2008  * result when it can locate a release sequence head with certainty. Otherwise,
2009  * it may mark the internal state of the ModelChecker so that it will handle
2010  * the release sequence at a later time, causing @a acquire to update its
2011  * synchronization at some later point in execution.
2012  *
2013  * @param acquire The 'acquire' action that may synchronize with a release
2014  * sequence
2015  * @param read The read action that may read from a release sequence; this may
2016  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2017  * when 'acquire' is a fence-acquire)
2018  * @param release_heads A pass-by-reference return parameter. Will be filled
2019  * with the head(s) of the release sequence(s), if they exist with certainty.
2020  * @see ModelChecker::release_seq_heads
2021  */
2022 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2023                 ModelAction *read, rel_heads_list_t *release_heads)
2024 {
2025         const ModelAction *rf = read->get_reads_from();
2026         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2027         sequence->acquire = acquire;
2028         sequence->read = read;
2029
2030         if (!release_seq_heads(rf, release_heads, sequence)) {
2031                 /* add act to 'lazy checking' list */
2032                 pending_rel_seqs->push_back(sequence);
2033         } else {
2034                 snapshot_free(sequence);
2035         }
2036 }
2037
2038 /**
2039  * Attempt to resolve all stashed operations that might synchronize with a
2040  * release sequence for a given location. This implements the "lazy" portion of
2041  * determining whether or not a release sequence was contiguous, since not all
2042  * modification order information is present at the time an action occurs.
2043  *
2044  * @param location The location/object that should be checked for release
2045  * sequence resolutions. A NULL value means to check all locations.
2046  * @param work_queue The work queue to which to add work items as they are
2047  * generated
2048  * @return True if any updates occurred (new synchronization, new mo_graph
2049  * edges)
2050  */
2051 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2052 {
2053         bool updated = false;
2054         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2055         while (it != pending_rel_seqs->end()) {
2056                 struct release_seq *pending = *it;
2057                 ModelAction *acquire = pending->acquire;
2058                 const ModelAction *read = pending->read;
2059
2060                 /* Only resolve sequences on the given location, if provided */
2061                 if (location && read->get_location() != location) {
2062                         it++;
2063                         continue;
2064                 }
2065
2066                 const ModelAction *rf = read->get_reads_from();
2067                 rel_heads_list_t release_heads;
2068                 bool complete;
2069                 complete = release_seq_heads(rf, &release_heads, pending);
2070                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2071                         if (!acquire->has_synchronized_with(release_heads[i])) {
2072                                 if (acquire->synchronize_with(release_heads[i]))
2073                                         updated = true;
2074                                 else
2075                                         set_bad_synchronization();
2076                         }
2077                 }
2078
2079                 if (updated) {
2080                         /* Re-check all pending release sequences */
2081                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2082                         /* Re-check read-acquire for mo_graph edges */
2083                         if (acquire->is_read())
2084                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2085
2086                         /* propagate synchronization to later actions */
2087                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2088                         for (; (*rit) != acquire; rit++) {
2089                                 ModelAction *propagate = *rit;
2090                                 if (acquire->happens_before(propagate)) {
2091                                         propagate->synchronize_with(acquire);
2092                                         /* Re-check 'propagate' for mo_graph edges */
2093                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2094                                 }
2095                         }
2096                 }
2097                 if (complete) {
2098                         it = pending_rel_seqs->erase(it);
2099                         snapshot_free(pending);
2100                 } else {
2101                         it++;
2102                 }
2103         }
2104
2105         // If we resolved any release sequences (added synchronization), see if we have realized a data race.
2106         checkDataRaces();
2107
2108         return updated;
2109 }
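
/* Illustrative sketch, not part of the original source, of why resolution
 * has to be lazy:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);
 *   x.store(2, std::memory_order_relaxed);
 *   // Thread 2:
 *   x.store(3, std::memory_order_relaxed);
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);   // reads 2
 *
 * Thread 3 synchronizes with the store-release of 1 only if Thread 2's store
 * of 3 does not land in modification order between x = 1 and x = 2. If that
 * is still undecided when the load executes, the sequence sits on
 * pending_rel_seqs and is re-examined here as new mo edges are added.
 */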
2110
2111 /**
2112  * Performs various bookkeeping operations for the current ModelAction. For
2113  * instance, adds the action to the per-object, per-thread action vector and to the
2114  * action trace list of all thread actions.
2115  *
2116  * @param act is the ModelAction to add.
2117  */
2118 void ModelChecker::add_action_to_lists(ModelAction *act)
2119 {
2120         int tid = id_to_int(act->get_tid());
2121         ModelAction *uninit = NULL;
2122         int uninit_id = -1;
2123         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2124         if (list->empty() && act->is_atomic_var()) {
2125                 uninit = new_uninitialized_action(act->get_location());
2126                 uninit_id = id_to_int(uninit->get_tid());
2127                 list->push_back(uninit);
2128         }
2129         list->push_back(act);
2130
2131         action_trace->push_back(act);
2132         if (uninit)
2133                 action_trace->push_front(uninit);
2134
2135         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2136         if (tid >= (int)vec->size())
2137                 vec->resize(priv->next_thread_id);
2138         (*vec)[tid].push_back(act);
2139         if (uninit)
2140                 (*vec)[uninit_id].push_front(uninit);
2141
2142         if ((int)thrd_last_action->size() <= tid)
2143                 thrd_last_action->resize(get_num_threads());
2144         (*thrd_last_action)[tid] = act;
2145         if (uninit)
2146                 (*thrd_last_action)[uninit_id] = uninit;
2147
2148         if (act->is_fence() && act->is_release()) {
2149                 if ((int)thrd_last_fence_release->size() <= tid)
2150                         thrd_last_fence_release->resize(get_num_threads());
2151                 (*thrd_last_fence_release)[tid] = act;
2152         }
2153
2154         if (act->is_wait()) {
2155                 void *mutex_loc = (void *) act->get_value();
2156                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2157
2158                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2159                 if (tid >= (int)vec->size())
2160                         vec->resize(priv->next_thread_id);
2161                 (*vec)[tid].push_back(act);
2162         }
2163 }
2164
2165 /**
2166  * @brief Get the last action performed by a particular Thread
2167  * @param tid The thread ID of the Thread in question
2168  * @return The last action in the thread
2169  */
2170 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2171 {
2172         int threadid = id_to_int(tid);
2173         if (threadid < (int)thrd_last_action->size())
2174                 return (*thrd_last_action)[threadid];
2175         else
2176                 return NULL;
2177 }
2178
2179 /**
2180  * @brief Get the last fence release performed by a particular Thread
2181  * @param tid The thread ID of the Thread in question
2182  * @return The last fence release in the thread, if one exists; NULL otherwise
2183  */
2184 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2185 {
2186         int threadid = id_to_int(tid);
2187         if (threadid < (int)thrd_last_fence_release->size())
2188                 return (*thrd_last_fence_release)[threadid];
2189         else
2190                 return NULL;
2191 }
2192
2193 /**
2194  * Gets the last memory_order_seq_cst write (in the total global sequence)
2195  * performed on a particular object (i.e., memory location), not including the
2196  * current action.
2197  * @param curr The current ModelAction; also denotes the object location to
2198  * check
2199  * @return The last seq_cst write
2200  */
2201 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2202 {
2203         void *location = curr->get_location();
2204         action_list_t *list = get_safe_ptr_action(obj_map, location);
2205         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2206         action_list_t::reverse_iterator rit;
2207         for (rit = list->rbegin(); rit != list->rend(); rit++)
2208                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2209                         return *rit;
2210         return NULL;
2211 }
2212
2213 /**
2214  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2215  * performed in a particular thread, prior to a particular fence.
2216  * @param tid The ID of the thread to check
2217  * @param before_fence The fence from which to begin the search; if NULL, then
2218  * search for the most recent fence in the thread.
2219  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2220  */
2221 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2222 {
2223         /* All fences should have NULL location */
2224         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2225         action_list_t::reverse_iterator rit = list->rbegin();
2226
2227         if (before_fence) {
2228                 for (; rit != list->rend(); rit++)
2229                         if (*rit == before_fence)
2230                                 break;
2231
2232                 ASSERT(*rit == before_fence);
2233                 rit++;
2234         }
2235
2236         for (; rit != list->rend(); rit++)
2237                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2238                         return *rit;
2239         return NULL;
2240 }
2241
2242 /**
2243  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2244  * location). This function identifies the mutex according to the current
2245  * action, which is presumed to operate on the same mutex.
2246  * @param curr The current ModelAction; also denotes the object location to
2247  * check
2248  * @return The last unlock operation
2249  */
2250 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2251 {
2252         void *location = curr->get_location();
2253         action_list_t *list = get_safe_ptr_action(obj_map, location);
2254         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2255         action_list_t::reverse_iterator rit;
2256         for (rit = list->rbegin(); rit != list->rend(); rit++)
2257                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2258                         return *rit;
2259         return NULL;
2260 }
2261
2262 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2263 {
2264         ModelAction *parent = get_last_action(tid);
2265         if (!parent)
2266                 parent = get_thread(tid)->get_creation();
2267         return parent;
2268 }
2269
2270 /**
2271  * Returns the clock vector for a given thread.
2272  * @param tid The thread whose clock vector we want
2273  * @return Desired clock vector
2274  */
2275 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2276 {
2277         return get_parent_action(tid)->get_cv();
2278 }
2279
2280 /**
2281  * Resolve a set of Promises with a current write. The set is provided in the
2282  * Node corresponding to @a write.
2283  * @param write The ModelAction that is fulfilling Promises
2284  * @return True if promises were resolved; false otherwise
2285  */
2286 bool ModelChecker::resolve_promises(ModelAction *write)
2287 {
2288         bool haveResolved = false;
2289         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2290         promise_list_t mustResolve, resolved;
2291
2292         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2293                 Promise *promise = (*promises)[promise_index];
2294                 if (write->get_node()->get_promise(i)) {
2295                         ModelAction *read = promise->get_action();
2296                         read_from(read, write);
2297                         //Make sure the promise's value matches the write's value
2298                         ASSERT(promise->is_compatible(write));
2299                         mo_graph->resolvePromise(read, write, &mustResolve);
2300
2301                         resolved.push_back(promise);
2302                         promises->erase(promises->begin() + promise_index);
2303                         actions_to_check.push_back(read);
2304
2305                         haveResolved = true;
2306                 } else
2307                         promise_index++;
2308         }
2309
2310         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2311                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2312                                 == resolved.end())
2313                         priv->failed_promise = true;
2314         }
2315         for (unsigned int i = 0; i < resolved.size(); i++)
2316                 delete resolved[i];
2317         //Check whether reading these writes has made threads unable to
2318         //resolve promises
2319
2320         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2321                 ModelAction *read = actions_to_check[i];
2322                 mo_check_promises(read, true);
2323         }
2324
2325         return haveResolved;
2326 }
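
/* Illustrative sketch, not part of the original source: the shape of a
 * promise that gets resolved here.
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r1 = x.load(std::memory_order_relaxed);   // may speculatively read 42
 *   // Thread 2:
 *   x.store(42, std::memory_order_relaxed);
 *
 * When the model checker lets the load return 42 before the store has
 * executed, a Promise for value 42 is recorded. Once the store runs and its
 * Node marks that Promise as satisfiable, read_from() binds the load to the
 * store and the Promise is removed from the promises list.
 */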
2327
2328 /**
2329  * Compute the set of promises that could potentially be satisfied by this
2330  * action. Note that the set computation actually appears in the Node, not in
2331  * ModelChecker.
2332  * @param curr The ModelAction that may satisfy promises
2333  */
2334 void ModelChecker::compute_promises(ModelAction *curr)
2335 {
2336         for (unsigned int i = 0; i < promises->size(); i++) {
2337                 Promise *promise = (*promises)[i];
2338                 const ModelAction *act = promise->get_action();
2339                 if (!act->happens_before(curr) &&
2340                                 act->is_read() &&
2341                                 !act->could_synchronize_with(curr) &&
2342                                 !act->same_thread(curr) &&
2343                                 act->get_location() == curr->get_location() &&
2344                                 promise->get_value() == curr->get_value()) {
2345                         curr->get_node()->set_promise(i, act->is_rmw());
2346                 }
2347         }
2348 }
2349
2350 /** Checks promises in response to a change in a thread's ClockVector. */
2351 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2352 {
2353         for (unsigned int i = 0; i < promises->size(); i++) {
2354                 Promise *promise = (*promises)[i];
2355                 const ModelAction *act = promise->get_action();
2356                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2357                                 merge_cv->synchronized_since(act)) {
2358                         if (promise->eliminate_thread(tid)) {
2359                                 //Promise has failed
2360                                 priv->failed_promise = true;
2361                                 return;
2362                         }
2363                 }
2364         }
2365 }
2366
2367 void ModelChecker::check_promises_thread_disabled()
2368 {
2369         for (unsigned int i = 0; i < promises->size(); i++) {
2370                 Promise *promise = (*promises)[i];
2371                 if (promise->has_failed()) {
2372                         priv->failed_promise = true;
2373                         return;
2374                 }
2375         }
2376 }
2377
2378 /**
2379  * @brief Checks promises in response to addition to modification order for
2380  * threads.
2381  *
2382  * We test whether threads are still available for satisfying promises after an
2383  * addition to our modification order constraints. Those that are unavailable
2384  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2385  * that promise has failed.
2386  *
2387  * @param act The ModelAction which updated the modification order
2388  * @param is_read_check Should be true if act is a read and we must check for
2389  * updates to the store from which it read (there is a distinction here for
2390  * RMW's, which are both a load and a store)
2391  */
2392 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2393 {
2394         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2395
2396         for (unsigned int i = 0; i < promises->size(); i++) {
2397                 Promise *promise = (*promises)[i];
2398                 const ModelAction *pread = promise->get_action();
2399
2400                 // Is this promise on the same location?
2401                 if (!pread->same_var(write))
2402                         continue;
2403
2404                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2405                         priv->failed_promise = true;
2406                         return;
2407                 }
2408
2409                 // Don't do any lookups twice for the same thread
2410                 if (!promise->thread_is_available(act->get_tid()))
2411                         continue;
2412
2413                 if (mo_graph->checkReachable(promise, write)) {
2414                         if (mo_graph->checkPromise(write, promise)) {
2415                                 priv->failed_promise = true;
2416                                 return;
2417                         }
2418                 }
2419         }
2420 }
2421
2422 /**
2423  * Compute the set of writes that may break the current pending release
2424  * sequence. This information is extracted from previous release sequence
2425  * calculations.
2426  *
2427  * @param curr The current ModelAction. Must be a release sequence fixup
2428  * action.
2429  */
2430 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2431 {
2432         if (pending_rel_seqs->empty())
2433                 return;
2434
2435         struct release_seq *pending = pending_rel_seqs->back();
2436         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2437                 const ModelAction *write = pending->writes[i];
2438                 curr->get_node()->add_relseq_break(write);
2439         }
2440
2441         /* NULL means don't break the sequence; just synchronize */
2442         curr->get_node()->add_relseq_break(NULL);
2443 }
2444
2445 /**
2446  * Build up an initial set of all past writes that this 'read' action may read
2447  * from. This set is determined by the clock vector's "happens before"
2448  * relationship.
2449  * @param curr is the current ModelAction that we are exploring; it must be a
2450  * 'read' operation.
2451  */
2452 void ModelChecker::build_reads_from_past(ModelAction *curr)
2453 {
2454         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2455         unsigned int i;
2456         ASSERT(curr->is_read());
2457
2458         ModelAction *last_sc_write = NULL;
2459
2460         if (curr->is_seqcst())
2461                 last_sc_write = get_last_seq_cst_write(curr);
2462
2463         /* Iterate over all threads */
2464         for (i = 0; i < thrd_lists->size(); i++) {
2465                 /* Iterate over actions in thread, starting from most recent */
2466                 action_list_t *list = &(*thrd_lists)[i];
2467                 action_list_t::reverse_iterator rit;
2468                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2469                         ModelAction *act = *rit;
2470
2471                         /* Only consider 'write' actions */
2472                         if (!act->is_write() || act == curr)
2473                                 continue;
2474
2475                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2476                         bool allow_read = true;
2477
2478                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2479                                 allow_read = false;
2480                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2481                                 allow_read = false;
2482
2483                         if (allow_read) {
2484                                 /* Only add feasible reads */
2485                                 mo_graph->startChanges();
2486                                 r_modification_order(curr, act);
2487                                 if (!is_infeasible())
2488                                         curr->get_node()->add_read_from(act);
2489                                 mo_graph->rollbackChanges();
2490                         }
2491
2492                         /* Include at most one act per-thread that "happens before" curr */
2493                         if (act->happens_before(curr))
2494                                 break;
2495                 }
2496         }
2497         /* We may find no valid may-read-from only if the execution is doomed */
2498         if (!curr->get_node()->get_read_from_size()) {
2499                 priv->no_valid_reads = true;
2500                 set_assert();
2501         }
2502
2503         if (DBG_ENABLED()) {
2504                 model_print("Reached read action:\n");
2505                 curr->print();
2506                 model_print("Printing may_read_from\n");
2507                 curr->get_node()->print_may_read_from();
2508                 model_print("End printing may_read_from\n");
2509         }
2510 }
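
/* Illustrative sketch, not part of the original source, of the per-thread
 * cut-off used above:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);
 *   int r = x.load(std::memory_order_relaxed);   // curr
 *
 * Walking Thread 1 backwards, x = 2 is added to may-read-from and, because
 * it already happens before curr, the walk stops there: reading the older
 * x = 1 would violate coherence, so it is never offered as a choice.
 */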
2511
2512 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2513 {
2514         while (true) {
2515                 /* UNINIT actions don't have a Node, and they never sleep */
2516                 if (write->is_uninitialized())
2517                         return true;
2518                 Node *prevnode = write->get_node()->get_parent();
2519
2520                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2521                 if (write->is_release() && thread_sleep)
2522                         return true;
2523                 if (!write->is_rmw()) {
2524                         return false;
2525                 }
2526                 if (write->get_reads_from() == NULL)
2527                         return true;
2528                 write = write->get_reads_from();
2529         }
2530 }
2531
2532 /**
2533  * @brief Create a new action representing an uninitialized atomic
2534  * @param location The memory location of the atomic object
2535  * @return A pointer to a new ModelAction
2536  */
2537 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2538 {
2539         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2540         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2541         act->create_cv(NULL);
2542         return act;
2543 }
2544
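/**
 * @brief Print each ModelAction in a list along with a simple hash of the list
 * @param list The action list to print
 */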
2545 static void print_list(action_list_t *list)
2546 {
2547         action_list_t::iterator it;
2548
2549         model_print("---------------------------------------------------------------------\n");
2550
2551         unsigned int hash = 0;
2552
2553         for (it = list->begin(); it != list->end(); it++) {
2554                 (*it)->print();
2555                 hash = hash^(hash<<3)^((*it)->hash());
2556         }
2557         model_print("HASH %u\n", hash);
2558         model_print("---------------------------------------------------------------------\n");
2559 }
2560
2561 #if SUPPORT_MOD_ORDER_DUMP
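/**
 * @brief Dump the modification-order graph and trace edges as a Graphviz file
 *
 * Writes "<filename>.dot" containing the mo_graph nodes plus reads-from ("rf")
 * and per-thread sequenced-before ("sb") edges for the current action trace.
 * The output can be rendered with Graphviz, e.g. "dot -Tpng out.dot -o out.png".
 *
 * @param filename Base name for the output file (".dot" is appended)
 */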
2562 void ModelChecker::dumpGraph(char *filename) const
2563 {
2564         char buffer[200];
2565         sprintf(buffer, "%s.dot", filename);
2566         FILE *file = fopen(buffer, "w");
2567         fprintf(file, "digraph %s {\n", filename);
2568         mo_graph->dumpNodes(file);
2569         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2570
2571         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2572                 ModelAction *action = *it;
2573                 if (action->is_read()) {
2574                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2575                         if (action->get_reads_from() != NULL)
2576                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2577                 }
2578                 if (thread_array[action->get_tid()] != NULL) {
2579                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2580                 }
2581
2582                 thread_array[action->get_tid()] = action;
2583         }
2584         fprintf(file, "}\n");
2585         model_free(thread_array);
2586         fclose(file);
2587 }
2588 #endif
2589
2590 /** @brief Prints an execution trace summary. */
2591 void ModelChecker::print_summary() const
2592 {
2593 #if SUPPORT_MOD_ORDER_DUMP
2594         char buffername[100];
2595         sprintf(buffername, "exec%04u", stats.num_total);
2596         mo_graph->dumpGraphToFile(buffername);
2597         sprintf(buffername, "graph%04u", stats.num_total);
2598         dumpGraph(buffername);
2599 #endif
2600
2601         model_print("Execution %u:", stats.num_total);
2602         if (isfeasibleprefix())
2603                 model_print("\n");
2604         else
2605                 print_infeasibility(" INFEASIBLE");
2606         print_list(action_trace);
2607         model_print("\n");
2608 }
2609
2610 /**
2611  * Add a Thread to the system for the first time. Should only be called once
2612  * per thread.
2613  * @param t The Thread to add
2614  */
2615 void ModelChecker::add_thread(Thread *t)
2616 {
2617         thread_map->put(id_to_int(t->get_id()), t);
2618         scheduler->add_thread(t);
2619 }
2620
2621 /**
2622  * Removes a thread from the scheduler.
2623  * @param the thread to remove.
2624  */
2625 void ModelChecker::remove_thread(Thread *t)
2626 {
2627         scheduler->remove_thread(t);
2628 }
2629
2630 /**
2631  * @brief Get a Thread reference by its ID
2632  * @param tid The Thread's ID
2633  * @return A Thread reference
2634  */
2635 Thread * ModelChecker::get_thread(thread_id_t tid) const
2636 {
2637         return thread_map->get(id_to_int(tid));
2638 }
2639
2640 /**
2641  * @brief Get a reference to the Thread in which a ModelAction was executed
2642  * @param act The ModelAction
2643  * @return A Thread reference
2644  */
2645 Thread * ModelChecker::get_thread(const ModelAction *act) const
2646 {
2647         return get_thread(act->get_tid());
2648 }
2649
2650 /**
2651  * @brief Check if a Thread is currently enabled
2652  * @param t The Thread to check
2653  * @return True if the Thread is currently enabled
2654  */
2655 bool ModelChecker::is_enabled(Thread *t) const
2656 {
2657         return scheduler->is_enabled(t);
2658 }
2659
2660 /**
2661  * @brief Check if a Thread is currently enabled
2662  * @param tid The ID of the Thread to check
2663  * @return True if the Thread is currently enabled
2664  */
2665 bool ModelChecker::is_enabled(thread_id_t tid) const
2666 {
2667         return scheduler->is_enabled(tid);
2668 }
2669
2670 /**
2671  * Switch from a model-checker context to a user-thread context. This is the
2672  * complement of ModelChecker::switch_to_master and must be called from the
2673  * model-checker context
2674  *
2675  * @param thread The user-thread to switch to
2676  */
2677 void ModelChecker::switch_from_master(Thread *thread)
2678 {
2679         scheduler->set_current_thread(thread);
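        /* Thread::swap() does not return until the user thread switches back
         * to system_context (normally via ModelChecker::switch_to_master()). */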
2680         Thread::swap(&system_context, thread);
2681 }
2682
2683 /**
2684  * Switch from a user-context to the "master thread" context (a.k.a. system
2685  * context). This switch is made with the intention of exploring a particular
2686  * model-checking action (described by a ModelAction object). Must be called
2687  * from a user-thread context.
2688  *
2689  * @param act The current action that will be explored. May be NULL only if
2690  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2691  * ModelChecker::has_asserted).
2692  * @return The value returned by the current action
2693  */
2694 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2695 {
2696         DBG();
2697         Thread *old = thread_current();
2698         ASSERT(!old->get_pending());
2699         old->set_pending(act);
2700         if (Thread::swap(old, &system_context) < 0) {
2701                 perror("swap threads");
2702                 exit(EXIT_FAILURE);
2703         }
2704         return old->get_return_value();
2705 }
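/*
 * Illustrative sketch: an interposed atomic operation running in a user
 * thread would typically package itself as a ModelAction and yield to the
 * model checker roughly as follows; the exact constructor arguments here
 * ("obj", the memory order, ATOMIC_READ) are assumptions for illustration:
 *
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, std::memory_order_acquire, obj));
 *
 * The value returned is whatever get_return_value() yields for the thread,
 * e.g. the value chosen for a read.
 */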
2706
2707 /**
2708  * Takes the next step in the execution, if possible.
2709  * @param curr The current step to take
2710  * @return The next Thread to run, if any; NULL if this execution
2711  * should terminate
2712  */
2713 Thread * ModelChecker::take_step(ModelAction *curr)
2714 {
2715         Thread *curr_thrd = get_thread(curr);
2716         ASSERT(curr_thrd->get_state() == THREAD_READY);
2717
2718         curr = check_current_action(curr);
2719
2720         /* Infeasible -> don't take any more steps */
2721         if (is_infeasible())
2722                 return NULL;
2723         else if (isfeasibleprefix() && have_bug_reports()) {
2724                 set_assert();
2725                 return NULL;
2726         }
2727
2728         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2729                 return NULL;
2730
2731         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2732                 scheduler->remove_thread(curr_thrd);
2733
2734         Thread *next_thrd = get_next_thread(curr);
2735         /* Only ask for the next thread from Scheduler if we haven't chosen one
2736          * already */
2737         if (!next_thrd)
2738                 next_thrd = scheduler->select_next_thread();
2739
2740         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2741                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2742
2743         return next_thrd;
2744 }
2745
2746 /** Wrapper to run the user's main function, with appropriate arguments */
2747 void user_main_wrapper(void *)
2748 {
2749         user_main(model->params.argc, model->params.argv);
2750 }
2751
2752 /** @brief Run ModelChecker for the user program */
2753 void ModelChecker::run()
2754 {
2755         do {
2756                 thrd_t user_thread;
2757                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2758                 add_thread(t);
2759
2760                 do {
2761                         /*
2762                          * Stash next pending action(s) for thread(s). There
2763                          * should only need to stash one thread's action--the
2764                          * thread which just took a step--plus the first step
2765                          * for any newly-created thread
2766                          */
2767                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2768                                 thread_id_t tid = int_to_id(i);
2769                                 Thread *thr = get_thread(tid);
2770                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2771                                         switch_from_master(thr);
2772                                 }
2773                         }
2774
2775                         /* Catch assertions from prior take_step or from
2776                          * between-ModelAction bugs (e.g., data races) */
2777                         if (has_asserted())
2778                                 break;
2779
2780                         /* Consume the next action for a Thread */
2781                         ModelAction *curr = t->get_pending();
2782                         t->set_pending(NULL);
2783                         t = take_step(curr);
2784                 } while (t && !t->is_model_thread());
2785
2786                 /*
2787                  * Launch end-of-execution release sequence fixups only when
2788                  * the execution is otherwise feasible AND there are:
2789                  *
2790                  * (1) pending release sequences
2791                  * (2) pending assertions that could be invalidated by a change
2792                  * in clock vectors (i.e., data races)
2793                  * (3) no pending promises
2794                  */
2795                 while (!pending_rel_seqs->empty() &&
2796                                 is_feasible_prefix_ignore_relseq() &&
2797                                 !unrealizedraces.empty()) {
2798                         model_print("*** WARNING: release sequence fixup action "
2799                                         "(%zu pending release sequence(s)) ***\n",
2800                                         pending_rel_seqs->size());
2801                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2802                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2803                                         model_thread);
2804                         take_step(fixup);
2805                 }
2806         } while (next_execution());
2807
2808         print_stats();
2809 }