[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
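/**
 * @brief A bug report message recorded during the current execution
 *
 * Allocated with snapshot_malloc()/SNAPSHOTALLOC, placing it in snapshotting
 * memory alongside the rest of the per-execution state (see
 * model_snapshot_members::bugs below).
 */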
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
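/*
 * Illustrative sketch (not part of this file): the global `model` declared
 * above is expected to be set up by the driver before the user program runs,
 * roughly along these lines (the parameter handling shown is an assumption):
 *
 *   struct model_params params;
 *   // ... fill in defaults and any command-line overrides ...
 *   model = new ModelChecker(params);
 */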
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
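/**
 * @brief Look up the action list for a location, creating it on first use
 *
 * This helper (and get_safe_ptr_vect_action() below) lazily allocates the
 * per-location entry so that callers never have to handle a NULL list.
 */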
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores the user program to its initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of the read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: the current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise, if no
221  * threads remain to be executed, NULL.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* The correct sleep set is in the parent node. */
286                 execute_sleep_set();
287
288                 DEBUG("*** Divergence point ***\n");
289
290                 diverge = NULL;
291         } else {
292                 tid = next->get_tid();
293         }
294         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
295         ASSERT(tid != THREAD_ID_T_NONE);
296         return thread_map->get(id_to_int(tid));
297 }
298
299 /**
300  * We need to know what the next action of each thread in the sleep
301  * set will be. This method marks each such thread's pending action
302  * with the sleep flag.
303  */
304
305 void ModelChecker::execute_sleep_set()
306 {
307         for (unsigned int i = 0; i < get_num_threads(); i++) {
308                 thread_id_t tid = int_to_id(i);
309                 Thread *thr = get_thread(tid);
310                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
311                         thr->get_pending()->set_sleep_flag();
312                 }
313         }
314 }
315
316 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
317 {
318         for (unsigned int i = 0; i < get_num_threads(); i++) {
319                 Thread *thr = get_thread(int_to_id(i));
320                 if (scheduler->is_sleep_set(thr)) {
321                         ModelAction *pending_act = thr->get_pending();
322                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
323                                 //Remove this thread from sleep set
324                                 scheduler->remove_sleep(thr);
325                 }
326         }
327 }
328
329 /** @brief Alert the model-checker that an incorrectly-ordered
330  * synchronization was made */
331 void ModelChecker::set_bad_synchronization()
332 {
333         priv->bad_synchronization = true;
334 }
335
336 /**
337  * Check whether the current trace has triggered an assertion which should halt
338  * its execution.
339  *
340  * @return True, if the execution should be aborted; false otherwise
341  */
342 bool ModelChecker::has_asserted() const
343 {
344         return priv->asserted;
345 }
346
347 /**
348  * Trigger a trace assertion which should cause this execution to be halted.
349  * This can be due to a detected bug or due to an infeasibility that should
350  * halt ASAP.
351  */
352 void ModelChecker::set_assert()
353 {
354         priv->asserted = true;
355 }
356
357 /**
358  * Check if we are in a deadlock. Should only be called at the end of an
359  * execution, although it should not give false positives in the middle of an
360  * execution (there should be some ENABLED thread).
361  *
362  * @return True if program is in a deadlock; false otherwise
363  */
364 bool ModelChecker::is_deadlocked() const
365 {
366         bool blocking_threads = false;
367         for (unsigned int i = 0; i < get_num_threads(); i++) {
368                 thread_id_t tid = int_to_id(i);
369                 if (is_enabled(tid))
370                         return false;
371                 Thread *t = get_thread(tid);
372                 if (!t->is_model_thread() && t->get_pending())
373                         blocking_threads = true;
374         }
375         return blocking_threads;
376 }
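/*
 * Illustrative sketch of the situation is_deadlocked() detects (user-program
 * pseudocode using plain std::mutex calls, which the checker models as
 * ATOMIC_LOCK actions): two threads taking the same locks in opposite orders
 * can leave every thread disabled while still holding a pending lock action.
 *
 *   std::mutex a, b;
 *   // Thread 1:              // Thread 2:
 *   a.lock();                 b.lock();
 *   b.lock();  // blocks      a.lock();  // blocks -> deadlock reported
 */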
377
378 /**
379  * Check if this is a complete execution. That is, have all threads completed
380  * execution (rather than exiting because sleep sets have forced a redundant
381  * execution).
382  *
383  * @return True if the execution is complete.
384  */
385 bool ModelChecker::is_complete_execution() const
386 {
387         for (unsigned int i = 0; i < get_num_threads(); i++)
388                 if (is_enabled(int_to_id(i)))
389                         return false;
390         return true;
391 }
392
393 /**
394  * @brief Assert a bug in the executing program.
395  *
396  * Use this function to assert any sort of bug in the user program. If the
397  * current trace is feasible (actually, a prefix of some feasible execution),
398  * then this execution will be aborted, printing the appropriate message. If
399  * the current trace is not yet feasible, the error message will be stashed and
400  * printed if the execution ever becomes feasible.
401  *
402  * @param msg Descriptive message for the bug (do not include newline char)
403  * @return True if bug is immediately-feasible
404  */
405 bool ModelChecker::assert_bug(const char *msg)
406 {
407         priv->bugs.push_back(new bug_message(msg));
408
409         if (isfeasibleprefix()) {
410                 set_assert();
411                 return true;
412         }
413         return false;
414 }
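/*
 * Example usage from this file: the end-of-execution checks in
 * next_execution() report a deadlock via
 *
 *   if (is_deadlocked())
 *           assert_bug("Deadlock detected");
 *
 * If the current trace is already a feasible prefix, this halts the
 * execution immediately; otherwise the message waits in priv->bugs.
 */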
415
416 /**
417  * @brief Assert a bug in the executing program, asserted by a user thread
418  * @see ModelChecker::assert_bug
419  * @param msg Descriptive message for the bug (do not include newline char)
420  */
421 void ModelChecker::assert_user_bug(const char *msg)
422 {
423         /* If feasible bug, bail out now */
424         if (assert_bug(msg))
425                 switch_to_master(NULL);
426 }
427
428 /** @return True, if any bugs have been reported for this execution */
429 bool ModelChecker::have_bug_reports() const
430 {
431         return priv->bugs.size() != 0;
432 }
433
434 /** @brief Print bug report listing for this execution (if any bugs exist) */
435 void ModelChecker::print_bugs() const
436 {
437         if (have_bug_reports()) {
438                 model_print("Bug report: %zu bug%s detected\n",
439                                 priv->bugs.size(),
440                                 priv->bugs.size() > 1 ? "s" : "");
441                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
442                         priv->bugs[i]->print();
443         }
444 }
445
446 /**
447  * @brief Record end-of-execution stats
448  *
449  * Must be run when exiting an execution. Records various stats.
450  * @see struct execution_stats
451  */
452 void ModelChecker::record_stats()
453 {
454         stats.num_total++;
455         if (!isfeasibleprefix())
456                 stats.num_infeasible++;
457         else if (have_bug_reports())
458                 stats.num_buggy_executions++;
459         else if (is_complete_execution())
460                 stats.num_complete++;
461         else
462                 stats.num_redundant++;
463 }
464
465 /** @brief Print execution stats */
466 void ModelChecker::print_stats() const
467 {
468         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
469         model_print("Number of redundant executions: %d\n", stats.num_redundant);
470         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
471         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
472         model_print("Total executions: %d\n", stats.num_total);
473         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
474 }
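/*
 * Sample of the summary print_stats() produces (the numbers are illustrative
 * only):
 *
 *   Number of complete, bug-free executions: 12
 *   Number of redundant executions: 3
 *   Number of buggy executions: 1
 *   Number of infeasible executions: 4
 *   Total executions: 20
 *   Total nodes created: 257
 */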
475
476 /**
477  * @brief End-of-execution print
478  * @param printbugs Should any existing bugs be printed?
479  */
480 void ModelChecker::print_execution(bool printbugs) const
481 {
482         print_program_output();
483
484         if (DBG_ENABLED() || params.verbose) {
485                 model_print("Earliest divergence point since last feasible execution:\n");
486                 if (earliest_diverge)
487                         earliest_diverge->print();
488                 else
489                         model_print("(Not set)\n");
490
491                 model_print("\n");
492                 print_stats();
493         }
494
495         /* Don't print invalid bugs */
496         if (printbugs)
497                 print_bugs();
498
499         model_print("\n");
500         print_summary();
501 }
502
503 /**
504  * Queries the model-checker for more executions to explore and, if one
505  * exists, resets the model-checker state to execute a new execution.
506  *
507  * @return If there are more executions to explore, return true. Otherwise,
508  * return false.
509  */
510 bool ModelChecker::next_execution()
511 {
512         DBG();
513         /* Is this execution a feasible execution that's worth bug-checking? */
514         bool complete = isfeasibleprefix() && (is_complete_execution() ||
515                         have_bug_reports());
516
517         /* End-of-execution bug checks */
518         if (complete) {
519                 if (is_deadlocked())
520                         assert_bug("Deadlock detected");
521
522                 checkDataRaces();
523         }
524
525         record_stats();
526
527         /* Output */
528         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
529                 print_execution(complete);
530         else
531                 clear_program_output();
532
533         if (complete)
534                 earliest_diverge = NULL;
535
536         if ((diverge = get_next_backtrack()) == NULL)
537                 return false;
538
539         if (DBG_ENABLED()) {
540                 model_print("Next execution will diverge at:\n");
541                 diverge->print();
542         }
543
544         reset_to_initial_state();
545         return true;
546 }
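/*
 * Sketch of how a driver might use next_execution(); the real loop lives
 * outside this file (e.g. in the main/driver code), so the details here are
 * assumptions:
 *
 *   do {
 *           // run the user program to completion under the scheduler
 *   } while (model->next_execution());
 */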
547
548 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
549 {
550         switch (act->get_type()) {
551         case ATOMIC_FENCE:
552         case ATOMIC_READ:
553         case ATOMIC_WRITE:
554         case ATOMIC_RMW: {
555                 /* Optimization: relaxed operations don't need backtracking */
556                 if (act->is_relaxed())
557                         return NULL;
558                 /* linear search: from most recent to oldest */
559                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
560                 action_list_t::reverse_iterator rit;
561                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
562                         ModelAction *prev = *rit;
563                         if (prev->could_synchronize_with(act))
564                                 return prev;
565                 }
566                 break;
567         }
568         case ATOMIC_LOCK:
569         case ATOMIC_TRYLOCK: {
570                 /* linear search: from most recent to oldest */
571                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
572                 action_list_t::reverse_iterator rit;
573                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
574                         ModelAction *prev = *rit;
575                         if (act->is_conflicting_lock(prev))
576                                 return prev;
577                 }
578                 break;
579         }
580         case ATOMIC_UNLOCK: {
581                 /* linear search: from most recent to oldest */
582                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
583                 action_list_t::reverse_iterator rit;
584                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
585                         ModelAction *prev = *rit;
586                         if (!act->same_thread(prev) && prev->is_failed_trylock())
587                                 return prev;
588                 }
589                 break;
590         }
591         case ATOMIC_WAIT: {
592                 /* linear search: from most recent to oldest */
593                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
594                 action_list_t::reverse_iterator rit;
595                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
596                         ModelAction *prev = *rit;
597                         if (!act->same_thread(prev) && prev->is_failed_trylock())
598                                 return prev;
599                         if (!act->same_thread(prev) && prev->is_notify())
600                                 return prev;
601                 }
602                 break;
603         }
604
605         case ATOMIC_NOTIFY_ALL:
606         case ATOMIC_NOTIFY_ONE: {
607                 /* linear search: from most recent to oldest */
608                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
609                 action_list_t::reverse_iterator rit;
610                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
611                         ModelAction *prev = *rit;
612                         if (!act->same_thread(prev) && prev->is_wait())
613                                 return prev;
614                 }
615                 break;
616         }
617         default:
618                 break;
619         }
620         return NULL;
621 }
622
623 /** This method finds backtracking points: prior conflicting actions against
624  * which we should try to reorder the given ModelAction.
625  *
626  * @param act The ModelAction to find backtracking points for.
627  */
628 void ModelChecker::set_backtracking(ModelAction *act)
629 {
630         Thread *t = get_thread(act);
631         ModelAction *prev = get_last_conflict(act);
632         if (prev == NULL)
633                 return;
634
635         Node *node = prev->get_node()->get_parent();
636
637         int low_tid, high_tid;
638         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
639                 low_tid = id_to_int(act->get_tid());
640                 high_tid = low_tid + 1;
641         } else {
642                 low_tid = 0;
643                 high_tid = get_num_threads();
644         }
645
646         for (int i = low_tid; i < high_tid; i++) {
647                 thread_id_t tid = int_to_id(i);
648
649                 /* Make sure this thread can be enabled here. */
650                 if (i >= node->get_num_threads())
651                         break;
652
653                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
654                 if (node->enabled_status(tid) != THREAD_ENABLED)
655                         continue;
656
657                 /* Check if this has been explored already */
658                 if (node->has_been_explored(tid))
659                         continue;
660
661                 /* See if fairness allows */
662                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
663                         bool unfair = false;
664                         for (int t = 0; t < node->get_num_threads(); t++) {
665                                 thread_id_t tother = int_to_id(t);
666                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
667                                         unfair = true;
668                                         break;
669                                 }
670                         }
671                         if (unfair)
672                                 continue;
673                 }
674                 /* Cache the latest backtracking point */
675                 set_latest_backtrack(prev);
676
677                 /* If this is a new backtracking point, mark the tree */
678                 if (!node->set_backtrack(tid))
679                         continue;
680                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
681                                         id_to_int(prev->get_tid()),
682                                         id_to_int(t->get_id()));
683                 if (DBG_ENABLED()) {
684                         prev->print();
685                         act->print();
686                 }
687         }
688 }
689
690 /**
691  * @brief Cache a backtracking point as the "most recent", if eligible
692  *
693  * Note that this does not prepare the NodeStack for this backtracking
694  * operation; it only caches the action on a per-execution basis.
695  *
696  * @param act The operation at which we should explore a different next action
697  * (i.e., backtracking point)
698  * @return True, if this action is now the most recent backtracking point;
699  * false otherwise
700  */
701 bool ModelChecker::set_latest_backtrack(ModelAction *act)
702 {
703         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
704                 priv->next_backtrack = act;
705                 return true;
706         }
707         return false;
708 }
709
710 /**
711  * Returns last backtracking point. The model checker will explore a different
712  * path for this point in the next execution.
713  * @return The ModelAction at which the next execution should diverge.
714  */
715 ModelAction * ModelChecker::get_next_backtrack()
716 {
717         ModelAction *next = priv->next_backtrack;
718         priv->next_backtrack = NULL;
719         return next;
720 }
721
722 /**
723  * Processes a read or RMW model action.
724  * @param curr The read model action to process.
725  * @param second_part_of_rmw True if this is the second part of an RMW operation.
726  * @return True if processing this read updates the mo_graph.
727  */
728 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
729 {
730         uint64_t value = VALUE_NONE;
731         bool updated = false;
732         while (true) {
733                 const ModelAction *reads_from = curr->get_node()->get_read_from();
734                 if (reads_from != NULL) {
735                         mo_graph->startChanges();
736
737                         value = reads_from->get_value();
738                         bool r_status = false;
739
740                         if (!second_part_of_rmw) {
741                                 check_recency(curr, reads_from);
742                                 r_status = r_modification_order(curr, reads_from);
743                         }
744
745                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
746                                 mo_graph->rollbackChanges();
747                                 priv->too_many_reads = false;
748                                 continue;
749                         }
750
751                         read_from(curr, reads_from);
752                         mo_graph->commitChanges();
753                         mo_check_promises(curr, true);
754
755                         updated |= r_status;
756                 } else if (!second_part_of_rmw) {
757                         /* Read from future value */
758                         struct future_value fv = curr->get_node()->get_future_value();
759                         Promise *promise = new Promise(curr, fv);
760                         value = fv.value;
761                         curr->set_read_from_promise(promise);
762                         promises->push_back(promise);
763                         mo_graph->startChanges();
764                         updated = r_modification_order(curr, promise);
765                         mo_graph->commitChanges();
766                 }
767                 get_thread(curr)->set_return_value(value);
768                 return updated;
769         }
770 }
771
772 /**
773  * Processes a lock, trylock, unlock, wait, or notify model action.
774  * @param curr The mutex-related model action to process.
775  *
776  * The try lock operation checks whether the lock is taken.  If not,
777  * it falls to the normal lock operation case.  If so, it returns
778  * fail.
779  *
780  * The lock operation has already been checked that it is enabled, so
781  * it just grabs the lock and synchronizes with the previous unlock.
782  *
783  * The unlock operation has to re-enable all of the threads that are
784  * waiting on the lock.
785  *
786  * @return True if synchronization was updated; false otherwise
787  */
788 bool ModelChecker::process_mutex(ModelAction *curr)
789 {
790         std::mutex *mutex = NULL;
791         struct std::mutex_state *state = NULL;
792
793         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
794                 mutex = (std::mutex *)curr->get_location();
795                 state = mutex->get_state();
796         } else if (curr->is_wait()) {
797                 mutex = (std::mutex *)curr->get_value();
798                 state = mutex->get_state();
799         }
800
801         switch (curr->get_type()) {
802         case ATOMIC_TRYLOCK: {
803                 bool success = !state->islocked;
804                 curr->set_try_lock(success);
805                 if (!success) {
806                         get_thread(curr)->set_return_value(0);
807                         break;
808                 }
809                 get_thread(curr)->set_return_value(1);
810         }
811                 //otherwise fall through to the lock case
812         case ATOMIC_LOCK: {
813                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
814                         assert_bug("Lock access before initialization");
815                 state->islocked = true;
816                 ModelAction *unlock = get_last_unlock(curr);
817                 //synchronize with the previous unlock statement
818                 if (unlock != NULL) {
819                         curr->synchronize_with(unlock);
820                         return true;
821                 }
822                 break;
823         }
824         case ATOMIC_UNLOCK: {
825                 //unlock the lock
826                 state->islocked = false;
827                 //wake up the other threads
828                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
829                 //activate all the waiting threads
830                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
831                         scheduler->wake(get_thread(*rit));
832                 }
833                 waiters->clear();
834                 break;
835         }
836         case ATOMIC_WAIT: {
837                 //unlock the lock
838                 state->islocked = false;
839                 //wake up the other threads
840                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
841                 //activate all the waiting threads
842                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
843                         scheduler->wake(get_thread(*rit));
844                 }
845                 waiters->clear();
846                 //check whether we should go to sleep or not...simulate spurious failures
847                 if (curr->get_node()->get_misc() == 0) {
848                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
849                         //disable us
850                         scheduler->sleep(get_thread(curr));
851                 }
852                 break;
853         }
854         case ATOMIC_NOTIFY_ALL: {
855                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
856                 //activate all the waiting threads
857                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
858                         scheduler->wake(get_thread(*rit));
859                 }
860                 waiters->clear();
861                 break;
862         }
863         case ATOMIC_NOTIFY_ONE: {
864                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
865                 int wakeupthread = curr->get_node()->get_misc();
866                 action_list_t::iterator it = waiters->begin();
867                 advance(it, wakeupthread);
868                 scheduler->wake(get_thread(*it));
869                 waiters->erase(it);
870                 break;
871         }
872
873         default:
874                 ASSERT(0);
875         }
876         return false;
877 }
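/*
 * Illustrative user-level sketch of the operations handled above (standard
 * C++ mutex calls; under the checker they become ATOMIC_TRYLOCK, ATOMIC_LOCK
 * and ATOMIC_UNLOCK actions):
 *
 *   std::mutex m;
 *   if (m.try_lock()) {      // ATOMIC_TRYLOCK: succeeds iff !state->islocked
 *           // ... critical section ...
 *           m.unlock();      // ATOMIC_UNLOCK: wakes threads in lock_waiters_map
 *   } else {
 *           m.lock();        // ATOMIC_LOCK: synchronizes with the last unlock
 *           m.unlock();
 *   }
 */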
878
879 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
880 {
881         /* Do more ambitious checks now that mo is more complete */
882         if (mo_may_allow(writer, reader)) {
883                 Node *node = reader->get_node();
884
885                 /* Find an ancestor thread which exists at the time of the reader */
886                 Thread *write_thread = get_thread(writer);
887                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
888                         write_thread = write_thread->get_parent();
889
890                 struct future_value fv = {
891                         writer->get_value(),
892                         writer->get_seq_number() + params.maxfuturedelay,
893                         write_thread->get_id(),
894                 };
895                 if (node->add_future_value(fv))
896                         set_latest_backtrack(reader);
897         }
898 }
899
900 /**
901  * Process a write ModelAction
902  * @param curr The ModelAction to process
903  * @return True if the mo_graph was updated or promises were resolved
904  */
905 bool ModelChecker::process_write(ModelAction *curr)
906 {
907         bool updated_mod_order = w_modification_order(curr);
908         bool updated_promises = resolve_promises(curr);
909
910         if (promises->size() == 0) {
911                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
912                         struct PendingFutureValue pfv = (*futurevalues)[i];
913                         add_future_value(pfv.writer, pfv.act);
914                 }
915                 futurevalues->clear();
916         }
917
918         mo_graph->commitChanges();
919         mo_check_promises(curr, false);
920
921         get_thread(curr)->set_return_value(VALUE_NONE);
922         return updated_mod_order || updated_promises;
923 }
924
925 /**
926  * Process a fence ModelAction
927  * @param curr The ModelAction to process
928  * @return True if synchronization was updated
929  */
930 bool ModelChecker::process_fence(ModelAction *curr)
931 {
932         /*
933          * fence-relaxed: no-op
934          * fence-release: only log the occurrence (not in this function), for
935          *   use in later synchronization
936          * fence-acquire (this function): search for hypothetical release
937          *   sequences
938          */
939         bool updated = false;
940         if (curr->is_acquire()) {
941                 action_list_t *list = action_trace;
942                 action_list_t::reverse_iterator rit;
943                 /* Find X : is_read(X) && X --sb-> curr */
944                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
945                         ModelAction *act = *rit;
946                         if (act == curr)
947                                 continue;
948                         if (act->get_tid() != curr->get_tid())
949                                 continue;
950                         /* Stop at the beginning of the thread */
951                         if (act->is_thread_start())
952                                 break;
953                         /* Stop once we reach a prior fence-acquire */
954                         if (act->is_fence() && act->is_acquire())
955                                 break;
956                         if (!act->is_read())
957                                 continue;
958                         /* read-acquire will find its own release sequences */
959                         if (act->is_acquire())
960                                 continue;
961
962                         /* Establish hypothetical release sequences */
963                         rel_heads_list_t release_heads;
964                         get_release_seq_heads(curr, act, &release_heads);
965                         for (unsigned int i = 0; i < release_heads.size(); i++)
966                                 if (!curr->synchronize_with(release_heads[i]))
967                                         set_bad_synchronization();
968                         if (release_heads.size() != 0)
969                                 updated = true;
970                 }
971         }
972         return updated;
973 }
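/*
 * Illustrative sketch of the fence-acquire case handled above, using standard
 * C++11 atomics (x is an atomic<int>, data a plain int, both initially 0):
 *
 *   // Thread 1 (release side):         // Thread 2 (acquire side):
 *   data = 42;                          while (!x.load(memory_order_relaxed))
 *   x.store(1, memory_order_release);           ;
 *                                       atomic_thread_fence(memory_order_acquire);
 *                                       assert(data == 42);
 *
 * The relaxed load by itself establishes no synchronizes-with edge; the later
 * acquire fence is what picks up the release sequence headed by the store,
 * which is exactly the search performed in process_fence().
 */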
974
975 /**
976  * @brief Process the current action for thread-related activity
977  *
978  * Performs current-action processing for a THREAD_* ModelAction. Processing
979  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
980  * synchronization, etc.  This function is a no-op for non-THREAD actions
981  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
982  *
983  * @param curr The current action
984  * @return True if synchronization was updated or a thread completed
985  */
986 bool ModelChecker::process_thread_action(ModelAction *curr)
987 {
988         bool updated = false;
989
990         switch (curr->get_type()) {
991         case THREAD_CREATE: {
992                 thrd_t *thrd = (thrd_t *)curr->get_location();
993                 struct thread_params *params = (struct thread_params *)curr->get_value();
994                 Thread *th = new Thread(thrd, params->func, params->arg);
995                 add_thread(th);
996                 th->set_creation(curr);
997                 /* Promises can be satisfied by children */
998                 for (unsigned int i = 0; i < promises->size(); i++) {
999                         Promise *promise = (*promises)[i];
1000                         if (promise->thread_is_available(curr->get_tid()))
1001                                 promise->add_thread(th->get_id());
1002                 }
1003                 break;
1004         }
1005         case THREAD_JOIN: {
1006                 Thread *blocking = curr->get_thread_operand();
1007                 ModelAction *act = get_last_action(blocking->get_id());
1008                 curr->synchronize_with(act);
1009                 updated = true; /* trigger rel-seq checks */
1010                 break;
1011         }
1012         case THREAD_FINISH: {
1013                 Thread *th = get_thread(curr);
1014                 while (!th->wait_list_empty()) {
1015                         ModelAction *act = th->pop_wait_list();
1016                         scheduler->wake(get_thread(act));
1017                 }
1018                 th->complete();
1019                 /* Completed thread can't satisfy promises */
1020                 for (unsigned int i = 0; i < promises->size(); i++) {
1021                         Promise *promise = (*promises)[i];
1022                         if (promise->thread_is_available(th->get_id()))
1023                                 if (promise->eliminate_thread(th->get_id()))
1024                                         priv->failed_promise = true;
1025                 }
1026                 updated = true; /* trigger rel-seq checks */
1027                 break;
1028         }
1029         case THREAD_START: {
1030                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1031                 break;
1032         }
1033         default:
1034                 break;
1035         }
1036
1037         return updated;
1038 }
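/*
 * Illustrative sketch of the user-level operations that generate these
 * THREAD_* actions, written against the C11-style thread interface the
 * checker models (the project's own threads.h may differ slightly in
 * signatures, so treat this as an assumption):
 *
 *   int worker(void *arg) { return 0; }
 *
 *   thrd_t t;
 *   thrd_create(&t, worker, NULL);  // THREAD_CREATE (child runs THREAD_START)
 *   thrd_join(t, NULL);             // THREAD_JOIN, synchronizes with the
 *                                   // child's THREAD_FINISH
 */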
1039
1040 /**
1041  * @brief Process the current action for release sequence fixup activity
1042  *
1043  * Performs model-checker release sequence fixups for the current action,
1044  * forcing a single pending release sequence to break (with a given, potential
1045  * "loose" write) or to complete (i.e., synchronize). If a pending release
1046  * sequence forms a complete release sequence, then we must perform the fixup
1047  * synchronization, mo_graph additions, etc.
1048  *
1049  * @param curr The current action; must be a release sequence fixup action
1050  * @param work_queue The work queue to which to add work items as they are
1051  * generated
1052  */
1053 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1054 {
1055         const ModelAction *write = curr->get_node()->get_relseq_break();
1056         struct release_seq *sequence = pending_rel_seqs->back();
1057         pending_rel_seqs->pop_back();
1058         ASSERT(sequence);
1059         ModelAction *acquire = sequence->acquire;
1060         const ModelAction *rf = sequence->rf;
1061         const ModelAction *release = sequence->release;
1062         ASSERT(acquire);
1063         ASSERT(release);
1064         ASSERT(rf);
1065         ASSERT(release->same_thread(rf));
1066
1067         if (write == NULL) {
1068                 /**
1069                  * @todo Forcing a synchronization requires that we set
1070                  * modification order constraints. For instance, we can't allow
1071                  * a fixup sequence in which two separate read-acquire
1072                  * operations read from the same sequence, where the first one
1073                  * synchronizes and the other doesn't. Essentially, we can't
1074                  * allow any writes to insert themselves between 'release' and
1075                  * 'rf'
1076                  */
1077
1078                 /* Must synchronize */
1079                 if (!acquire->synchronize_with(release)) {
1080                         set_bad_synchronization();
1081                         return;
1082                 }
1083                 /* Re-check all pending release sequences */
1084                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1085                 /* Re-check act for mo_graph edges */
1086                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1087
1088                 /* propagate synchronization to later actions */
1089                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1090                 for (; (*rit) != acquire; rit++) {
1091                         ModelAction *propagate = *rit;
1092                         if (acquire->happens_before(propagate)) {
1093                                 propagate->synchronize_with(acquire);
1094                                 /* Re-check 'propagate' for mo_graph edges */
1095                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1096                         }
1097                 }
1098         } else {
1099                 /* Break release sequence with new edges:
1100                  *   release --mo--> write --mo--> rf */
1101                 mo_graph->addEdge(release, write);
1102                 mo_graph->addEdge(write, rf);
1103         }
1104
1105         /* See if we have realized a data race */
1106         checkDataRaces();
1107 }
1108
1109 /**
1110  * Initialize the current action by performing one or more of the following
1111  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1112  * in the NodeStack, manipulating backtracking sets, allocating and
1113  * initializing clock vectors, and computing the promises to fulfill.
1114  *
1115  * @param curr The current action, as passed from the user context; may be
1116  * freed/invalidated after the execution of this function, with a different
1117  * action "returned" in its place (pass-by-reference)
1118  * @return True if curr is a newly-explored action; false otherwise
1119  */
1120 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1121 {
1122         ModelAction *newcurr;
1123
1124         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1125                 newcurr = process_rmw(*curr);
1126                 delete *curr;
1127
1128                 if (newcurr->is_rmw())
1129                         compute_promises(newcurr);
1130
1131                 *curr = newcurr;
1132                 return false;
1133         }
1134
1135         (*curr)->set_seq_number(get_next_seq_num());
1136
1137         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1138         if (newcurr) {
1139                 /* First restore type and order in case of RMW operation */
1140                 if ((*curr)->is_rmwr())
1141                         newcurr->copy_typeandorder(*curr);
1142
1143                 ASSERT((*curr)->get_location() == newcurr->get_location());
1144                 newcurr->copy_from_new(*curr);
1145
1146                 /* Discard duplicate ModelAction; use action from NodeStack */
1147                 delete *curr;
1148
1149                 /* Always compute new clock vector */
1150                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1151
1152                 *curr = newcurr;
1153                 return false; /* Action was explored previously */
1154         } else {
1155                 newcurr = *curr;
1156
1157                 /* Always compute new clock vector */
1158                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1159
1160                 /* Assign most recent release fence */
1161                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1162
1163                 /*
1164                  * Perform one-time actions when pushing new ModelAction onto
1165                  * NodeStack
1166                  */
1167                 if (newcurr->is_write())
1168                         compute_promises(newcurr);
1169                 else if (newcurr->is_relseq_fixup())
1170                         compute_relseq_breakwrites(newcurr);
1171                 else if (newcurr->is_wait())
1172                         newcurr->get_node()->set_misc_max(2);
1173                 else if (newcurr->is_notify_one()) {
1174                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1175                 }
1176                 return true; /* This was a new ModelAction */
1177         }
1178 }
1179
1180 /**
1181  * @brief Establish reads-from relation between two actions
1182  *
1183  * Perform basic operations involved with establishing a concrete rf relation,
1184  * including setting the ModelAction data and checking for release sequences.
1185  *
1186  * @param act The action that is reading (must be a read)
1187  * @param rf The action from which we are reading (must be a write)
1188  *
1189  * @return True if this read established synchronization
1190  */
1191 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1192 {
1193         act->set_read_from(rf);
1194         if (rf != NULL && act->is_acquire()) {
1195                 rel_heads_list_t release_heads;
1196                 get_release_seq_heads(act, act, &release_heads);
1197                 int num_heads = release_heads.size();
1198                 for (unsigned int i = 0; i < release_heads.size(); i++)
1199                         if (!act->synchronize_with(release_heads[i])) {
1200                                 set_bad_synchronization();
1201                                 num_heads--;
1202                         }
1203                 return num_heads > 0;
1204         }
1205         return false;
1206 }
1207
1208 /**
1209  * @brief Check whether a model action is enabled.
1210  *
1211  * Checks whether a lock or join operation would succeed (i.e., the lock is
1212  * not already held, or the joined thread has already completed). If it would
1213  * not, put the action in the appropriate waiter list.
1214  *
1215  * @param curr is the ModelAction to check whether it is enabled.
1216  * @return a bool that indicates whether the action is enabled.
1217  */
1218 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1219         if (curr->is_lock()) {
1220                 std::mutex *lock = (std::mutex *)curr->get_location();
1221                 struct std::mutex_state *state = lock->get_state();
1222                 if (state->islocked) {
1223                         //Stick the action in the appropriate waiting queue
1224                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1225                         return false;
1226                 }
1227         } else if (curr->get_type() == THREAD_JOIN) {
1228                 Thread *blocking = (Thread *)curr->get_location();
1229                 if (!blocking->is_complete()) {
1230                         blocking->push_wait_list(curr);
1231                         return false;
1232                 }
1233         }
1234
1235         return true;
1236 }
1237
1238 /**
1239  * This is the heart of the model checker routine. It performs model-checking
1240  * actions corresponding to a given "current action." Among other processes, it
1241  * calculates reads-from relationships, updates synchronization clock vectors,
1242  * forms a memory_order constraints graph, and handles replay/backtrack
1243  * execution when running permutations of previously-observed executions.
1244  *
1245  * @param curr The current action to process
1246  * @return The ModelAction that is actually executed; may be different than
1247  * curr; may be NULL, if the current action is not enabled to run
1248  */
1249 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1250 {
1251         ASSERT(curr);
1252         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1253
1254         if (!check_action_enabled(curr)) {
1255                 /* Make the execution look like we chose to run this action
1256                  * much later, when a lock/join can succeed */
1257                 get_thread(curr)->set_pending(curr);
1258                 scheduler->sleep(get_thread(curr));
1259                 return NULL;
1260         }
1261
1262         bool newly_explored = initialize_curr_action(&curr);
1263
1264         DBG();
1265         if (DBG_ENABLED())
1266                 curr->print();
1267
1268         wake_up_sleeping_actions(curr);
1269
1270         /* Add the action to lists before any other model-checking tasks */
1271         if (!second_part_of_rmw)
1272                 add_action_to_lists(curr);
1273
1274         /* Build may_read_from set for newly-created actions */
1275         if (newly_explored && curr->is_read())
1276                 build_reads_from_past(curr);
1277
1278         /* Initialize work_queue with the "current action" work */
1279         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1280         while (!work_queue.empty() && !has_asserted()) {
1281                 WorkQueueEntry work = work_queue.front();
1282                 work_queue.pop_front();
1283
1284                 switch (work.type) {
1285                 case WORK_CHECK_CURR_ACTION: {
1286                         ModelAction *act = work.action;
1287                         bool update = false; /* update this location's release seq's */
1288                         bool update_all = false; /* update all release seq's */
1289
1290                         if (process_thread_action(curr))
1291                                 update_all = true;
1292
1293                         if (act->is_read() && process_read(act, second_part_of_rmw))
1294                                 update = true;
1295
1296                         if (act->is_write() && process_write(act))
1297                                 update = true;
1298
1299                         if (act->is_fence() && process_fence(act))
1300                                 update_all = true;
1301
1302                         if (act->is_mutex_op() && process_mutex(act))
1303                                 update_all = true;
1304
1305                         if (act->is_relseq_fixup())
1306                                 process_relseq_fixup(curr, &work_queue);
1307
1308                         if (update_all)
1309                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1310                         else if (update)
1311                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1312                         break;
1313                 }
1314                 case WORK_CHECK_RELEASE_SEQ:
1315                         resolve_release_sequences(work.location, &work_queue);
1316                         break;
1317                 case WORK_CHECK_MO_EDGES: {
1318                         /** @todo Complete verification of work_queue */
1319                         ModelAction *act = work.action;
1320                         bool updated = false;
1321
1322                         if (act->is_read()) {
1323                                 const ModelAction *rf = act->get_reads_from();
1324                                 const Promise *promise = act->get_reads_from_promise();
1325                                 if (rf) {
1326                                         if (r_modification_order(act, rf))
1327                                                 updated = true;
1328                                 } else if (promise) {
1329                                         if (r_modification_order(act, promise))
1330                                                 updated = true;
1331                                 }
1332                         }
1333                         if (act->is_write()) {
1334                                 if (w_modification_order(act))
1335                                         updated = true;
1336                         }
1337                         mo_graph->commitChanges();
1338
1339                         if (updated)
1340                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1341                         break;
1342                 }
1343                 default:
1344                         ASSERT(false);
1345                         break;
1346                 }
1347         }
1348
1349         check_curr_backtracking(curr);
1350         set_backtracking(curr);
1351         return curr;
1352 }
1353
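/**
 * @brief Record a backtracking point at the current action, if needed
 *
 * If the parent Node still has backtracking points, or this action's Node
 * still has unexplored "misc", reads-from, future-value, promise, or
 * release-sequence-break choices, record curr as the latest backtracking
 * point.
 *
 * @param curr The current ModelAction
 */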
1354 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1355 {
1356         Node *currnode = curr->get_node();
1357         Node *parnode = currnode->get_parent();
1358
1359         if ((parnode && !parnode->backtrack_empty()) ||
1360                          !currnode->misc_empty() ||
1361                          !currnode->read_from_empty() ||
1362                          !currnode->future_value_empty() ||
1363                          !currnode->promise_empty() ||
1364                          !currnode->relseq_break_empty()) {
1365                 set_latest_backtrack(curr);
1366         }
1367 }
1368
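/**
 * @brief Check whether any outstanding promise has expired
 *
 * A promise expires when its expiration sequence number falls behind the
 * number of sequence numbers already used in this execution.
 *
 * @return true if at least one promise has expired; false otherwise
 */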
1369 bool ModelChecker::promises_expired() const
1370 {
1371         for (unsigned int i = 0; i < promises->size(); i++) {
1372                 Promise *promise = (*promises)[i];
1373                 if (promise->get_expiration() < priv->used_sequence_numbers)
1374                         return true;
1375         }
1376         return false;
1377 }
1378
1379 /**
1380  * This is the strongest feasibility check available.
1381  * @return whether the current trace (partial or complete) must be a prefix of
1382  * a feasible trace.
1383  */
1384 bool ModelChecker::isfeasibleprefix() const
1385 {
1386         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1387 }
1388
1389 /**
1390  * Print diagnostic information about an infeasible execution
1391  * @param prefix A string to prefix the output with; if NULL, then a default
1392  * message prefix will be provided
1393  */
1394 void ModelChecker::print_infeasibility(const char *prefix) const
1395 {
1396         char buf[100];
1397         char *ptr = buf;
1398         if (mo_graph->checkForCycles())
1399                 ptr += sprintf(ptr, "[mo cycle]");
1400         if (priv->failed_promise)
1401                 ptr += sprintf(ptr, "[failed promise]");
1402         if (priv->too_many_reads)
1403                 ptr += sprintf(ptr, "[too many reads]");
1404         if (priv->no_valid_reads)
1405                 ptr += sprintf(ptr, "[no valid reads-from]");
1406         if (priv->bad_synchronization)
1407                 ptr += sprintf(ptr, "[bad sw ordering]");
1408         if (promises_expired())
1409                 ptr += sprintf(ptr, "[promise expired]");
1410         if (promises->size() != 0)
1411                 ptr += sprintf(ptr, "[unresolved promise]");
1412         if (ptr != buf)
1413                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1414 }
1415
1416 /**
1417  * Returns whether the current completed trace is feasible, except for pending
1418  * release sequences.
1419  */
1420 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1421 {
1422         return !is_infeasible() && promises->size() == 0;
1423 }
1424
1425 /**
1426  * Check if the current partial trace is infeasible. Does not check any
1427  * end-of-execution flags, which might rule out the execution. Thus, this is
1428  * useful only for ruling an execution as infeasible.
1429  * @return whether the current partial trace is infeasible.
1430  */
1431 bool ModelChecker::is_infeasible() const
1432 {
1433         return mo_graph->checkForCycles() ||
1434                 priv->no_valid_reads ||
1435                 priv->failed_promise ||
1436                 priv->too_many_reads ||
1437                 priv->bad_synchronization ||
1438                 promises_expired();
1439 }
1440
1441 /** Close out a RMWR by converting the previous RMWR into a RMW or READ. */
1442 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1443         ModelAction *lastread = get_last_action(act->get_tid());
1444         lastread->process_rmw(act);
1445         if (act->is_rmw()) {
1446                 if (lastread->get_reads_from())
1447                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1448                 else
1449                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1450                 mo_graph->commitChanges();
1451         }
1452         return lastread;
1453 }
1454
1455 /**
1456  * Checks whether a thread has read from the same write too many times
1457  * without seeing the effects of a later write.
1458  *
1459  * Basic idea:
1460  * 1) there must be a different write that we could read from that would satisfy the modification order,
1461  * 2) we must have read from that same write at least maxreads times in a row, and
1462  * 3) that other write must have been available (in the may-read-from set) for each of those reads.
1463  *
1464  * If so, we decide that the execution is no longer feasible.
1465  */
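/* Illustrative sketch (not part of the original source): the pattern targeted
 * by the maxreads bound is a reader that spins on the same store, using a
 * hypothetical atomic 'flag':
 *
 *   // Thread 1:
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;
 *   // Thread 2:
 *   flag.store(true, std::memory_order_relaxed);
 *
 * Without a bound, the spinning load could keep reading the initial value of
 * 'flag' even though a feasible later write (Thread 2's store) exists. */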
1466 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1467 {
1468         if (params.maxreads != 0) {
1469                 if (curr->get_node()->get_read_from_size() <= 1)
1470                         return;
1471                 //Must make sure that the execution is currently feasible...  We could
1472                 //accidentally clear the infeasibility flags by rolling back
1473                 if (is_infeasible())
1474                         return;
1475                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1476                 int tid = id_to_int(curr->get_tid());
1477
1478                 /* Skip checks */
1479                 if ((int)thrd_lists->size() <= tid)
1480                         return;
1481                 action_list_t *list = &(*thrd_lists)[tid];
1482
1483                 action_list_t::reverse_iterator rit = list->rbegin();
1484                 /* Skip past curr */
1485                 for (; (*rit) != curr; rit++)
1486                         ;
1487                 /* go past curr now */
1488                 rit++;
1489
1490                 action_list_t::reverse_iterator ritcopy = rit;
1491                 //See if we have enough reads from the same value
1492                 int count = 0;
1493                 for (; count < params.maxreads; rit++, count++) {
1494                         if (rit == list->rend())
1495                                 return;
1496                         ModelAction *act = *rit;
1497                         if (!act->is_read())
1498                                 return;
1499
1500                         if (act->get_reads_from() != rf)
1501                                 return;
1502                         if (act->get_node()->get_read_from_size() <= 1)
1503                                 return;
1504                 }
1505                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1506                         /* Get write */
1507                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1508
1509                         /* Need a different write */
1510                         if (write == rf)
1511                                 continue;
1512
1513                         /* Test to see whether this is a feasible write to read from */
1514                         /* NOTE: all members of the read-from set should be
1515                          * feasible, so we no longer check it here */
1516
1517                         rit = ritcopy;
1518
1519                         bool feasiblewrite = true;
1520                         //now we need to see if this write works for everyone
1521
1522                         for (int loop = count; loop > 0; loop--, rit++) {
1523                                 ModelAction *act = *rit;
1524                                 bool foundvalue = false;
1525                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1526                                         if (act->get_node()->get_read_from_at(j) == write) {
1527                                                 foundvalue = true;
1528                                                 break;
1529                                         }
1530                                 }
1531                                 if (!foundvalue) {
1532                                         feasiblewrite = false;
1533                                         break;
1534                                 }
1535                         }
1536                         if (feasiblewrite) {
1537                                 priv->too_many_reads = true;
1538                                 return;
1539                         }
1540                 }
1541         }
1542 }
1543
1544 /**
1545  * Updates the mo_graph with the constraints imposed from the current
1546  * read.
1547  *
1548  * Basic idea is the following: Go through each other thread and find
1549  * the last action that happened before our read.  Two cases:
1550  *
1551  * (1) The action is a write => that write must either occur before
1552  * the write we read from or be the write we read from.
1553  *
1554  * (2) The action is a read => the write that that action read from
1555  * must occur before the write we read from or be the same write.
1556  *
1557  * @param curr The current action. Must be a read.
1558  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1559  * @return True if modification order edges were added; false otherwise
1560  */
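/* Illustrative sketch (not part of the original source), using hypothetical
 * writes W1 and W2 to the same atomic object x:
 *
 *   // Thread 1:                       // Thread 2:
 *   x.store(1, ...);  // W1            r = x.load(...);  // reads from W2
 *
 * If W1 happens before Thread 2's load, case (1) applies and the edge
 * W1 --mo--> W2 is added (unless W1 is itself the write being read from). */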
1561 template <typename rf_type>
1562 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1563 {
1564         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1565         unsigned int i;
1566         bool added = false;
1567         ASSERT(curr->is_read());
1568
1569         /* Last SC fence in the current thread */
1570         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1571
1572         /* Iterate over all threads */
1573         for (i = 0; i < thrd_lists->size(); i++) {
1574                 /* Last SC fence in thread i */
1575                 ModelAction *last_sc_fence_thread_local = NULL;
1576                 if (int_to_id((int)i) != curr->get_tid())
1577                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1578
1579                 /* Last SC fence in thread i, before last SC fence in current thread */
1580                 ModelAction *last_sc_fence_thread_before = NULL;
1581                 if (last_sc_fence_local)
1582                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1583
1584                 /* Iterate over actions in thread, starting from most recent */
1585                 action_list_t *list = &(*thrd_lists)[i];
1586                 action_list_t::reverse_iterator rit;
1587                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1588                         ModelAction *act = *rit;
1589
1590                         if (act->is_write() && !act->equals(rf) && act != curr) {
1591                                 /* C++, Section 29.3 statement 5 */
1592                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1593                                                 *act < *last_sc_fence_thread_local) {
1594                                         added = mo_graph->addEdge(act, rf) || added;
1595                                         break;
1596                                 }
1597                                 /* C++, Section 29.3 statement 4 */
1598                                 else if (act->is_seqcst() && last_sc_fence_local &&
1599                                                 *act < *last_sc_fence_local) {
1600                                         added = mo_graph->addEdge(act, rf) || added;
1601                                         break;
1602                                 }
1603                                 /* C++, Section 29.3 statement 6 */
1604                                 else if (last_sc_fence_thread_before &&
1605                                                 *act < *last_sc_fence_thread_before) {
1606                                         added = mo_graph->addEdge(act, rf) || added;
1607                                         break;
1608                                 }
1609                         }
1610
1611                         /*
1612                          * Include at most one act per-thread that "happens
1613                          * before" curr. Don't consider curr itself.
1614                          */
1615                         if (act->happens_before(curr) && act != curr) {
1616                                 if (act->is_write()) {
1617                                         if (!act->equals(rf)) {
1618                                                 added = mo_graph->addEdge(act, rf) || added;
1619                                         }
1620                                 } else {
1621                                         const ModelAction *prevreadfrom = act->get_reads_from();
1622                                         //if the previous read is unresolved, keep going...
1623                                         if (prevreadfrom == NULL)
1624                                                 continue;
1625
1626                                         if (!prevreadfrom->equals(rf)) {
1627                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1628                                         }
1629                                 }
1630                                 break;
1631                         }
1632                 }
1633         }
1634
1635         /*
1636          * All compatible, thread-exclusive promises must be ordered after any
1637          * concrete loads from the same thread
1638          */
1639         for (unsigned int i = 0; i < promises->size(); i++)
1640                 if ((*promises)[i]->is_compatible_exclusive(curr))
1641                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1642
1643         return added;
1644 }
1645
1646 /**
1647  * Updates the mo_graph with the constraints imposed from the current write.
1648  *
1649  * Basic idea is the following: Go through each other thread and find
1650  * the latest action that happened before our write.  Two cases:
1651  *
1652  * (1) The action is a write => that write must occur before
1653  * the current write
1654  *
1655  * (2) The action is a read => the write that that action read from
1656  * must occur before the current write.
1657  *
1658  * This method also handles two other issues:
1659  *
1660  * (I) Sequential Consistency: Making sure that if the current write is
1661  * seq_cst, that it occurs after the previous seq_cst write.
1662  *
1663  * (II) Sending the write back to non-synchronizing reads.
1664  *
1665  * @param curr The current action. Must be a write.
1666  * @return True if modification order edges were added; false otherwise
1667  */
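/* Illustrative sketch (not part of the original source), for a hypothetical
 * store W_curr to atomic object x:
 *
 *   // Thread 1:                          // Thread 2:
 *   r = x.load(...);  // reads from W     x.store(2, ...);  // W_curr
 *
 * If Thread 1's load happens before W_curr, case (2) applies and the edge
 * W --mo--> W_curr is added. */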
1668 bool ModelChecker::w_modification_order(ModelAction *curr)
1669 {
1670         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1671         unsigned int i;
1672         bool added = false;
1673         ASSERT(curr->is_write());
1674
1675         if (curr->is_seqcst()) {
1676                 /* We have to at least see the last sequentially consistent write,
1677                  * so we are initialized. */
1678                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1679                 if (last_seq_cst != NULL) {
1680                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1681                 }
1682         }
1683
1684         /* Last SC fence in the current thread */
1685         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1686
1687         /* Iterate over all threads */
1688         for (i = 0; i < thrd_lists->size(); i++) {
1689                 /* Last SC fence in thread i, before last SC fence in current thread */
1690                 ModelAction *last_sc_fence_thread_before = NULL;
1691                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1692                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1693
1694                 /* Iterate over actions in thread, starting from most recent */
1695                 action_list_t *list = &(*thrd_lists)[i];
1696                 action_list_t::reverse_iterator rit;
1697                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1698                         ModelAction *act = *rit;
1699                         if (act == curr) {
1700                                 /*
1701                                  * 1) If RMW and it actually read from something, then we
1702                                  * already have all relevant edges, so just skip to next
1703                                  * thread.
1704                                  *
1705                                  * 2) If RMW and it didn't read from anything, we should add
1706                                  * whatever edge we can get to speed up convergence.
1707                                  *
1708                                  * 3) If normal write, we need to look at earlier actions, so
1709                                  * continue processing list.
1710                                  */
1711                                 if (curr->is_rmw()) {
1712                                         if (curr->get_reads_from() != NULL)
1713                                                 break;
1714                                         else
1715                                                 continue;
1716                                 } else
1717                                         continue;
1718                         }
1719
1720                         /* C++, Section 29.3 statement 7 */
1721                         if (last_sc_fence_thread_before && act->is_write() &&
1722                                         *act < *last_sc_fence_thread_before) {
1723                                 added = mo_graph->addEdge(act, curr) || added;
1724                                 break;
1725                         }
1726
1727                         /*
1728                          * Include at most one act per-thread that "happens
1729                          * before" curr
1730                          */
1731                         if (act->happens_before(curr)) {
1732                                 /*
1733                                  * Note: if act is RMW, just add edge:
1734                                  *   act --mo--> curr
1735                                  * The following edge should be handled elsewhere:
1736                                  *   readfrom(act) --mo--> act
1737                                  */
1738                                 if (act->is_write())
1739                                         added = mo_graph->addEdge(act, curr) || added;
1740                                 else if (act->is_read()) {
1741                                         //if previous read accessed a null, just keep going
1742                                         if (act->get_reads_from() == NULL)
1743                                                 continue;
1744                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1745                                 }
1746                                 break;
1747                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1748                                                      !act->same_thread(curr)) {
1749                                 /* We have an action that:
1750                                    (1) did not happen before us
1751                                    (2) is a read and we are a write
1752                                    (3) cannot synchronize with us
1753                                    (4) is in a different thread
1754                                    =>
1755                                    that read could potentially read from our write.  Note that
1756                                    these checks are overly conservative at this point, we'll
1757                                    do more checks before actually removing the
1758                                    pendingfuturevalue.
1759
1760                                  */
1761                                 if (thin_air_constraint_may_allow(curr, act)) {
1762                                         if (!is_infeasible())
1763                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1764                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1765                                                 add_future_value(curr, act);
1766                                 }
1767                         }
1768                 }
1769         }
1770
1771         /*
1772          * All compatible, thread-exclusive promises must be ordered after any
1773          * concrete stores to the same thread, or else they can be merged with
1774          * this store later
1775          */
1776         for (unsigned int i = 0; i < promises->size(); i++)
1777                 if ((*promises)[i]->is_compatible_exclusive(curr))
1778                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1779
1780         return added;
1781 }
1782
1783 /** Arbitrary reads from the future are not allowed.  Section 29.3
1784  * part 9 places some constraints.  This method checks one consequence of that
1785  * constraint; others require compiler support. */
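/* Illustrative sketch (not part of the original source): the disallowed
 * pattern is a cycle of RMW operations feeding each other values "out of
 * thin air", e.g. two relaxed fetch_add operations on the same atomic where
 * each one's read observes the other's write. Walking the reads-from chain
 * from 'writer' and encountering 'reader' detects such a cycle. */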
1786 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1787 {
1788         if (!writer->is_rmw())
1789                 return true;
1790
1791         if (!reader->is_rmw())
1792                 return true;
1793
1794         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1795                 if (search == reader)
1796                         return false;
1797                 if (search->get_tid() == reader->get_tid() &&
1798                                 search->happens_before(reader))
1799                         break;
1800         }
1801
1802         return true;
1803 }
1804
1805 /**
1806  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1807  * some constraints. This method checks the following constraint (others
1808  * require compiler support):
1809  *
1810  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1811  */
1812 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1813 {
1814         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1815         unsigned int i;
1816         /* Iterate over all threads */
1817         for (i = 0; i < thrd_lists->size(); i++) {
1818                 const ModelAction *write_after_read = NULL;
1819
1820                 /* Iterate over actions in thread, starting from most recent */
1821                 action_list_t *list = &(*thrd_lists)[i];
1822                 action_list_t::reverse_iterator rit;
1823                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1824                         ModelAction *act = *rit;
1825
1826                         /* Don't disallow due to act == reader */
1827                         if (!reader->happens_before(act) || reader == act)
1828                                 break;
1829                         else if (act->is_write())
1830                                 write_after_read = act;
1831                         else if (act->is_read() && act->get_reads_from() != NULL)
1832                                 write_after_read = act->get_reads_from();
1833                 }
1834
1835                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1836                         return false;
1837         }
1838         return true;
1839 }
1840
1841 /**
1842  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1843  * The ModelAction under consideration is expected to be taking part in
1844  * release/acquire synchronization as an object of the "reads from" relation.
1845  * Note that this can only provide release sequence support for RMW chains
1846  * which do not read from the future, as those actions cannot be traced until
1847  * their "promise" is fulfilled. Similarly, we may not even establish the
1848  * presence of a release sequence with certainty, as some modification order
1849  * constraints may be decided further in the future. Thus, this function
1850  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1851  * and a boolean representing certainty.
1852  *
1853  * @param rf The action that might be part of a release sequence. Must be a
1854  * write.
1855  * @param release_heads A pass-by-reference style return parameter. After
1856  * execution of this function, release_heads will contain the heads of all the
1857  * relevant release sequences, if any exist with certainty
1858  * @param pending A pass-by-reference style return parameter which is only used
1859  * when returning false (i.e., uncertain). It returns information regarding
1860  * an uncertain release sequence, including any write operations that might
1861  * break the sequence.
1862  * @return true, if the ModelChecker is certain that release_heads is complete;
1863  * false otherwise
1864  */
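/* Illustrative sketch (not part of the original source): a release sequence
 * is headed by a release operation and continued by RMW operations, e.g.
 *
 *   x.store(1, std::memory_order_release);      // release head
 *   x.fetch_add(1, std::memory_order_relaxed);  // RMW: continues the sequence
 *
 * An acquire load that reads from the fetch_add synchronizes with the release
 * head, unless some other store to x breaks the sequence in modification
 * order. */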
1865 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1866                 rel_heads_list_t *release_heads,
1867                 struct release_seq *pending) const
1868 {
1869         /* Only check for release sequences if there are no cycles */
1870         if (mo_graph->checkForCycles())
1871                 return false;
1872
1873         while (rf) {
1874                 ASSERT(rf->is_write());
1875
1876                 if (rf->is_release())
1877                         release_heads->push_back(rf);
1878                 else if (rf->get_last_fence_release())
1879                         release_heads->push_back(rf->get_last_fence_release());
1880                 if (!rf->is_rmw())
1881                         break; /* End of RMW chain */
1882
1883                 /** @todo Need to be smarter here...  In the linux lock
1884                  * example, this will run to the beginning of the program for
1885                  * every acquire. */
1886                 /** @todo The way to be smarter here is to keep going until one
1887                  * thread has a release preceded by an acquire and you've seen
1888                  * both. */
1889
1890                 /* acq_rel RMW is a sufficient stopping condition */
1891                 if (rf->is_acquire() && rf->is_release())
1892                         return true; /* complete */
1893
1894                 rf = rf->get_reads_from();
1895         }
1896         if (!rf) {
1897                 /* read from future: need to settle this later */
1898                 pending->rf = NULL;
1899                 return false; /* incomplete */
1900         }
1901
1902         if (rf->is_release())
1903                 return true; /* complete */
1904
1905         /* else relaxed write
1906          * - check for fence-release in the same thread (29.8, stmt. 3)
1907          * - check modification order for contiguous subsequence
1908          *   -> rf must be same thread as release */
1909
1910         const ModelAction *fence_release = rf->get_last_fence_release();
1911         /* Synchronize with a fence-release unconditionally; we don't need to
1912          * find any more "contiguous subsequence..." for it */
1913         if (fence_release)
1914                 release_heads->push_back(fence_release);
1915
1916         int tid = id_to_int(rf->get_tid());
1917         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1918         action_list_t *list = &(*thrd_lists)[tid];
1919         action_list_t::const_reverse_iterator rit;
1920
1921         /* Find rf in the thread list */
1922         rit = std::find(list->rbegin(), list->rend(), rf);
1923         ASSERT(rit != list->rend());
1924
1925         /* Find the last {write,fence}-release */
1926         for (; rit != list->rend(); rit++) {
1927                 if (fence_release && *(*rit) < *fence_release)
1928                         break;
1929                 if ((*rit)->is_release())
1930                         break;
1931         }
1932         if (rit == list->rend()) {
1933                 /* No write-release in this thread */
1934                 return true; /* complete */
1935         } else if (fence_release && *(*rit) < *fence_release) {
1936                 /* The fence-release is more recent (and so, "stronger") than
1937                  * the most recent write-release */
1938                 return true; /* complete */
1939         } /* else, need to establish contiguous release sequence */
1940         ModelAction *release = *rit;
1941
1942         ASSERT(rf->same_thread(release));
1943
1944         pending->writes.clear();
1945
1946         bool certain = true;
1947         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1948                 if (id_to_int(rf->get_tid()) == (int)i)
1949                         continue;
1950                 list = &(*thrd_lists)[i];
1951
1952                 /* Can we ensure no future writes from this thread may break
1953                  * the release seq? */
1954                 bool future_ordered = false;
1955
1956                 ModelAction *last = get_last_action(int_to_id(i));
1957                 Thread *th = get_thread(int_to_id(i));
1958                 if ((last && rf->happens_before(last)) ||
1959                                 !is_enabled(th) ||
1960                                 th->is_complete())
1961                         future_ordered = true;
1962
1963                 ASSERT(!th->is_model_thread() || future_ordered);
1964
1965                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1966                         const ModelAction *act = *rit;
1967                         /* Reach synchronization -> this thread is complete */
1968                         if (act->happens_before(release))
1969                                 break;
1970                         if (rf->happens_before(act)) {
1971                                 future_ordered = true;
1972                                 continue;
1973                         }
1974
1975                         /* Only non-RMW writes can break release sequences */
1976                         if (!act->is_write() || act->is_rmw())
1977                                 continue;
1978
1979                         /* Check modification order */
1980                         if (mo_graph->checkReachable(rf, act)) {
1981                                 /* rf --mo--> act */
1982                                 future_ordered = true;
1983                                 continue;
1984                         }
1985                         if (mo_graph->checkReachable(act, release))
1986                                 /* act --mo--> release */
1987                                 break;
1988                         if (mo_graph->checkReachable(release, act) &&
1989                                       mo_graph->checkReachable(act, rf)) {
1990                                 /* release --mo-> act --mo--> rf */
1991                                 return true; /* complete */
1992                         }
1993                         /* act may break release sequence */
1994                         pending->writes.push_back(act);
1995                         certain = false;
1996                 }
1997                 if (!future_ordered)
1998                         certain = false; /* This thread is uncertain */
1999         }
2000
2001         if (certain) {
2002                 release_heads->push_back(release);
2003                 pending->writes.clear();
2004         } else {
2005                 pending->release = release;
2006                 pending->rf = rf;
2007         }
2008         return certain;
2009 }
2010
2011 /**
2012  * An interface for getting the release sequence head(s) with which a
2013  * given ModelAction must synchronize. This function only returns a non-empty
2014  * result when it can locate a release sequence head with certainty. Otherwise,
2015  * it may mark the internal state of the ModelChecker so that it will handle
2016  * the release sequence at a later time, causing @a acquire to update its
2017  * synchronization at some later point in execution.
2018  *
2019  * @param acquire The 'acquire' action that may synchronize with a release
2020  * sequence
2021  * @param read The read action that may read from a release sequence; this may
2022  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2023  * when 'acquire' is a fence-acquire)
2024  * @param release_heads A pass-by-reference return parameter. Will be filled
2025  * with the head(s) of the release sequence(s), if they exist with certainty.
2026  * @see ModelChecker::release_seq_heads
2027  */
2028 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2029                 ModelAction *read, rel_heads_list_t *release_heads)
2030 {
2031         const ModelAction *rf = read->get_reads_from();
2032         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2033         sequence->acquire = acquire;
2034         sequence->read = read;
2035
2036         if (!release_seq_heads(rf, release_heads, sequence)) {
2037                 /* add act to 'lazy checking' list */
2038                 pending_rel_seqs->push_back(sequence);
2039         } else {
2040                 snapshot_free(sequence);
2041         }
2042 }
2043
2044 /**
2045  * Attempt to resolve all stashed operations that might synchronize with a
2046  * release sequence for a given location. This implements the "lazy" portion of
2047  * determining whether or not a release sequence was contiguous, since not all
2048  * modification order information is present at the time an action occurs.
2049  *
2050  * @param location The location/object that should be checked for release
2051  * sequence resolutions. A NULL value means to check all locations.
2052  * @param work_queue The work queue to which to add work items as they are
2053  * generated
2054  * @return True if any updates occurred (new synchronization, new mo_graph
2055  * edges)
2056  */
2057 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2058 {
2059         bool updated = false;
2060         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2061         while (it != pending_rel_seqs->end()) {
2062                 struct release_seq *pending = *it;
2063                 ModelAction *acquire = pending->acquire;
2064                 const ModelAction *read = pending->read;
2065
2066                 /* Only resolve sequences on the given location, if provided */
2067                 if (location && read->get_location() != location) {
2068                         it++;
2069                         continue;
2070                 }
2071
2072                 const ModelAction *rf = read->get_reads_from();
2073                 rel_heads_list_t release_heads;
2074                 bool complete;
2075                 complete = release_seq_heads(rf, &release_heads, pending);
2076                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2077                         if (!acquire->has_synchronized_with(release_heads[i])) {
2078                                 if (acquire->synchronize_with(release_heads[i]))
2079                                         updated = true;
2080                                 else
2081                                         set_bad_synchronization();
2082                         }
2083                 }
2084
2085                 if (updated) {
2086                         /* Re-check all pending release sequences */
2087                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2088                         /* Re-check read-acquire for mo_graph edges */
2089                         if (acquire->is_read())
2090                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2091
2092                         /* propagate synchronization to later actions */
2093                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2094                         for (; (*rit) != acquire; rit++) {
2095                                 ModelAction *propagate = *rit;
2096                                 if (acquire->happens_before(propagate)) {
2097                                         propagate->synchronize_with(acquire);
2098                                         /* Re-check 'propagate' for mo_graph edges */
2099                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2100                                 }
2101                         }
2102                 }
2103                 if (complete) {
2104                         it = pending_rel_seqs->erase(it);
2105                         snapshot_free(pending);
2106                 } else {
2107                         it++;
2108                 }
2109         }
2110
2111         // If we resolved promises or added new synchronization, check whether we have realized a data race.
2112         checkDataRaces();
2113
2114         return updated;
2115 }
2116
2117 /**
2118  * Performs various bookkeeping operations for the current ModelAction. For
2119  * instance, adds action to the per-object, per-thread action vector and to the
2120  * action trace list of all thread actions.
2121  *
2122  * @param act is the ModelAction to add.
2123  */
2124 void ModelChecker::add_action_to_lists(ModelAction *act)
2125 {
2126         int tid = id_to_int(act->get_tid());
2127         ModelAction *uninit = NULL;
2128         int uninit_id = -1;
2129         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2130         if (list->empty() && act->is_atomic_var()) {
2131                 uninit = new_uninitialized_action(act->get_location());
2132                 uninit_id = id_to_int(uninit->get_tid());
2133                 list->push_back(uninit);
2134         }
2135         list->push_back(act);
2136
2137         action_trace->push_back(act);
2138         if (uninit)
2139                 action_trace->push_front(uninit);
2140
2141         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2142         if (tid >= (int)vec->size())
2143                 vec->resize(priv->next_thread_id);
2144         (*vec)[tid].push_back(act);
2145         if (uninit)
2146                 (*vec)[uninit_id].push_front(uninit);
2147
2148         if ((int)thrd_last_action->size() <= tid)
2149                 thrd_last_action->resize(get_num_threads());
2150         (*thrd_last_action)[tid] = act;
2151         if (uninit)
2152                 (*thrd_last_action)[uninit_id] = uninit;
2153
2154         if (act->is_fence() && act->is_release()) {
2155                 if ((int)thrd_last_fence_release->size() <= tid)
2156                         thrd_last_fence_release->resize(get_num_threads());
2157                 (*thrd_last_fence_release)[tid] = act;
2158         }
2159
2160         if (act->is_wait()) {
2161                 void *mutex_loc = (void *) act->get_value();
2162                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2163
2164                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2165                 if (tid >= (int)vec->size())
2166                         vec->resize(priv->next_thread_id);
2167                 (*vec)[tid].push_back(act);
2168         }
2169 }
2170
2171 /**
2172  * @brief Get the last action performed by a particular Thread
2173  * @param tid The thread ID of the Thread in question
2174  * @return The last action in the thread
2175  */
2176 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2177 {
2178         int threadid = id_to_int(tid);
2179         if (threadid < (int)thrd_last_action->size())
2180                 return (*thrd_last_action)[id_to_int(tid)];
2181         else
2182                 return NULL;
2183 }
2184
2185 /**
2186  * @brief Get the last fence release performed by a particular Thread
2187  * @param tid The thread ID of the Thread in question
2188  * @return The last fence release in the thread, if one exists; NULL otherwise
2189  */
2190 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2191 {
2192         int threadid = id_to_int(tid);
2193         if (threadid < (int)thrd_last_fence_release->size())
2194                 return (*thrd_last_fence_release)[id_to_int(tid)];
2195         else
2196                 return NULL;
2197 }
2198
2199 /**
2200  * Gets the last memory_order_seq_cst write (in the total global sequence)
2201  * performed on a particular object (i.e., memory location), not including the
2202  * current action.
2203  * @param curr The current ModelAction; also denotes the object location to
2204  * check
2205  * @return The last seq_cst write
2206  */
2207 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2208 {
2209         void *location = curr->get_location();
2210         action_list_t *list = get_safe_ptr_action(obj_map, location);
2211         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2212         action_list_t::reverse_iterator rit;
2213         for (rit = list->rbegin(); rit != list->rend(); rit++)
2214                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2215                         return *rit;
2216         return NULL;
2217 }
2218
2219 /**
2220  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2221  * performed in a particular thread, prior to a particular fence.
2222  * @param tid The ID of the thread to check
2223  * @param before_fence The fence from which to begin the search; if NULL, then
2224  * search for the most recent fence in the thread.
2225  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2226  */
2227 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2228 {
2229         /* All fences should have NULL location */
2230         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2231         action_list_t::reverse_iterator rit = list->rbegin();
2232
2233         if (before_fence) {
2234                 for (; rit != list->rend(); rit++)
2235                         if (*rit == before_fence)
2236                                 break;
2237
2238                 ASSERT(*rit == before_fence);
2239                 rit++;
2240         }
2241
2242         for (; rit != list->rend(); rit++)
2243                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2244                         return *rit;
2245         return NULL;
2246 }
2247
2248 /**
2249  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2250  * location). This function identifies the mutex according to the current
2251  * action, which is presumed to operate on the same mutex.
2252  * @param curr The current ModelAction; also denotes the object location to
2253  * check
2254  * @return The last unlock operation
2255  */
2256 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2257 {
2258         void *location = curr->get_location();
2259         action_list_t *list = get_safe_ptr_action(obj_map, location);
2260         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2261         action_list_t::reverse_iterator rit;
2262         for (rit = list->rbegin(); rit != list->rend(); rit++)
2263                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2264                         return *rit;
2265         return NULL;
2266 }
2267
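/**
 * @brief Get the "parent" action for a thread
 * @param tid The thread ID of the Thread in question
 * @return The thread's last action if it has performed one; otherwise the
 * action that created the thread
 */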
2268 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2269 {
2270         ModelAction *parent = get_last_action(tid);
2271         if (!parent)
2272                 parent = get_thread(tid)->get_creation();
2273         return parent;
2274 }
2275
2276 /**
2277  * Returns the clock vector for a given thread.
2278  * @param tid The thread whose clock vector we want
2279  * @return Desired clock vector
2280  */
2281 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2282 {
2283         return get_parent_action(tid)->get_cv();
2284 }
2285
2286 /**
2287  * Resolve a set of Promises with a current write. The set is provided in the
2288  * Node corresponding to @a write.
2289  * @param write The ModelAction that is fulfilling Promises
2290  * @return True if promises were resolved; false otherwise
2291  */
2292 bool ModelChecker::resolve_promises(ModelAction *write)
2293 {
2294         bool haveResolved = false;
2295         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2296         promise_list_t mustResolve, resolved;
2297
2298         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2299                 Promise *promise = (*promises)[promise_index];
2300                 if (write->get_node()->get_promise(i)) {
2301                         ModelAction *read = promise->get_action();
2302                         read_from(read, write);
2303                         //Make sure the promise's value matches the write's value
2304                         ASSERT(promise->is_compatible(write));
2305                         mo_graph->resolvePromise(read, write, &mustResolve);
2306
2307                         resolved.push_back(promise);
2308                         promises->erase(promises->begin() + promise_index);
2309                         actions_to_check.push_back(read);
2310
2311                         haveResolved = true;
2312                 } else
2313                         promise_index++;
2314         }
2315
2316         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2317                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2318                                 == resolved.end())
2319                         priv->failed_promise = true;
2320         }
2321         for (unsigned int i = 0; i < resolved.size(); i++)
2322                 delete resolved[i];
2323         //Check whether reading these writes has made threads unable to
2324         //resolve promises
2325
2326         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2327                 ModelAction *read = actions_to_check[i];
2328                 mo_check_promises(read, true);
2329         }
2330
2331         return haveResolved;
2332 }
2333
2334 /**
2335  * Compute the set of promises that could potentially be satisfied by this
2336  * action. Note that the set computation actually appears in the Node, not in
2337  * ModelChecker.
2338  * @param curr The ModelAction that may satisfy promises
2339  */
2340 void ModelChecker::compute_promises(ModelAction *curr)
2341 {
2342         for (unsigned int i = 0; i < promises->size(); i++) {
2343                 Promise *promise = (*promises)[i];
2344                 const ModelAction *act = promise->get_action();
2345                 if (!act->happens_before(curr) &&
2346                                 act->is_read() &&
2347                                 !act->could_synchronize_with(curr) &&
2348                                 !act->same_thread(curr) &&
2349                                 act->get_location() == curr->get_location() &&
2350                                 promise->get_value() == curr->get_value()) {
2351                         curr->get_node()->set_promise(i, act->is_rmw());
2352                 }
2353         }
2354 }
2355
2356 /** Checks promises in response to change in ClockVector Threads. */
2357 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2358 {
2359         for (unsigned int i = 0; i < promises->size(); i++) {
2360                 Promise *promise = (*promises)[i];
2361                 const ModelAction *act = promise->get_action();
2362                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2363                                 merge_cv->synchronized_since(act)) {
2364                         if (promise->eliminate_thread(tid)) {
2365                                 //Promise has failed
2366                                 priv->failed_promise = true;
2367                                 return;
2368                         }
2369                 }
2370         }
2371 }
2372
2373 void ModelChecker::check_promises_thread_disabled()
2374 {
2375         for (unsigned int i = 0; i < promises->size(); i++) {
2376                 Promise *promise = (*promises)[i];
2377                 if (promise->has_failed()) {
2378                         priv->failed_promise = true;
2379                         return;
2380                 }
2381         }
2382 }
2383
2384 /**
2385  * @brief Checks promises in response to addition to modification order for
2386  * threads.
2387  *
2388  * We test whether threads are still available for satisfying promises after an
2389  * addition to our modification order constraints. Those that are unavailable
2390  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2391  * that promise has failed.
2392  *
2393  * @param act The ModelAction which updated the modification order
2394  * @param is_read_check Should be true if act is a read and we must check for
2395  * updates to the store from which it read (there is a distinction here for
2396  * RMW's, which are both a load and a store)
2397  */
2398 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2399 {
2400         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2401
2402         for (unsigned int i = 0; i < promises->size(); i++) {
2403                 Promise *promise = (*promises)[i];
2404                 const ModelAction *pread = promise->get_action();
2405
2406                 // Is this promise on the same location?
2407                 if (!pread->same_var(write))
2408                         continue;
2409
2410                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2411                         priv->failed_promise = true;
2412                         return;
2413                 }
2414
2415                 // Don't do any lookups twice for the same thread
2416                 if (!promise->thread_is_available(act->get_tid()))
2417                         continue;
2418
2419                 if (mo_graph->checkReachable(promise, write)) {
2420                         if (mo_graph->checkPromise(write, promise)) {
2421                                 priv->failed_promise = true;
2422                                 return;
2423                         }
2424                 }
2425         }
2426 }
2427
2428 /**
2429  * Compute the set of writes that may break the current pending release
2430  * sequence. This information is extracted from previous release sequence
2431  * calculations.
2432  *
2433  * @param curr The current ModelAction. Must be a release sequence fixup
2434  * action.
2435  */
2436 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2437 {
2438         if (pending_rel_seqs->empty())
2439                 return;
2440
2441         struct release_seq *pending = pending_rel_seqs->back();
2442         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2443                 const ModelAction *write = pending->writes[i];
2444                 curr->get_node()->add_relseq_break(write);
2445         }
2446
2447         /* NULL means don't break the sequence; just synchronize */
2448         curr->get_node()->add_relseq_break(NULL);
2449 }
2450
2451 /**
2452  * Build up an initial set of all past writes that this 'read' action may read
2453  * from. This set is determined by the clock vector's "happens before"
2454  * relationship.
2455  * @param curr is the current ModelAction that we are exploring; it must be a
2456  * 'read' operation.
2457  */
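/* Illustrative note (not part of the original source): when the read is
 * seq_cst, C++11 29.3 restricts its choices -- it may read the last seq_cst
 * write to the location, or a non-seq_cst write that does not happen before
 * that last seq_cst write; the filtering below reflects this. */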
2458 void ModelChecker::build_reads_from_past(ModelAction *curr)
2459 {
2460         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2461         unsigned int i;
2462         ASSERT(curr->is_read());
2463
2464         ModelAction *last_sc_write = NULL;
2465
2466         if (curr->is_seqcst())
2467                 last_sc_write = get_last_seq_cst_write(curr);
2468
2469         /* Iterate over all threads */
2470         for (i = 0; i < thrd_lists->size(); i++) {
2471                 /* Iterate over actions in thread, starting from most recent */
2472                 action_list_t *list = &(*thrd_lists)[i];
2473                 action_list_t::reverse_iterator rit;
2474                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2475                         ModelAction *act = *rit;
2476
2477                         /* Only consider 'write' actions */
2478                         if (!act->is_write() || act == curr)
2479                                 continue;
2480
2481                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2482                         bool allow_read = true;
2483
2484                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2485                                 allow_read = false;
2486                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2487                                 allow_read = false;
2488
2489                         if (allow_read) {
2490                                 /* Only add feasible reads */
2491                                 mo_graph->startChanges();
2492                                 r_modification_order(curr, act);
2493                                 if (!is_infeasible())
2494                                         curr->get_node()->add_read_from(act);
2495                                 mo_graph->rollbackChanges();
2496                         }
2497
2498                         /* Include at most one act per-thread that "happens before" curr */
2499                         if (act->happens_before(curr))
2500                                 break;
2501                 }
2502         }
2503         /* We may find no valid may-read-from only if the execution is doomed */
2504         if (!curr->get_node()->get_read_from_size()) {
2505                 priv->no_valid_reads = true;
2506                 set_assert();
2507         }
2508
2509         if (DBG_ENABLED()) {
2510                 model_print("Reached read action:\n");
2511                 curr->print();
2512                 model_print("Printing may_read_from\n");
2513                 curr->get_node()->print_may_read_from();
2514                 model_print("End printing may_read_from\n");
2515         }
2516 }
2517
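/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * A write is readable by a sleeping thread if it is the uninitialized value,
 * if it is a release write issued while the reading thread was on the sleep
 * set, or (for an RMW) if the write it reads from is itself readable under
 * these rules.
 *
 * @param curr The read action of the sleeping thread
 * @param write The candidate write
 * @return true if the read may read from the write; false otherwise
 */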
2518 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2519 {
2520         while (true) {
2521                 /* UNINIT actions don't have a Node, and they never sleep */
2522                 if (write->is_uninitialized())
2523                         return true;
2524                 Node *prevnode = write->get_node()->get_parent();
2525
2526                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2527                 if (write->is_release() && thread_sleep)
2528                         return true;
2529                 if (!write->is_rmw()) {
2530                         return false;
2531                 }
2532                 if (write->get_reads_from() == NULL)
2533                         return true;
2534                 write = write->get_reads_from();
2535         }
2536 }
2537
2538 /**
2539  * @brief Create a new action representing an uninitialized atomic
2540  * @param location The memory location of the atomic object
2541  * @return A pointer to a new ModelAction
2542  */
2543 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2544 {
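        /* Build the action with placement new on snapshot-allocated memory
         * rather than a plain 'new' */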
2545         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2546         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2547         act->create_cv(NULL);
2548         return act;
2549 }
2550
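/**
 * @brief Print an action list along with a simple hash of its contents
 *
 * The hash folds together each action's hash with an XOR/shift, which can
 * serve as a quick fingerprint when comparing traces across runs.
 *
 * @param list The action list to print
 */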
2551 static void print_list(action_list_t *list)
2552 {
2553         action_list_t::iterator it;
2554
2555         model_print("---------------------------------------------------------------------\n");
2556
2557         unsigned int hash = 0;
2558
2559         for (it = list->begin(); it != list->end(); it++) {
2560                 (*it)->print();
2561                 hash = hash^(hash<<3)^((*it)->hash());
2562         }
2563         model_print("HASH %u\n", hash);
2564         model_print("---------------------------------------------------------------------\n");
2565 }
2566
2567 #if SUPPORT_MOD_ORDER_DUMP
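/**
 * @brief Dump the current trace and modification order as a Graphviz graph
 *
 * Writes "<filename>.dot" containing the modification-order nodes plus
 * reads-from ("rf", red) and sequenced-before ("sb", blue) edges. The output
 * can be rendered with standard Graphviz tools, e.g.
 * "dot -Tpng graph.dot -o graph.png".
 *
 * @param filename Base name of the output file; ".dot" is appended
 */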
2568 void ModelChecker::dumpGraph(char *filename) const
2569 {
2570         char buffer[200];
2571         sprintf(buffer, "%s.dot", filename);
2572         FILE *file = fopen(buffer, "w");
2573         fprintf(file, "digraph %s {\n", filename);
2574         mo_graph->dumpNodes(file);
2575         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2576
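        /* thread_array[tid] remembers the most recent action seen for each
         * thread, so that consecutive actions within a thread can be linked
         * by a sequenced-before ("sb") edge */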
2577         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2578                 ModelAction *action = *it;
2579                 if (action->is_read()) {
2580                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2581                         if (action->get_reads_from() != NULL)
2582                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2583                 }
2584                 if (thread_array[action->get_tid()] != NULL) {
2585                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2586                 }
2587
2588                 thread_array[action->get_tid()] = action;
2589         }
2590         fprintf(file, "}\n");
2591         model_free(thread_array);
2592         fclose(file);
2593 }
2594 #endif
2595
2596 /** @brief Prints an execution trace summary. */
2597 void ModelChecker::print_summary() const
2598 {
2599 #if SUPPORT_MOD_ORDER_DUMP
2600         char buffername[100];
2601         sprintf(buffername, "exec%04u", stats.num_total);
2602         mo_graph->dumpGraphToFile(buffername);
2603         sprintf(buffername, "graph%04u", stats.num_total);
2604         dumpGraph(buffername);
2605 #endif
2606
2607         model_print("Execution %d:", stats.num_total);
2608         if (isfeasibleprefix())
2609                 model_print("\n");
2610         else
2611                 print_infeasibility(" INFEASIBLE");
2612         print_list(action_trace);
2613         model_print("\n");
2614 }
2615
2616 /**
2617  * Add a Thread to the system for the first time. Should only be called once
2618  * per thread.
2619  * @param t The Thread to add
2620  */
2621 void ModelChecker::add_thread(Thread *t)
2622 {
2623         thread_map->put(id_to_int(t->get_id()), t);
2624         scheduler->add_thread(t);
2625 }
2626
2627 /**
2628  * Removes a thread from the scheduler.
2629  * @param t The Thread to remove
2630  */
2631 void ModelChecker::remove_thread(Thread *t)
2632 {
2633         scheduler->remove_thread(t);
2634 }
2635
2636 /**
2637  * @brief Get a Thread reference by its ID
2638  * @param tid The Thread's ID
2639  * @return A Thread reference
2640  */
2641 Thread * ModelChecker::get_thread(thread_id_t tid) const
2642 {
2643         return thread_map->get(id_to_int(tid));
2644 }
2645
2646 /**
2647  * @brief Get a reference to the Thread in which a ModelAction was executed
2648  * @param act The ModelAction
2649  * @return A Thread reference
2650  */
2651 Thread * ModelChecker::get_thread(const ModelAction *act) const
2652 {
2653         return get_thread(act->get_tid());
2654 }
2655
2656 /**
2657  * @brief Check if a Thread is currently enabled
2658  * @param t The Thread to check
2659  * @return True if the Thread is currently enabled
2660  */
2661 bool ModelChecker::is_enabled(Thread *t) const
2662 {
2663         return scheduler->is_enabled(t);
2664 }
2665
2666 /**
2667  * @brief Check if a Thread is currently enabled
2668  * @param tid The ID of the Thread to check
2669  * @return True if the Thread is currently enabled
2670  */
2671 bool ModelChecker::is_enabled(thread_id_t tid) const
2672 {
2673         return scheduler->is_enabled(tid);
2674 }
2675
2676 /**
2677  * Switch from a model-checker context to a user-thread context. This is the
2678  * complement of ModelChecker::switch_to_master and must be called from the
2679  * model-checker context
2680  *
2681  * @param thread The user-thread to switch to
2682  */
2683 void ModelChecker::switch_from_master(Thread *thread)
2684 {
2685         scheduler->set_current_thread(thread);
2686         Thread::swap(&system_context, thread);
2687 }
2688
2689 /**
2690  * Switch from a user-context to the "master thread" context (a.k.a. system
2691  * context). This switch is made with the intention of exploring a particular
2692  * model-checking action (described by a ModelAction object). Must be called
2693  * from a user-thread context.
2694  *
2695  * @param act The current action that will be explored. May be NULL only if
2696  * trace is exiting via an assertion (see ModelChecker::set_assert and
2697  * ModelChecker::has_asserted).
2698  * @return Return the value returned by the current action
2699  */
2700 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2701 {
2702         DBG();
2703         Thread *old = thread_current();
2704         ASSERT(!old->get_pending());
2705         old->set_pending(act);
2706         if (Thread::swap(old, &system_context) < 0) {
2707                 perror("swap threads");
2708                 exit(EXIT_FAILURE);
2709         }
2710         return old->get_return_value();
2711 }
2712
2713 /**
2714  * Takes the next step in the execution, if possible.
2715  * @param curr The current step to take
2716  * @return Returns the next Thread to run, if any; NULL if this execution
2717  * should terminate
2718  */
2719 Thread * ModelChecker::take_step(ModelAction *curr)
2720 {
2721         Thread *curr_thrd = get_thread(curr);
2722         ASSERT(curr_thrd->get_state() == THREAD_READY);
2723
2724         curr = check_current_action(curr);
2725
2726         /* Infeasible -> don't take any more steps */
2727         if (is_infeasible())
2728                 return NULL;
2729         else if (isfeasibleprefix() && have_bug_reports()) {
2730                 set_assert();
2731                 return NULL;
2732         }
2733
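        /* Stop exploring this execution once the user-supplied step bound is
         * exceeded; a bound of 0 means "unbounded" */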
2734         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2735                 return NULL;
2736
2737         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2738                 scheduler->remove_thread(curr_thrd);
2739
2740         Thread *next_thrd = get_next_thread(curr);
2741
2742         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2743                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2744
2745         return next_thrd;
2746 }
2747
2748 /** Wrapper to run the user's main function, with appropriate arguments */
2749 void user_main_wrapper(void *)
2750 {
2751         user_main(model->params.argc, model->params.argv);
2752 }
2753
2754 /** @brief Run ModelChecker for the user program */
2755 void ModelChecker::run()
2756 {
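        /* Each iteration of the outer loop explores one complete execution;
         * next_execution() decides whether another execution remains to be
         * explored */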
2757         do {
2758                 thrd_t user_thread;
2759                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2760                 add_thread(t);
2761
2762                 do {
2763                         /*
2764                          * Stash next pending action(s) for thread(s). Normally only
2765                          * one thread's action needs stashing--the thread which just
2766                          * took a step--plus the first step for any newly-created
2767                          * thread
2768                          */
2769                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2770                                 thread_id_t tid = int_to_id(i);
2771                                 Thread *thr = get_thread(tid);
2772                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2773                                         switch_from_master(thr);
2774                                 }
2775                         }
2776
2777                         /* Catch assertions from prior take_step or from
2778                          * between-ModelAction bugs (e.g., data races) */
2779                         if (has_asserted())
2780                                 break;
2781
2782                         /* Consume the next action for a Thread */
2783                         ModelAction *curr = t->get_pending();
2784                         t->set_pending(NULL);
2785                         t = take_step(curr);
2786                 } while (t && !t->is_model_thread());
2787
2788                 /*
2789                  * Launch end-of-execution release sequence fixups only when
2790                  * the execution is otherwise feasible AND there are:
2791                  *
2792                  * (1) pending release sequences
2793                  * (2) pending assertions that could be invalidated by a change
2794                  * in clock vectors (i.e., data races)
2795                  * (3) no pending promises
2796                  */
2797                 while (!pending_rel_seqs->empty() &&
2798                                 is_feasible_prefix_ignore_relseq() &&
2799                                 !unrealizedraces.empty()) {
2800                         model_print("*** WARNING: release sequence fixup action "
2801                                         "(%zu pending release sequence(s)) ***\n",
2802                                         pending_rel_seqs->size());
2803                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2804                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2805                                         model_thread);
2806                         take_step(fixup);
2807                 }
2808         } while (next_execution());
2809
2810         model_print("******* Model-checking complete: *******\n");
2811         print_stats();
2812 }