model: add 'add_future_value()' wrapper
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 current_action(NULL),
43                 /* First thread created will have id INITIAL_THREAD_ID */
44                 next_thread_id(INITIAL_THREAD_ID),
45                 used_sequence_numbers(0),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         ModelAction *current_action;
62         unsigned int next_thread_id;
63         modelclock_t used_sequence_numbers;
64         ModelAction *next_backtrack;
65         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66         struct execution_stats stats;
67         bool failed_promise;
68         bool too_many_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
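/**
 * Editor's note: both helpers above implement the same lazy get-or-create
 * idiom over HashTable. A generic sketch of the pattern (hypothetical
 * template, not used by this file) might look like:
 * @code
 * template <typename Key, typename Val, typename Hash, int Shift>
 * static Val * get_safe_ptr(HashTable<Key, Val *, Hash, Shift> *hash, Key key)
 * {
 *         Val *tmp = hash->get(key);
 *         if (tmp == NULL) {
 *                 tmp = new Val();        // default-construct on first use
 *                 hash->put(key, tmp);
 *         }
 *         return tmp;
 * }
 * @endcode
 */
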
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         snapshot_backtrack_before(0);
163 }
164
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
167 {
168         return priv->next_thread_id++;
169 }
170
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
173 {
174         return priv->next_thread_id;
175 }
176
177 /**
178  * Must be called from user-thread context (e.g., through the global
179  * thread_current() interface)
180  *
181  * @return The currently executing Thread.
182  */
183 Thread * ModelChecker::get_current_thread() const
184 {
185         return scheduler->get_current_thread();
186 }
187
188 /** @return a sequence number for a new ModelAction */
189 modelclock_t ModelChecker::get_next_seq_num()
190 {
191         return ++priv->used_sequence_numbers;
192 }
193
194 Node * ModelChecker::get_curr_node() const
195 {
196         return node_stack->get_head();
197 }
198
199 /**
200  * @brief Choose the next thread to execute.
201  *
202  * This function chooses the next thread that should execute. It can force the
203  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
204  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
205  * The model-checker may have no preference regarding the next thread (i.e.,
206  * when exploring a new execution ordering), in which case this will return
207  * NULL.
208  * @param curr The current ModelAction. This action might guide the choice of
209  * next thread.
210  * @return The next thread to run. If the model-checker has no preference, NULL.
211  */
212 Thread * ModelChecker::get_next_thread(ModelAction *curr)
213 {
214         thread_id_t tid;
215
216         if (curr != NULL) {
217                 /* Do not split atomic actions. */
218                 if (curr->is_rmwr())
219                         return thread_current();
220                 else if (curr->get_type() == THREAD_CREATE)
221                         return curr->get_thread_operand();
222         }
223
224         /* Have we completed exploring the preselected path? */
225         if (diverge == NULL)
226                 return NULL;
227
228         /* Else, we are trying to replay an execution */
229         ModelAction *next = node_stack->get_next()->get_action();
230
231         if (next == diverge) {
232                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
233                         earliest_diverge = diverge;
234
235                 Node *nextnode = next->get_node();
236                 Node *prevnode = nextnode->get_parent();
237                 scheduler->update_sleep_set(prevnode);
238
239                 /* Reached divergence point */
240                 if (nextnode->increment_misc()) {
241                          /* The next node will try to satisfy a different misc_index value. */
242                         tid = next->get_tid();
243                         node_stack->pop_restofstack(2);
244                 } else if (nextnode->increment_promise()) {
245                         /* The next node will try to satisfy a different set of promises. */
246                         tid = next->get_tid();
247                         node_stack->pop_restofstack(2);
248                 } else if (nextnode->increment_read_from()) {
249                         /* The next node will read from a different value. */
250                         tid = next->get_tid();
251                         node_stack->pop_restofstack(2);
252                 } else if (nextnode->increment_future_value()) {
253                         /* The next node will try to read from a different future value. */
254                         tid = next->get_tid();
255                         node_stack->pop_restofstack(2);
256                 } else if (nextnode->increment_relseq_break()) {
257                         /* The next node will try to resolve a release sequence differently */
258                         tid = next->get_tid();
259                         node_stack->pop_restofstack(2);
260                 } else {
261                         ASSERT(prevnode);
262                         /* Make a different thread execute for next step */
263                         scheduler->add_sleep(get_thread(next->get_tid()));
264                         tid = prevnode->get_next_backtrack();
265                         /* Make sure the backtracked thread isn't sleeping. */
266                         node_stack->pop_restofstack(1);
267                         if (diverge == earliest_diverge) {
268                                 earliest_diverge = prevnode->get_action();
269                         }
270                 }
271                 /* The correct sleep set is in the parent node. */
272                 execute_sleep_set();
273
274                 DEBUG("*** Divergence point ***\n");
275
276                 diverge = NULL;
277         } else {
278                 tid = next->get_tid();
279         }
280         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
281         ASSERT(tid != THREAD_ID_T_NONE);
282         return thread_map->get(id_to_int(tid));
283 }
284
285 /**
286  * We need to know what the next actions of all threads in the sleep
287  * set will be.  This method computes them and stores each action as the
288  * corresponding Thread object's pending action.
289  */
290
291 void ModelChecker::execute_sleep_set()
292 {
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *thr = get_thread(tid);
296                 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
297                         thr->set_state(THREAD_RUNNING);
298                         scheduler->next_thread(thr);
299                         Thread::swap(&system_context, thr);
300                         priv->current_action->set_sleep_flag();
301                         thr->set_pending(priv->current_action);
302                 }
303         }
304 }
305
306 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
307 {
308         for (unsigned int i = 0; i < get_num_threads(); i++) {
309                 Thread *thr = get_thread(int_to_id(i));
310                 if (scheduler->is_sleep_set(thr)) {
311                         ModelAction *pending_act = thr->get_pending();
312                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
313                                 //Remove this thread from sleep set
314                                 scheduler->remove_sleep(thr);
315                 }
316         }
317 }
318
319 /** @brief Alert the model-checker that an incorrectly-ordered
320  * synchronization was made */
321 void ModelChecker::set_bad_synchronization()
322 {
323         priv->bad_synchronization = true;
324 }
325
326 bool ModelChecker::has_asserted() const
327 {
328         return priv->asserted;
329 }
330
331 void ModelChecker::set_assert()
332 {
333         priv->asserted = true;
334 }
335
336 /**
337  * Check if we are in a deadlock. Should only be called at the end of an
338  * execution, although it should not give false positives in the middle of an
339  * execution (there should be some ENABLED thread).
340  *
341  * @return True if program is in a deadlock; false otherwise
342  */
343 bool ModelChecker::is_deadlocked() const
344 {
345         bool blocking_threads = false;
346         for (unsigned int i = 0; i < get_num_threads(); i++) {
347                 thread_id_t tid = int_to_id(i);
348                 if (is_enabled(tid))
349                         return false;
350                 Thread *t = get_thread(tid);
351                 if (!t->is_model_thread() && t->get_pending())
352                         blocking_threads = true;
353         }
354         return blocking_threads;
355 }
356
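/**
 * Editor's note: a minimal user-program sketch (not from this repository) of
 * the situation is_deadlocked() reports; two threads acquire a pair of
 * std::mutex objects in opposite orders:
 * @code
 * std::mutex m1, m2;
 *
 * static void thread_a(void *) { m1.lock(); m2.lock(); m2.unlock(); m1.unlock(); }
 * static void thread_b(void *) { m2.lock(); m1.lock(); m1.unlock(); m2.unlock(); }
 * @endcode
 * If the scheduler interleaves the two outer lock() calls, both threads end
 * up blocked in the lock waiter lists: no thread is enabled, yet some threads
 * still have pending actions, which is exactly the condition checked above.
 */
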
357 /**
358  * Check if this is a complete execution. That is, have all threads completed
359  * execution (rather than exiting because sleep sets have forced a redundant
360  * execution).
361  *
362  * @return True if the execution is complete.
363  */
364 bool ModelChecker::is_complete_execution() const
365 {
366         for (unsigned int i = 0; i < get_num_threads(); i++)
367                 if (is_enabled(int_to_id(i)))
368                         return false;
369         return true;
370 }
371
372 /**
373  * @brief Assert a bug in the executing program.
374  *
375  * Use this function to assert any sort of bug in the user program. If the
376  * current trace is feasible (actually, a prefix of some feasible execution),
377  * then this execution will be aborted, printing the appropriate message. If
378  * the current trace is not yet feasible, the error message will be stashed and
379  * printed if the execution ever becomes feasible.
380  *
381  * @param msg Descriptive message for the bug (do not include newline char)
382  * @return True if the bug is immediately feasible
383  */
384 bool ModelChecker::assert_bug(const char *msg)
385 {
386         priv->bugs.push_back(new bug_message(msg));
387
388         if (isfeasibleprefix()) {
389                 set_assert();
390                 return true;
391         }
392         return false;
393 }
394
395 /**
396  * @brief Assert a bug in the executing program, asserted by a user thread
397  * @see ModelChecker::assert_bug
398  * @param msg Descriptive message for the bug (do not include newline char)
399  */
400 void ModelChecker::assert_user_bug(const char *msg)
401 {
402         /* If feasible bug, bail out now */
403         if (assert_bug(msg))
404                 switch_to_master(NULL);
405 }
406
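/**
 * Editor's note: a hedged usage sketch. User-level checks can reach this
 * entry point through the global ModelChecker instance declared at the top
 * of this file; the user-facing wrapper (e.g., a MODEL_ASSERT-style macro)
 * lives outside this file, so the direct call below is only illustrative:
 * @code
 * void check_invariant(int observed)
 * {
 *         if (observed != 42)
 *                 model->assert_user_bug("invariant violated");
 * }
 * @endcode
 * If the current trace is already a feasible prefix, the execution aborts
 * immediately; otherwise the message is stashed by assert_bug() and printed
 * only if the trace ever becomes feasible.
 */
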
407 /** @return True, if any bugs have been reported for this execution */
408 bool ModelChecker::have_bug_reports() const
409 {
410         return priv->bugs.size() != 0;
411 }
412
413 /** @brief Print bug report listing for this execution (if any bugs exist) */
414 void ModelChecker::print_bugs() const
415 {
416         if (have_bug_reports()) {
417                 model_print("Bug report: %zu bug%s detected\n",
418                                 priv->bugs.size(),
419                                 priv->bugs.size() > 1 ? "s" : "");
420                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
421                         priv->bugs[i]->print();
422         }
423 }
424
425 /**
426  * @brief Record end-of-execution stats
427  *
428  * Must be run when exiting an execution. Records various stats.
429  * @see struct execution_stats
430  */
431 void ModelChecker::record_stats()
432 {
433         stats.num_total++;
434         if (!isfeasibleprefix())
435                 stats.num_infeasible++;
436         else if (have_bug_reports())
437                 stats.num_buggy_executions++;
438         else if (is_complete_execution())
439                 stats.num_complete++;
440         else
441                 stats.num_redundant++;
442 }
443
444 /** @brief Print execution stats */
445 void ModelChecker::print_stats() const
446 {
447         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
448         model_print("Number of redundant executions: %d\n", stats.num_redundant);
449         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
450         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
451         model_print("Total executions: %d\n", stats.num_total);
452         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
453 }
454
455 /**
456  * @brief End-of-execution print
457  * @param printbugs Should any existing bugs be printed?
458  */
459 void ModelChecker::print_execution(bool printbugs) const
460 {
461         print_program_output();
462
463         if (DBG_ENABLED() || params.verbose) {
464                 model_print("Earliest divergence point since last feasible execution:\n");
465                 if (earliest_diverge)
466                         earliest_diverge->print();
467                 else
468                         model_print("(Not set)\n");
469
470                 model_print("\n");
471                 print_stats();
472         }
473
474         /* Don't print invalid bugs */
475         if (printbugs)
476                 print_bugs();
477
478         model_print("\n");
479         print_summary();
480 }
481
482 /**
483  * Queries the model-checker for more executions to explore and, if one
484  * exists, resets the model-checker state to execute a new execution.
485  *
486  * @return If there are more executions to explore, return true. Otherwise,
487  * return false.
488  */
489 bool ModelChecker::next_execution()
490 {
491         DBG();
492         /* Is this execution a feasible execution that's worth bug-checking? */
493         bool complete = isfeasibleprefix() && (is_complete_execution() ||
494                         have_bug_reports());
495
496         /* End-of-execution bug checks */
497         if (complete) {
498                 if (is_deadlocked())
499                         assert_bug("Deadlock detected");
500
501                 checkDataRaces();
502         }
503
504         record_stats();
505
506         /* Output */
507         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
508                 print_execution(complete);
509         else
510                 clear_program_output();
511
512         if (complete)
513                 earliest_diverge = NULL;
514
515         if ((diverge = get_next_backtrack()) == NULL)
516                 return false;
517
518         if (DBG_ENABLED()) {
519                 model_print("Next execution will diverge at:\n");
520                 diverge->print();
521         }
522
523         reset_to_initial_state();
524         return true;
525 }
526
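/**
 * Editor's note: a sketch of how a top-level driver could use
 * next_execution(); run_one_execution() is a hypothetical stand-in for
 * whatever runs the user program to completion under the scheduler:
 * @code
 * do {
 *         run_one_execution();
 * } while (model->next_execution());
 * @endcode
 * Each iteration either finds a new divergence point and rolls the snapshot
 * back to the initial state, or returns false when the search is exhausted.
 */
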
527 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
528 {
529         switch (act->get_type()) {
530         case ATOMIC_FENCE:
531         case ATOMIC_READ:
532         case ATOMIC_WRITE:
533         case ATOMIC_RMW: {
534                 /* Optimization: relaxed operations don't need backtracking */
535                 if (act->is_relaxed())
536                         return NULL;
537                 /* linear search: from most recent to oldest */
538                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
539                 action_list_t::reverse_iterator rit;
540                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
541                         ModelAction *prev = *rit;
542                         if (prev->could_synchronize_with(act))
543                                 return prev;
544                 }
545                 break;
546         }
547         case ATOMIC_LOCK:
548         case ATOMIC_TRYLOCK: {
549                 /* linear search: from most recent to oldest */
550                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
551                 action_list_t::reverse_iterator rit;
552                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
553                         ModelAction *prev = *rit;
554                         if (act->is_conflicting_lock(prev))
555                                 return prev;
556                 }
557                 break;
558         }
559         case ATOMIC_UNLOCK: {
560                 /* linear search: from most recent to oldest */
561                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
562                 action_list_t::reverse_iterator rit;
563                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
564                         ModelAction *prev = *rit;
565                         if (!act->same_thread(prev) && prev->is_failed_trylock())
566                                 return prev;
567                 }
568                 break;
569         }
570         case ATOMIC_WAIT: {
571                 /* linear search: from most recent to oldest */
572                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
573                 action_list_t::reverse_iterator rit;
574                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
575                         ModelAction *prev = *rit;
576                         if (!act->same_thread(prev) && prev->is_failed_trylock())
577                                 return prev;
578                         if (!act->same_thread(prev) && prev->is_notify())
579                                 return prev;
580                 }
581                 break;
582         }
583
584         case ATOMIC_NOTIFY_ALL:
585         case ATOMIC_NOTIFY_ONE: {
586                 /* linear search: from most recent to oldest */
587                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
588                 action_list_t::reverse_iterator rit;
589                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
590                         ModelAction *prev = *rit;
591                         if (!act->same_thread(prev) && prev->is_wait())
592                                 return prev;
593                 }
594                 break;
595         }
596         default:
597                 break;
598         }
599         return NULL;
600 }
601
602 /** This method finds backtracking points against which we should try to
603  * reorder the parameter ModelAction.
604  *
605  * @param act The ModelAction to find backtracking points for.
606  */
607 void ModelChecker::set_backtracking(ModelAction *act)
608 {
609         Thread *t = get_thread(act);
610         ModelAction *prev = get_last_conflict(act);
611         if (prev == NULL)
612                 return;
613
614         Node *node = prev->get_node()->get_parent();
615
616         int low_tid, high_tid;
617         if (node->is_enabled(t)) {
618                 low_tid = id_to_int(act->get_tid());
619                 high_tid = low_tid + 1;
620         } else {
621                 low_tid = 0;
622                 high_tid = get_num_threads();
623         }
624
625         for (int i = low_tid; i < high_tid; i++) {
626                 thread_id_t tid = int_to_id(i);
627
628                 /* Make sure this thread can be enabled here. */
629                 if (i >= node->get_num_threads())
630                         break;
631
632                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
633                 if (node->enabled_status(tid) != THREAD_ENABLED)
634                         continue;
635
636                 /* Check if this has been explored already */
637                 if (node->has_been_explored(tid))
638                         continue;
639
640                 /* See if fairness allows */
641                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
642                         bool unfair = false;
643                         for (int t = 0; t < node->get_num_threads(); t++) {
644                                 thread_id_t tother = int_to_id(t);
645                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
646                                         unfair = true;
647                                         break;
648                                 }
649                         }
650                         if (unfair)
651                                 continue;
652                 }
653                 /* Cache the latest backtracking point */
654                 set_latest_backtrack(prev);
655
656                 /* If this is a new backtracking point, mark the tree */
657                 if (!node->set_backtrack(tid))
658                         continue;
659                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
660                                         id_to_int(prev->get_tid()),
661                                         id_to_int(t->get_id()));
662                 if (DBG_ENABLED()) {
663                         prev->print();
664                         act->print();
665                 }
666         }
667 }
668
669 /**
670  * @brief Cache a backtracking point as the "most recent", if eligible
671  *
672  * Note that this does not prepare the NodeStack for this backtracking
673  * operation; it only caches the action on a per-execution basis
674  *
675  * @param act The operation at which we should explore a different next action
676  * (i.e., backtracking point)
677  * @return True, if this action is now the most recent backtracking point;
678  * false otherwise
679  */
680 bool ModelChecker::set_latest_backtrack(ModelAction *act)
681 {
682         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
683                 priv->next_backtrack = act;
684                 return true;
685         }
686         return false;
687 }
688
689 /**
690  * Returns the last backtracking point. The model checker will explore a different
691  * path for this point in the next execution.
692  * @return The ModelAction at which the next execution should diverge.
693  */
694 ModelAction * ModelChecker::get_next_backtrack()
695 {
696         ModelAction *next = priv->next_backtrack;
697         priv->next_backtrack = NULL;
698         return next;
699 }
700
701 /**
702  * Processes a read or rmw model action.
703  * @param curr is the read model action to process.
704  * @param second_part_of_rmw is a boolean that is true if this is the second part of an RMW.
705  * @return True if processing this read updates the mo_graph.
706  */
707 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
708 {
709         uint64_t value = VALUE_NONE;
710         bool updated = false;
711         while (true) {
712                 const ModelAction *reads_from = curr->get_node()->get_read_from();
713                 if (reads_from != NULL) {
714                         mo_graph->startChanges();
715
716                         value = reads_from->get_value();
717                         bool r_status = false;
718
719                         if (!second_part_of_rmw) {
720                                 check_recency(curr, reads_from);
721                                 r_status = r_modification_order(curr, reads_from);
722                         }
723
724
725                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
726                                 mo_graph->rollbackChanges();
727                                 priv->too_many_reads = false;
728                                 continue;
729                         }
730
731                         read_from(curr, reads_from);
732                         mo_graph->commitChanges();
733                         mo_check_promises(curr->get_tid(), reads_from);
734
735                         updated |= r_status;
736                 } else if (!second_part_of_rmw) {
737                         /* Read from future value */
738                         value = curr->get_node()->get_future_value();
739                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
740                         curr->set_read_from(NULL);
741                         Promise *valuepromise = new Promise(curr, value, expiration);
742                         promises->push_back(valuepromise);
743                 }
744                 get_thread(curr)->set_return_value(value);
745                 return updated;
746         }
747 }
748
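/**
 * Editor's note: an illustrative user program (hedged sketch, written against
 * the C++11 atomic API the checker interposes on) for the future-value branch
 * above. In the "load buffering" idiom, each relaxed load may read from a
 * store that has not been scheduled yet, which is what the Promise /
 * future-value machinery represents:
 * @code
 * std::atomic<int> x(0), y(0);
 *
 * static void thread_1(void *) { int r1 = x.load(std::memory_order_relaxed);
 *                                y.store(1, std::memory_order_relaxed); (void)r1; }
 * static void thread_2(void *) { int r2 = y.load(std::memory_order_relaxed);
 *                                x.store(1, std::memory_order_relaxed); (void)r2; }
 * @endcode
 * The outcome r1 == r2 == 1 can only be explored by letting each load
 * speculate on a future value and recording a Promise that a later write
 * must resolve.
 */
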
749 /**
750  * Processes a lock, trylock, or unlock model action.  @param curr is
751  * Processes a lock, trylock, unlock, wait, or notify model action.
752  * @param curr is the mutex-related model action to process.
753  *
754  * The trylock operation checks whether the lock is taken.  If not,
755  * it falls through to the normal lock operation case.  If so, it
756  * returns failure.
757  * The lock operation has already been checked that it is enabled, so
758  * it just grabs the lock and synchronizes with the previous unlock.
759  *
760  * The unlock operation has to re-enable all of the threads that are
761  * waiting on the lock.
762  *
763  * @return True if synchronization was updated; false otherwise
764  */
765 bool ModelChecker::process_mutex(ModelAction *curr)
766 {
767         std::mutex *mutex = NULL;
768         struct std::mutex_state *state = NULL;
769
770         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
771                 mutex = (std::mutex *)curr->get_location();
772                 state = mutex->get_state();
773         } else if (curr->is_wait()) {
774                 mutex = (std::mutex *)curr->get_value();
775                 state = mutex->get_state();
776         }
777
778         switch (curr->get_type()) {
779         case ATOMIC_TRYLOCK: {
780                 bool success = !state->islocked;
781                 curr->set_try_lock(success);
782                 if (!success) {
783                         get_thread(curr)->set_return_value(0);
784                         break;
785                 }
786                 get_thread(curr)->set_return_value(1);
787         }
788                 //otherwise fall through to the lock case
789         case ATOMIC_LOCK: {
790                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
791                         assert_bug("Lock access before initialization");
792                 state->islocked = true;
793                 ModelAction *unlock = get_last_unlock(curr);
794                 //synchronize with the previous unlock statement
795                 if (unlock != NULL) {
796                         curr->synchronize_with(unlock);
797                         return true;
798                 }
799                 break;
800         }
801         case ATOMIC_UNLOCK: {
802                 //unlock the lock
803                 state->islocked = false;
804                 //wake up the other threads
805                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
806                 //activate all the waiting threads
807                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
808                         scheduler->wake(get_thread(*rit));
809                 }
810                 waiters->clear();
811                 break;
812         }
813         case ATOMIC_WAIT: {
814                 //unlock the lock
815                 state->islocked = false;
816                 //wake up the other threads
817                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
818                 //activate all the waiting threads
819                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
820                         scheduler->wake(get_thread(*rit));
821                 }
822                 waiters->clear();
823                 //check whether we should go to sleep or not... simulate spurious wake-ups
824                 if (curr->get_node()->get_misc() == 0) {
825                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
826                         //disable us
827                         scheduler->sleep(get_thread(curr));
828                 }
829                 break;
830         }
831         case ATOMIC_NOTIFY_ALL: {
832                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
833                 //activate all the waiting threads
834                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
835                         scheduler->wake(get_thread(*rit));
836                 }
837                 waiters->clear();
838                 break;
839         }
840         case ATOMIC_NOTIFY_ONE: {
841                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
842                 int wakeupthread = curr->get_node()->get_misc();
843                 action_list_t::iterator it = waiters->begin();
844                 advance(it, wakeupthread);
845                 scheduler->wake(get_thread(*it));
846                 waiters->erase(it);
847                 break;
848         }
849
850         default:
851                 ASSERT(0);
852         }
853         return false;
854 }
855
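/**
 * Editor's note: a minimal user-level sketch of the mutex cases handled
 * above (std::mutex here refers to the checker's instrumented replacement,
 * not the platform library):
 * @code
 * std::mutex m;
 * int shared = 0;
 *
 * static void worker(void *)
 * {
 *         if (m.try_lock()) {     // ATOMIC_TRYLOCK; may fail and return 0
 *                 shared++;
 *                 m.unlock();     // ATOMIC_UNLOCK wakes any lock waiters
 *         } else {
 *                 m.lock();       // ATOMIC_LOCK; enabled only when the lock is free
 *                 shared++;
 *                 m.unlock();
 *         }
 * }
 * @endcode
 * The wait/notify cases follow the same shape with a condition variable,
 * driving the ATOMIC_WAIT and ATOMIC_NOTIFY_* actions.
 */
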
856 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
857 {
858         /* Do more ambitious checks now that mo is more complete */
859         if (mo_may_allow(writer, reader) &&
860                         reader->get_node()->add_future_value(writer->get_value(),
861                                 writer->get_seq_number() + params.maxfuturedelay))
862                 set_latest_backtrack(reader);
863 }
864
865 /**
866  * Process a write ModelAction
867  * @param curr The ModelAction to process
868  * @return True if the mo_graph was updated or promises were resolved
869  */
870 bool ModelChecker::process_write(ModelAction *curr)
871 {
872         bool updated_mod_order = w_modification_order(curr);
873         bool updated_promises = resolve_promises(curr);
874
875         if (promises->size() == 0) {
876                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
877                         struct PendingFutureValue pfv = (*futurevalues)[i];
878                         add_future_value(pfv.writer, pfv.act);
879                 }
880                 futurevalues->clear();
881         }
882
883         mo_graph->commitChanges();
884         mo_check_promises(curr->get_tid(), curr);
885
886         get_thread(curr)->set_return_value(VALUE_NONE);
887         return updated_mod_order || updated_promises;
888 }
889
890 /**
891  * Process a fence ModelAction
892  * @param curr The ModelAction to process
893  * @return True if synchronization was updated
894  */
895 bool ModelChecker::process_fence(ModelAction *curr)
896 {
897         /*
898          * fence-relaxed: no-op
899          * fence-release: only log the occurrence (not in this function), for
900          *   use in later synchronization
901          * fence-acquire (this function): search for hypothetical release
902          *   sequences
903          */
904         bool updated = false;
905         if (curr->is_acquire()) {
906                 action_list_t *list = action_trace;
907                 action_list_t::reverse_iterator rit;
908                 /* Find X : is_read(X) && X --sb-> curr */
909                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
910                         ModelAction *act = *rit;
911                         if (act == curr)
912                                 continue;
913                         if (act->get_tid() != curr->get_tid())
914                                 continue;
915                         /* Stop at the beginning of the thread */
916                         if (act->is_thread_start())
917                                 break;
918                         /* Stop once we reach a prior fence-acquire */
919                         if (act->is_fence() && act->is_acquire())
920                                 break;
921                         if (!act->is_read())
922                                 continue;
923                         /* read-acquire will find its own release sequences */
924                         if (act->is_acquire())
925                                 continue;
926
927                         /* Establish hypothetical release sequences */
928                         rel_heads_list_t release_heads;
929                         get_release_seq_heads(curr, act, &release_heads);
930                         for (unsigned int i = 0; i < release_heads.size(); i++)
931                                 if (!curr->synchronize_with(release_heads[i]))
932                                         set_bad_synchronization();
933                         if (release_heads.size() != 0)
934                                 updated = true;
935                 }
936         }
937         return updated;
938 }
939
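/**
 * Editor's note: a hedged example of the fence-acquire pattern this function
 * searches for (C++11 atomics as interposed by the checker):
 * @code
 * std::atomic<int> data(0), flag(0);
 *
 * static void producer(void *)
 * {
 *         data.store(42, std::memory_order_relaxed);
 *         flag.store(1, std::memory_order_release);
 * }
 *
 * static void consumer(void *)
 * {
 *         if (flag.load(std::memory_order_relaxed) == 1) {        // read X
 *                 std::atomic_thread_fence(std::memory_order_acquire);
 *                 int r = data.load(std::memory_order_relaxed);   // sees 42
 *                 (void)r;
 *         }
 * }
 * @endcode
 * The acquire fence synchronizes with the release store to flag because the
 * earlier relaxed read X reads from it, making the store to data visible.
 */
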
940 /**
941  * @brief Process the current action for thread-related activity
942  *
943  * Performs current-action processing for a THREAD_* ModelAction. Processing
944  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
945  * synchronization, etc.  This function is a no-op for non-THREAD actions
946  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
947  *
948  * @param curr The current action
949  * @return True if synchronization was updated or a thread completed
950  */
951 bool ModelChecker::process_thread_action(ModelAction *curr)
952 {
953         bool updated = false;
954
955         switch (curr->get_type()) {
956         case THREAD_CREATE: {
957                 Thread *th = curr->get_thread_operand();
958                 th->set_creation(curr);
959                 break;
960         }
961         case THREAD_JOIN: {
962                 Thread *blocking = curr->get_thread_operand();
963                 ModelAction *act = get_last_action(blocking->get_id());
964                 curr->synchronize_with(act);
965                 updated = true; /* trigger rel-seq checks */
966                 break;
967         }
968         case THREAD_FINISH: {
969                 Thread *th = get_thread(curr);
970                 while (!th->wait_list_empty()) {
971                         ModelAction *act = th->pop_wait_list();
972                         scheduler->wake(get_thread(act));
973                 }
974                 th->complete();
975                 updated = true; /* trigger rel-seq checks */
976                 break;
977         }
978         case THREAD_START: {
979                 check_promises(curr->get_tid(), NULL, curr->get_cv());
980                 break;
981         }
982         default:
983                 break;
984         }
985
986         return updated;
987 }
988
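/**
 * Editor's note: the THREAD_* actions handled above are generated by the
 * C11-style thread API used by the checker's test programs (a hedged sketch;
 * the exact signatures come from the checker's own threads.h, not this file):
 * @code
 * static void child(void *arg) { (void)arg; }
 *
 * void user_main()
 * {
 *         thrd_t t;
 *         thrd_create(&t, (thrd_start_t)&child, NULL); // THREAD_CREATE, then THREAD_START
 *         thrd_join(t);                                // THREAD_JOIN, syncs with THREAD_FINISH
 * }
 * @endcode
 */
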
989 /**
990  * @brief Process the current action for release sequence fixup activity
991  *
992  * Performs model-checker release sequence fixups for the current action,
993  * forcing a single pending release sequence to break (with a given, potential
994  * "loose" write) or to complete (i.e., synchronize). If a pending release
995  * sequence forms a complete release sequence, then we must perform the fixup
996  * synchronization, mo_graph additions, etc.
997  *
998  * @param curr The current action; must be a release sequence fixup action
999  * @param work_queue The work queue to which to add work items as they are
1000  * generated
1001  */
1002 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1003 {
1004         const ModelAction *write = curr->get_node()->get_relseq_break();
1005         struct release_seq *sequence = pending_rel_seqs->back();
1006         pending_rel_seqs->pop_back();
1007         ASSERT(sequence);
1008         ModelAction *acquire = sequence->acquire;
1009         const ModelAction *rf = sequence->rf;
1010         const ModelAction *release = sequence->release;
1011         ASSERT(acquire);
1012         ASSERT(release);
1013         ASSERT(rf);
1014         ASSERT(release->same_thread(rf));
1015
1016         if (write == NULL) {
1017                 /**
1018                  * @todo Forcing a synchronization requires that we set
1019                  * modification order constraints. For instance, we can't allow
1020                  * a fixup sequence in which two separate read-acquire
1021                  * operations read from the same sequence, where the first one
1022                  * synchronizes and the other doesn't. Essentially, we can't
1023                  * allow any writes to insert themselves between 'release' and
1024                  * 'rf'
1025                  */
1026
1027                 /* Must synchronize */
1028                 if (!acquire->synchronize_with(release)) {
1029                         set_bad_synchronization();
1030                         return;
1031                 }
1032                 /* Re-check all pending release sequences */
1033                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1034                 /* Re-check act for mo_graph edges */
1035                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1036
1037                 /* propagate synchronization to later actions */
1038                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1039                 for (; (*rit) != acquire; rit++) {
1040                         ModelAction *propagate = *rit;
1041                         if (acquire->happens_before(propagate)) {
1042                                 propagate->synchronize_with(acquire);
1043                                 /* Re-check 'propagate' for mo_graph edges */
1044                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1045                         }
1046                 }
1047         } else {
1048                 /* Break release sequence with new edges:
1049                  *   release --mo--> write --mo--> rf */
1050                 mo_graph->addEdge(release, write);
1051                 mo_graph->addEdge(write, rf);
1052         }
1053
1054         /* See if we have realized a data race */
1055         checkDataRaces();
1056 }
1057
1058 /**
1059  * Initialize the current action by performing one or more of the following
1060  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1061  * in the NodeStack, manipulating backtracking sets, allocating and
1062  * initializing clock vectors, and computing the promises to fulfill.
1063  *
1064  * @param curr The current action, as passed from the user context; may be
1065  * freed/invalidated after the execution of this function, with a different
1066  * action "returned" in its place (pass-by-reference)
1067  * @return True if curr is a newly-explored action; false otherwise
1068  */
1069 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1070 {
1071         ModelAction *newcurr;
1072
1073         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1074                 newcurr = process_rmw(*curr);
1075                 delete *curr;
1076
1077                 if (newcurr->is_rmw())
1078                         compute_promises(newcurr);
1079
1080                 *curr = newcurr;
1081                 return false;
1082         }
1083
1084         (*curr)->set_seq_number(get_next_seq_num());
1085
1086         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1087         if (newcurr) {
1088                 /* First restore type and order in case of RMW operation */
1089                 if ((*curr)->is_rmwr())
1090                         newcurr->copy_typeandorder(*curr);
1091
1092                 ASSERT((*curr)->get_location() == newcurr->get_location());
1093                 newcurr->copy_from_new(*curr);
1094
1095                 /* Discard duplicate ModelAction; use action from NodeStack */
1096                 delete *curr;
1097
1098                 /* Always compute new clock vector */
1099                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1100
1101                 *curr = newcurr;
1102                 return false; /* Action was explored previously */
1103         } else {
1104                 newcurr = *curr;
1105
1106                 /* Always compute new clock vector */
1107                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1108
1109                 /* Assign most recent release fence */
1110                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1111
1112                 /*
1113                  * Perform one-time actions when pushing new ModelAction onto
1114                  * NodeStack
1115                  */
1116                 if (newcurr->is_write())
1117                         compute_promises(newcurr);
1118                 else if (newcurr->is_relseq_fixup())
1119                         compute_relseq_breakwrites(newcurr);
1120                 else if (newcurr->is_wait())
1121                         newcurr->get_node()->set_misc_max(2);
1122                 else if (newcurr->is_notify_one()) {
1123                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1124                 }
1125                 return true; /* This was a new ModelAction */
1126         }
1127 }
1128
1129 /**
1130  * @brief Establish reads-from relation between two actions
1131  *
1132  * Perform basic operations involved with establishing a concrete rf relation,
1133  * including setting the ModelAction data and checking for release sequences.
1134  *
1135  * @param act The action that is reading (must be a read)
1136  * @param rf The action from which we are reading (must be a write)
1137  *
1138  * @return True if this read established synchronization
1139  */
1140 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1141 {
1142         act->set_read_from(rf);
1143         if (rf != NULL && act->is_acquire()) {
1144                 rel_heads_list_t release_heads;
1145                 get_release_seq_heads(act, act, &release_heads);
1146                 int num_heads = release_heads.size();
1147                 for (unsigned int i = 0; i < release_heads.size(); i++)
1148                         if (!act->synchronize_with(release_heads[i])) {
1149                                 set_bad_synchronization();
1150                                 num_heads--;
1151                         }
1152                 return num_heads > 0;
1153         }
1154         return false;
1155 }
1156
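/**
 * Editor's note: a hedged sketch of the synchronization this helper
 * establishes. In the message-passing idiom below, the acquire load reads
 * from the release store; get_release_seq_heads() returns that store as a
 * release-sequence head and synchronize_with() makes the plain write to
 * payload visible:
 * @code
 * std::atomic<int> flag(0);
 * int payload = 0;
 *
 * static void sender(void *)
 * {
 *         payload = 1;
 *         flag.store(1, std::memory_order_release);
 * }
 *
 * static void receiver(void *)
 * {
 *         if (flag.load(std::memory_order_acquire) == 1) {
 *                 int r = payload;        // guaranteed to read 1
 *                 (void)r;
 *         }
 * }
 * @endcode
 */
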
1157 /**
1158  * @brief Check whether a model action is enabled.
1159  *
1160  * Checks whether a lock or join operation would be successful (i.e., the lock
1161  * is not already held, or the joined thread has already completed). If not, put
1162  * the action in a waiter list.
1163  *
1164  * @param curr is the ModelAction to check whether it is enabled.
1165  * @return a bool that indicates whether the action is enabled.
1166  */
1167 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1168         if (curr->is_lock()) {
1169                 std::mutex *lock = (std::mutex *)curr->get_location();
1170                 struct std::mutex_state *state = lock->get_state();
1171                 if (state->islocked) {
1172                         //Stick the action in the appropriate waiting queue
1173                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1174                         return false;
1175                 }
1176         } else if (curr->get_type() == THREAD_JOIN) {
1177                 Thread *blocking = (Thread *)curr->get_location();
1178                 if (!blocking->is_complete()) {
1179                         blocking->push_wait_list(curr);
1180                         return false;
1181                 }
1182         }
1183
1184         return true;
1185 }
1186
1187 /**
1188  * Stores the ModelAction for the current thread action.  Call this
1189  * immediately before switching from user- to system-context to pass
1190  * data between them.
1191  * @param act The ModelAction created by the user-thread action
1192  */
1193 void ModelChecker::set_current_action(ModelAction *act) {
1194         priv->current_action = act;
1195 }
1196
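/**
 * Editor's note: a rough sketch of the user-to-system handoff this supports.
 * The wrapper below is illustrative only; the real instrumented entry points
 * and the exact ModelAction constructor arguments live outside this file:
 * @code
 * void write_wrapper(void *addr, uint64_t val)   // hypothetical user-context wrapper
 * {
 *         ModelAction *act = new ModelAction(ATOMIC_WRITE, std::memory_order_relaxed,
 *                                            addr, val);
 *         model->switch_to_master(act);  // stashes act via set_current_action() and
 *                                        // swaps to the checker, which picks it up
 *                                        // in check_current_action()
 * }
 * @endcode
 */
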
1197 /**
1198  * This is the heart of the model checker routine. It performs model-checking
1199  * actions corresponding to a given "current action." Among other processes, it
1200  * calculates reads-from relationships, updates synchronization clock vectors,
1201  * forms a memory_order constraints graph, and handles replay/backtrack
1202  * execution when running permutations of previously-observed executions.
1203  *
1204  * @param curr The current action to process
1205  * @return The ModelAction that is actually executed; may be different than
1206  * curr; may be NULL, if the current action is not enabled to run
1207  */
1208 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1209 {
1210         ASSERT(curr);
1211         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1212
1213         if (!check_action_enabled(curr)) {
1214                 /* Make the execution look like we chose to run this action
1215                  * much later, when a lock/join can succeed */
1216                 get_thread(curr)->set_pending(curr);
1217                 scheduler->sleep(get_thread(curr));
1218                 return NULL;
1219         }
1220
1221         bool newly_explored = initialize_curr_action(&curr);
1222
1223         DBG();
1224         if (DBG_ENABLED())
1225                 curr->print();
1226
1227         wake_up_sleeping_actions(curr);
1228
1229         /* Add the action to lists before any other model-checking tasks */
1230         if (!second_part_of_rmw)
1231                 add_action_to_lists(curr);
1232
1233         /* Build may_read_from set for newly-created actions */
1234         if (newly_explored && curr->is_read())
1235                 build_reads_from_past(curr);
1236
1237         /* Initialize work_queue with the "current action" work */
1238         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1239         while (!work_queue.empty() && !has_asserted()) {
1240                 WorkQueueEntry work = work_queue.front();
1241                 work_queue.pop_front();
1242
1243                 switch (work.type) {
1244                 case WORK_CHECK_CURR_ACTION: {
1245                         ModelAction *act = work.action;
1246                         bool update = false; /* update this location's release seq's */
1247                         bool update_all = false; /* update all release seq's */
1248
1249                         if (process_thread_action(curr))
1250                                 update_all = true;
1251
1252                         if (act->is_read() && process_read(act, second_part_of_rmw))
1253                                 update = true;
1254
1255                         if (act->is_write() && process_write(act))
1256                                 update = true;
1257
1258                         if (act->is_fence() && process_fence(act))
1259                                 update_all = true;
1260
1261                         if (act->is_mutex_op() && process_mutex(act))
1262                                 update_all = true;
1263
1264                         if (act->is_relseq_fixup())
1265                                 process_relseq_fixup(curr, &work_queue);
1266
1267                         if (update_all)
1268                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1269                         else if (update)
1270                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1271                         break;
1272                 }
1273                 case WORK_CHECK_RELEASE_SEQ:
1274                         resolve_release_sequences(work.location, &work_queue);
1275                         break;
1276                 case WORK_CHECK_MO_EDGES: {
1277                         /** @todo Complete verification of work_queue */
1278                         ModelAction *act = work.action;
1279                         bool updated = false;
1280
1281                         if (act->is_read()) {
1282                                 const ModelAction *rf = act->get_reads_from();
1283                                 if (rf != NULL && r_modification_order(act, rf))
1284                                         updated = true;
1285                         }
1286                         if (act->is_write()) {
1287                                 if (w_modification_order(act))
1288                                         updated = true;
1289                         }
1290                         mo_graph->commitChanges();
1291
1292                         if (updated)
1293                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1294                         break;
1295                 }
1296                 default:
1297                         ASSERT(false);
1298                         break;
1299                 }
1300         }
1301
1302         check_curr_backtracking(curr);
1303         set_backtracking(curr);
1304         return curr;
1305 }
1306
1307 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1308 {
1309         Node *currnode = curr->get_node();
1310         Node *parnode = currnode->get_parent();
1311
1312         if ((parnode && !parnode->backtrack_empty()) ||
1313                          !currnode->misc_empty() ||
1314                          !currnode->read_from_empty() ||
1315                          !currnode->future_value_empty() ||
1316                          !currnode->promise_empty() ||
1317                          !currnode->relseq_break_empty()) {
1318                 set_latest_backtrack(curr);
1319         }
1320 }
1321
1322 bool ModelChecker::promises_expired() const
1323 {
1324         for (unsigned int i = 0; i < promises->size(); i++) {
1325                 Promise *promise = (*promises)[i];
1326                 if (promise->get_expiration() < priv->used_sequence_numbers)
1327                         return true;
1328         }
1329         return false;
1330 }
1331
1332 /**
1333  * This is the strongest feasibility check available.
1334  * @return whether the current trace (partial or complete) must be a prefix of
1335  * a feasible trace.
1336  */
1337 bool ModelChecker::isfeasibleprefix() const
1338 {
1339         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1340 }
1341
1342 /**
1343  * Returns whether the current completed trace is feasible, except for pending
1344  * release sequences.
1345  */
1346 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1347 {
1348         if (DBG_ENABLED() && promises->size() != 0)
1349                 DEBUG("Infeasible: unresolved promises\n");
1350
1351         return !is_infeasible() && promises->size() == 0;
1352 }
1353
1354 /**
1355  * Check if the current partial trace is infeasible. Does not check any
1356  * end-of-execution flags, which might rule out the execution. Thus, this is
1357  * useful only for ruling an execution as infeasible.
1358  * @return whether the current partial trace is infeasible.
1359  */
1360 bool ModelChecker::is_infeasible() const
1361 {
1362         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1363                 DEBUG("Infeasible: RMW violation\n");
1364
1365         return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1366 }
1367
1368 /**
1369  * Check if the current partial trace is infeasible, while ignoring
1370  * infeasibility related to two RMWs reading from the same store. It does not
1371  * check end-of-execution feasibility.
1372  * @see ModelChecker::is_infeasible
1373  * @return whether the current partial trace is infeasible, ignoring multiple
1374  * RMWs reading from the same store.
1375  */
1376 bool ModelChecker::is_infeasible_ignoreRMW() const
1377 {
1378         if (DBG_ENABLED()) {
1379                 if (mo_graph->checkForCycles())
1380                         DEBUG("Infeasible: modification order cycles\n");
1381                 if (priv->failed_promise)
1382                         DEBUG("Infeasible: failed promise\n");
1383                 if (priv->too_many_reads)
1384                         DEBUG("Infeasible: too many reads\n");
1385                 if (priv->bad_synchronization)
1386                         DEBUG("Infeasible: bad synchronization ordering\n");
1387                 if (promises_expired())
1388                         DEBUG("Infeasible: promises expired\n");
1389         }
1390         return mo_graph->checkForCycles() || priv->failed_promise ||
1391                 priv->too_many_reads || priv->bad_synchronization ||
1392                 promises_expired();
1393 }
1394
1395 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1396 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1397         ModelAction *lastread = get_last_action(act->get_tid());
1398         lastread->process_rmw(act);
1399         if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1400                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1401                 mo_graph->commitChanges();
1402         }
1403         return lastread;
1404 }
1405
1406 /**
1407  * Checks whether a thread has read from the same write too many times
1408  * without seeing the effects of a later write.
1409  *
1410  * Basic idea:
1411  * 1) there must be a different write that we could read from that would satisfy the modification order,
1412  * 2) we must have read from the same value in excess of maxreads times, and
1413  * 3) that other write must have been in the reads_from set for maxreads times.
1414  *
1415  * If so, we decide that the execution is no longer feasible.
1416  */
1417 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1418 {
1419         if (params.maxreads != 0) {
1420                 if (curr->get_node()->get_read_from_size() <= 1)
1421                         return;
1422                 //Must make sure that the execution is currently feasible... we could
1423                 //accidentally clear the infeasibility state by rolling back
1424                 if (is_infeasible())
1425                         return;
1426                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1427                 int tid = id_to_int(curr->get_tid());
1428
1429                 /* Skip checks if this thread has no actions at this location yet */
1430                 if ((int)thrd_lists->size() <= tid)
1431                         return;
1432                 action_list_t *list = &(*thrd_lists)[tid];
1433
1434                 action_list_t::reverse_iterator rit = list->rbegin();
1435                 /* Skip past curr */
1436                 for (; (*rit) != curr; rit++)
1437                         ;
1438                 /* go past curr now */
1439                 rit++;
1440
1441                 action_list_t::reverse_iterator ritcopy = rit;
1442                 //See if we have enough reads from the same value
1443                 int count = 0;
1444                 for (; count < params.maxreads; rit++, count++) {
1445                         if (rit == list->rend())
1446                                 return;
1447                         ModelAction *act = *rit;
1448                         if (!act->is_read())
1449                                 return;
1450
1451                         if (act->get_reads_from() != rf)
1452                                 return;
1453                         if (act->get_node()->get_read_from_size() <= 1)
1454                                 return;
1455                 }
1456                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1457                         /* Get write */
1458                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1459
1460                         /* Need a different write */
1461                         if (write == rf)
1462                                 continue;
1463
1464                         /* Test to see whether this is a feasible write to read from */
1465                         mo_graph->startChanges();
1466                         r_modification_order(curr, write);
1467                         bool feasiblereadfrom = !is_infeasible();
1468                         mo_graph->rollbackChanges();
1469
1470                         if (!feasiblereadfrom)
1471                                 continue;
1472                         rit = ritcopy;
1473
1474                         bool feasiblewrite = true;
1475                         //now we need to see if this write works for everyone
1476
1477                         for (int loop = count; loop > 0; loop--, rit++) {
1478                                 ModelAction *act = *rit;
1479                                 bool foundvalue = false;
1480                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1481                                         if (act->get_node()->get_read_from_at(j) == write) {
1482                                                 foundvalue = true;
1483                                                 break;
1484                                         }
1485                                 }
1486                                 if (!foundvalue) {
1487                                         feasiblewrite = false;
1488                                         break;
1489                                 }
1490                         }
1491                         if (feasiblewrite) {
1492                                 priv->too_many_reads = true;
1493                                 return;
1494                         }
1495                 }
1496         }
1497 }
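
/*
 * Illustrative sketch (not part of this file): the kind of client code the
 * check above targets.  A relaxed spin-loop can keep re-reading the same
 * value even though a later store exists, so params.maxreads bounds how
 * often a thread may read from one write.  All names below are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> flag(0);

static void spinner()
{
	/* Every iteration is a fresh relaxed load; each may legally keep
	 * reading the initial value 0, so reads from that single write are
	 * unbounded unless the checker caps them. */
	while (flag.load(std::memory_order_relaxed) == 0)
		;
}

int main()
{
	std::thread t(spinner);
	flag.store(1, std::memory_order_relaxed);
	t.join();
	return 0;
}
#endif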
1498
1499 /**
1500  * Updates the mo_graph with the constraints imposed from the current
1501  * read.
1502  *
1503  * Basic idea is the following: Go through each other thread and find
1504  * the latest action that happened before our read.  Two cases:
1505  *
1506  * (1) The action is a write => that write must either occur before
1507  * the write we read from or be the write we read from.
1508  *
1509  * (2) The action is a read => the write that that action read from
1510  * must occur before the write we read from or be the same write.
1511  *
1512  * @param curr The current action. Must be a read.
1513  * @param rf The action that curr reads from. Must be a write.
1514  * @return True if modification order edges were added; false otherwise
1515  */
1516 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1517 {
1518         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1519         unsigned int i;
1520         bool added = false;
1521         ASSERT(curr->is_read());
1522
1523         /* Last SC fence in the current thread */
1524         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1525
1526         /* Iterate over all threads */
1527         for (i = 0; i < thrd_lists->size(); i++) {
1528                 /* Last SC fence in thread i */
1529                 ModelAction *last_sc_fence_thread_local = NULL;
1530                 if (int_to_id((int)i) != curr->get_tid())
1531                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1532
1533                 /* Last SC fence in thread i, before last SC fence in current thread */
1534                 ModelAction *last_sc_fence_thread_before = NULL;
1535                 if (last_sc_fence_local)
1536                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1537
1538                 /* Iterate over actions in thread, starting from most recent */
1539                 action_list_t *list = &(*thrd_lists)[i];
1540                 action_list_t::reverse_iterator rit;
1541                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1542                         ModelAction *act = *rit;
1543
1544                         if (act->is_write() && act != rf && act != curr) {
1545                                 /* C++, Section 29.3 statement 5 */
1546                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1547                                                 *act < *last_sc_fence_thread_local) {
1548                                         mo_graph->addEdge(act, rf);
1549                                         added = true;
1550                                         break;
1551                                 }
1552                                 /* C++, Section 29.3 statement 4 */
1553                                 else if (act->is_seqcst() && last_sc_fence_local &&
1554                                                 *act < *last_sc_fence_local) {
1555                                         mo_graph->addEdge(act, rf);
1556                                         added = true;
1557                                         break;
1558                                 }
1559                                 /* C++, Section 29.3 statement 6 */
1560                                 else if (last_sc_fence_thread_before &&
1561                                                 *act < *last_sc_fence_thread_before) {
1562                                         mo_graph->addEdge(act, rf);
1563                                         added = true;
1564                                         break;
1565                                 }
1566                         }
1567
1568                         /*
1569                          * Include at most one act per-thread that "happens
1570                          * before" curr. Don't consider curr itself (no reflexive edges).
1571                          */
1572                         if (act->happens_before(curr) && act != curr) {
1573                                 if (act->is_write()) {
1574                                         if (rf != act) {
1575                                                 mo_graph->addEdge(act, rf);
1576                                                 added = true;
1577                                         }
1578                                 } else {
1579                                         const ModelAction *prevreadfrom = act->get_reads_from();
1580                                         //if the previous read is unresolved, keep going...
1581                                         if (prevreadfrom == NULL)
1582                                                 continue;
1583
1584                                         if (rf != prevreadfrom) {
1585                                                 mo_graph->addEdge(prevreadfrom, rf);
1586                                                 added = true;
1587                                         }
1588                                 }
1589                                 break;
1590                         }
1591                 }
1592         }
1593
1594         return added;
1595 }
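
/*
 * Illustrative sketch (not part of this file): case (1) above on a small
 * litmus test.  If the reader synchronizes with writer() via y and then
 * reads x from other_writer(), the hb-earlier store x==1 must be ordered
 * before x==2 in the modification order.  Names are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0), y(0);

static void writer()
{
	x.store(1, std::memory_order_relaxed);
	y.store(1, std::memory_order_release);
}

static void other_writer()
{
	x.store(2, std::memory_order_relaxed);
}

static void reader()
{
	if (y.load(std::memory_order_acquire) == 1) {
		/* x==1 happens before this read; if it reads x==2, the
		 * checker adds the edge x==1 --mo--> x==2. */
		int r = x.load(std::memory_order_relaxed);
		(void)r;
	}
}

int main()
{
	std::thread a(writer), b(other_writer), c(reader);
	a.join(); b.join(); c.join();
	return 0;
}
#endif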
1596
1597 /** This method fixes up the modification order when we resolve a
1598  *  promise.  The basic problem is that actions that occur after the
1599  *  read curr could not properly add items to the modification order
1600  *  for our read.
1601  *
1602  *  So for each thread, we find the earliest item that happens after
1603  *  the read curr.  This is the item we have to fix up with additional
1604  *  constraints.  If that action is a write, we add an MO edge between
1605  *  the Action rf and that action.  If the action is a read, we add an
1606  *  MO edge between the Action rf and whatever the read accessed.
1607  *
1608  * @param curr is the read ModelAction that we are fixing up MO edges for.
1609  * @param rf is the write ModelAction that curr reads from.
1610  *
1611  */
1612 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1613 {
1614         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1615         unsigned int i;
1616         ASSERT(curr->is_read());
1617
1618         /* Iterate over all threads */
1619         for (i = 0; i < thrd_lists->size(); i++) {
1620                 /* Iterate over actions in thread, starting from most recent */
1621                 action_list_t *list = &(*thrd_lists)[i];
1622                 action_list_t::reverse_iterator rit;
1623                 ModelAction *lastact = NULL;
1624
1625                 /* Find the earliest action that happens after curr and is either not curr itself or an RMW */
1626                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1627                         ModelAction *act = *rit;
1628                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1629                                 lastact = act;
1630                         } else
1631                                 break;
1632                 }
1633
1634                 /* Fix up at most one action per thread: the earliest one that happens after curr */
1635                 if (lastact != NULL) {
1636                         if (lastact == curr) {
1637                                 //Case 1: The resolved read is an RMW, and we need to make sure
1638                                 //that the write portion of the RMW is mod ordered after rf
1639
1640                                 mo_graph->addEdge(rf, lastact);
1641                         } else if (lastact->is_read()) {
1642                                 //Case 2: The resolved read is a normal read and the next
1643                                 //operation is a read, and we need to make sure the value read
1644                                 //is mod ordered after rf
1645
1646                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1647                                 if (postreadfrom != NULL && rf != postreadfrom)
1648                                         mo_graph->addEdge(rf, postreadfrom);
1649                         } else {
1650                                 //Case 3: The resolved read is a normal read and the next
1651                                 //operation is a write, and we need to make sure that the
1652                                 //write is mod ordered after rf
1653                                 if (lastact != rf)
1654                                         mo_graph->addEdge(rf, lastact);
1655                         }
1656                         break;
1657                 }
1658         }
1659 }
1660
1661 /**
1662  * Updates the mo_graph with the constraints imposed from the current write.
1663  *
1664  * Basic idea is the following: Go through each other thread and find
1665  * the latest action that happened before our write.  Two cases:
1666  *
1667  * (1) The action is a write => that write must occur before
1668  * the current write
1669  *
1670  * (2) The action is a read => the write that that action read from
1671  * must occur before the current write.
1672  *
1673  * This method also handles two other issues:
1674  *
1675  * (I) Sequential Consistency: Making sure that if the current write is
1676  * seq_cst, that it occurs after the previous seq_cst write.
1677  *
1678  * (II) Sending the write back to non-synchronizing reads.
1679  *
1680  * @param curr The current action. Must be a write.
1681  * @return True if modification order edges were added; false otherwise
1682  */
1683 bool ModelChecker::w_modification_order(ModelAction *curr)
1684 {
1685         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1686         unsigned int i;
1687         bool added = false;
1688         ASSERT(curr->is_write());
1689
1690         if (curr->is_seqcst()) {
1691                 /* We have to at least see the last sequentially consistent write,
1692                    so that we are initialized. */
1693                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1694                 if (last_seq_cst != NULL) {
1695                         mo_graph->addEdge(last_seq_cst, curr);
1696                         added = true;
1697                 }
1698         }
1699
1700         /* Last SC fence in the current thread */
1701         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1702
1703         /* Iterate over all threads */
1704         for (i = 0; i < thrd_lists->size(); i++) {
1705                 /* Last SC fence in thread i, before last SC fence in current thread */
1706                 ModelAction *last_sc_fence_thread_before = NULL;
1707                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1708                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1709
1710                 /* Iterate over actions in thread, starting from most recent */
1711                 action_list_t *list = &(*thrd_lists)[i];
1712                 action_list_t::reverse_iterator rit;
1713                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1714                         ModelAction *act = *rit;
1715                         if (act == curr) {
1716                                 /*
1717                                  * 1) If RMW and it actually read from something, then we
1718                                  * already have all relevant edges, so just skip to next
1719                                  * thread.
1720                                  *
1721                                  * 2) If RMW and it didn't read from anything, we should add
1722                                  * whatever edge we can get to speed up convergence.
1723                                  *
1724                                  * 3) If normal write, we need to look at earlier actions, so
1725                                  * continue processing list.
1726                                  */
1727                                 if (curr->is_rmw()) {
1728                                         if (curr->get_reads_from() != NULL)
1729                                                 break;
1730                                         else
1731                                                 continue;
1732                                 } else
1733                                         continue;
1734                         }
1735
1736                         /* C++, Section 29.3 statement 7 */
1737                         if (last_sc_fence_thread_before && act->is_write() &&
1738                                         *act < *last_sc_fence_thread_before) {
1739                                 mo_graph->addEdge(act, curr);
1740                                 added = true;
1741                                 break;
1742                         }
1743
1744                         /*
1745                          * Include at most one act per-thread that "happens
1746                          * before" curr
1747                          */
1748                         if (act->happens_before(curr)) {
1749                                 /*
1750                                  * Note: if act is RMW, just add edge:
1751                                  *   act --mo--> curr
1752                                  * The following edge should be handled elsewhere:
1753                                  *   readfrom(act) --mo--> act
1754                                  */
1755                                 if (act->is_write())
1756                                         mo_graph->addEdge(act, curr);
1757                                 else if (act->is_read()) {
1758                                         //if previous read accessed a null, just keep going
1759                                         if (act->get_reads_from() == NULL)
1760                                                 continue;
1761                                         mo_graph->addEdge(act->get_reads_from(), curr);
1762                                 }
1763                                 added = true;
1764                                 break;
1765                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1766                                                      !act->same_thread(curr)) {
1767                                 /* We have an action that:
1768                                    (1) did not happen before us
1769                                    (2) is a read and we are a write
1770                                    (3) cannot synchronize with us
1771                                    (4) is in a different thread
1772                                    =>
1773                                    that read could potentially read from our write.  Note that
1774                                    these checks are overly conservative at this point, we'll
1775                                    do more checks before actually removing the
1776                                    pendingfuturevalue.
1777
1778                                  */
1779                                 if (thin_air_constraint_may_allow(curr, act)) {
1780                                         if (!is_infeasible() ||
1781                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1782                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1783                                         }
1784                                 }
1785                         }
1786                 }
1787         }
1788
1789         return added;
1790 }
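
/*
 * Illustrative sketch (not part of this file): case (2) above on a small
 * litmus test.  The load in t2 is sequenced before (hence happens before)
 * t2's store, so whatever write the load read from must be ordered before
 * x==2 in the modification order.  Names are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

static void t1()
{
	x.store(1, std::memory_order_release);
}

static void t2()
{
	int r = x.load(std::memory_order_acquire);
	/* If r == 1, the checker adds x==1 --mo--> x==2 when processing the
	 * store below. */
	x.store(2, std::memory_order_relaxed);
	(void)r;
}

int main()
{
	std::thread a(t1), b(t2);
	a.join(); b.join();
	return 0;
}
#endif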
1791
1792 /** Arbitrary reads from the future are not allowed.  Section 29.3
1793  * part 9 places some constraints.  This method checks one result of that
1794  * constraint.  Others require compiler support. */
1795 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1796 {
1797         if (!writer->is_rmw())
1798                 return true;
1799
1800         if (!reader->is_rmw())
1801                 return true;
1802
1803         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1804                 if (search == reader)
1805                         return false;
1806                 if (search->get_tid() == reader->get_tid() &&
1807                                 search->happens_before(reader))
1808                         break;
1809         }
1810
1811         return true;
1812 }
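
/*
 * Illustrative sketch (not part of this file): the reads-from cycle the
 * check above rules out.  Two concurrent RMWs on the same object may each
 * read the initial value or the other's result, but they cannot both read
 * from each other.  Names are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

static void inc()
{
	x.fetch_add(1, std::memory_order_relaxed);
}

int main()
{
	std::thread a(inc), b(inc);
	a.join(); b.join();
	return 0;
}
#endif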
1813
1814 /**
1815  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1816  * some constraints. This method checks the following constraint (others
1817  * require compiler support):
1818  *
1819  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1820  */
1821 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1822 {
1823         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1824         unsigned int i;
1825         /* Iterate over all threads */
1826         for (i = 0; i < thrd_lists->size(); i++) {
1827                 const ModelAction *write_after_read = NULL;
1828
1829                 /* Iterate over actions in thread, starting from most recent */
1830                 action_list_t *list = &(*thrd_lists)[i];
1831                 action_list_t::reverse_iterator rit;
1832                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1833                         ModelAction *act = *rit;
1834
1835                         /* Don't disallow due to act == reader */
1836                         if (!reader->happens_before(act) || reader == act)
1837                                 break;
1838                         else if (act->is_write())
1839                                 write_after_read = act;
1840                         else if (act->is_read() && act->get_reads_from() != NULL)
1841                                 write_after_read = act->get_reads_from();
1842                 }
1843
1844                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1845                         return false;
1846         }
1847         return true;
1848 }
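
/*
 * Illustrative sketch (not part of this file): the constraint above,
 * X --hb--> Y --mo--> Z forbids X reading from Z.  Here X is t1's load,
 * Y is t1's store of 1, and Z is t2's store of 2.  Names are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);

static void t1()
{
	int r = x.load(std::memory_order_relaxed);	/* X */
	x.store(1, std::memory_order_relaxed);		/* Y (X --hb--> Y) */
	(void)r;
}

static void t2()
{
	x.store(2, std::memory_order_relaxed);		/* Z */
}

int main()
{
	/* If the graph already has Y --mo--> Z, then X must not be offered
	 * x==2 as a (future) value to read. */
	std::thread a(t1), b(t2);
	a.join(); b.join();
	return 0;
}
#endif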
1849
1850 /**
1851  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1852  * The ModelAction under consideration is expected to be taking part in
1853  * release/acquire synchronization as an object of the "reads from" relation.
1854  * Note that this can only provide release sequence support for RMW chains
1855  * which do not read from the future, as those actions cannot be traced until
1856  * their "promise" is fulfilled. Similarly, we may not even establish the
1857  * presence of a release sequence with certainty, as some modification order
1858  * constraints may be decided further in the future. Thus, this function
1859  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1860  * and a boolean representing certainty.
1861  *
1862  * @param rf The action that might be part of a release sequence. Must be a
1863  * write.
1864  * @param release_heads A pass-by-reference style return parameter. After
1865  * execution of this function, release_heads will contain the heads of all the
1866  * relevant release sequences, if any exist with certainty.
1867  * @param pending A pass-by-reference style return parameter which is only used
1868  * when returning false (i.e., uncertain). Returns the relevant information regarding
1869  * an uncertain release sequence, including any write operations that might
1870  * break the sequence.
1871  * @return true, if the ModelChecker is certain that release_heads is complete;
1872  * false otherwise
1873  */
1874 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1875                 rel_heads_list_t *release_heads,
1876                 struct release_seq *pending) const
1877 {
1878         /* Only check for release sequences if there are no cycles */
1879         if (mo_graph->checkForCycles())
1880                 return false;
1881
1882         while (rf) {
1883                 ASSERT(rf->is_write());
1884
1885                 if (rf->is_release())
1886                         release_heads->push_back(rf);
1887                 else if (rf->get_last_fence_release())
1888                         release_heads->push_back(rf->get_last_fence_release());
1889                 if (!rf->is_rmw())
1890                         break; /* End of RMW chain */
1891
1892                 /** @todo Need to be smarter here...  In the linux lock
1893                  * example, this will run to the beginning of the program for
1894                  * every acquire. */
1895                 /** @todo The way to be smarter here is to keep going until one
1896                  * thread has a release preceded by an acquire and you've seen
1897                  * both. */
1898
1899                 /* acq_rel RMW is a sufficient stopping condition */
1900                 if (rf->is_acquire() && rf->is_release())
1901                         return true; /* complete */
1902
1903                 rf = rf->get_reads_from();
1904         }
1905         if (!rf) {
1906                 /* read from future: need to settle this later */
1907                 pending->rf = NULL;
1908                 return false; /* incomplete */
1909         }
1910
1911         if (rf->is_release())
1912                 return true; /* complete */
1913
1914         /* else relaxed write
1915          * - check for fence-release in the same thread (29.8, stmt. 3)
1916          * - check modification order for contiguous subsequence
1917          *   -> rf must be same thread as release */
1918
1919         const ModelAction *fence_release = rf->get_last_fence_release();
1920         /* Synchronize with a fence-release unconditionally; we don't need to
1921          * find any more "contiguous subsequence..." for it */
1922         if (fence_release)
1923                 release_heads->push_back(fence_release);
1924
1925         int tid = id_to_int(rf->get_tid());
1926         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1927         action_list_t *list = &(*thrd_lists)[tid];
1928         action_list_t::const_reverse_iterator rit;
1929
1930         /* Find rf in the thread list */
1931         rit = std::find(list->rbegin(), list->rend(), rf);
1932         ASSERT(rit != list->rend());
1933
1934         /* Find the last {write,fence}-release */
1935         for (; rit != list->rend(); rit++) {
1936                 if (fence_release && *(*rit) < *fence_release)
1937                         break;
1938                 if ((*rit)->is_release())
1939                         break;
1940         }
1941         if (rit == list->rend()) {
1942                 /* No write-release in this thread */
1943                 return true; /* complete */
1944         } else if (fence_release && *(*rit) < *fence_release) {
1945                 /* The fence-release is more recent (and so, "stronger") than
1946                  * the most recent write-release */
1947                 return true; /* complete */
1948         } /* else, need to establish contiguous release sequence */
1949         ModelAction *release = *rit;
1950
1951         ASSERT(rf->same_thread(release));
1952
1953         pending->writes.clear();
1954
1955         bool certain = true;
1956         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1957                 if (id_to_int(rf->get_tid()) == (int)i)
1958                         continue;
1959                 list = &(*thrd_lists)[i];
1960
1961                 /* Can we ensure no future writes from this thread may break
1962                  * the release seq? */
1963                 bool future_ordered = false;
1964
1965                 ModelAction *last = get_last_action(int_to_id(i));
1966                 Thread *th = get_thread(int_to_id(i));
1967                 if ((last && rf->happens_before(last)) ||
1968                                 !is_enabled(th) ||
1969                                 th->is_complete())
1970                         future_ordered = true;
1971
1972                 ASSERT(!th->is_model_thread() || future_ordered);
1973
1974                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1975                         const ModelAction *act = *rit;
1976                         /* Reach synchronization -> this thread is complete */
1977                         if (act->happens_before(release))
1978                                 break;
1979                         if (rf->happens_before(act)) {
1980                                 future_ordered = true;
1981                                 continue;
1982                         }
1983
1984                         /* Only non-RMW writes can break release sequences */
1985                         if (!act->is_write() || act->is_rmw())
1986                                 continue;
1987
1988                         /* Check modification order */
1989                         if (mo_graph->checkReachable(rf, act)) {
1990                                 /* rf --mo--> act */
1991                                 future_ordered = true;
1992                                 continue;
1993                         }
1994                         if (mo_graph->checkReachable(act, release))
1995                                 /* act --mo--> release */
1996                                 break;
1997                         if (mo_graph->checkReachable(release, act) &&
1998                                       mo_graph->checkReachable(act, rf)) {
1999                                 /* release --mo-> act --mo--> rf */
2000                                 return true; /* complete */
2001                         }
2002                         /* act may break release sequence */
2003                         pending->writes.push_back(act);
2004                         certain = false;
2005                 }
2006                 if (!future_ordered)
2007                         certain = false; /* This thread is uncertain */
2008         }
2009
2010         if (certain) {
2011                 release_heads->push_back(release);
2012                 pending->writes.clear();
2013         } else {
2014                 pending->release = release;
2015                 pending->rf = rf;
2016         }
2017         return certain;
2018 }
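
/*
 * Illustrative sketch (not part of this file): a release sequence headed by
 * producer()'s release store and continued by updater()'s relaxed RMW.  A
 * consumer that reads from the RMW still synchronizes with the head, which
 * is what the search above establishes.  Names are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0);
static int data;

static void producer()
{
	data = 42;
	x.store(1, std::memory_order_release);		/* release head */
}

static void updater()
{
	x.fetch_add(1, std::memory_order_relaxed);	/* continues the sequence */
}

static void consumer()
{
	/* x == 2 implies the RMW read from the release head, so this acquire
	 * load synchronizes with producer() and the read of data is ordered. */
	if (x.load(std::memory_order_acquire) == 2)
		(void)data;
}

int main()
{
	std::thread a(producer), b(updater), c(consumer);
	a.join(); b.join(); c.join();
	return 0;
}
#endif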
2019
2020 /**
2021  * An interface for getting the release sequence head(s) with which a
2022  * given ModelAction must synchronize. This function only returns a non-empty
2023  * result when it can locate a release sequence head with certainty. Otherwise,
2024  * it may mark the internal state of the ModelChecker so that it will handle
2025  * the release sequence at a later time, causing @a acquire to update its
2026  * synchronization at some later point in execution.
2027  *
2028  * @param acquire The 'acquire' action that may synchronize with a release
2029  * sequence
2030  * @param read The read action that may read from a release sequence; this may
2031  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2032  * when 'acquire' is a fence-acquire)
2033  * @param release_heads A pass-by-reference return parameter. Will be filled
2034  * with the head(s) of the release sequence(s), if they exist with certainty.
2035  * @see ModelChecker::release_seq_heads
2036  */
2037 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2038                 ModelAction *read, rel_heads_list_t *release_heads)
2039 {
2040         const ModelAction *rf = read->get_reads_from();
2041         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2042         sequence->acquire = acquire;
2043         sequence->read = read;
2044
2045         if (!release_seq_heads(rf, release_heads, sequence)) {
2046                 /* add act to 'lazy checking' list */
2047                 pending_rel_seqs->push_back(sequence);
2048         } else {
2049                 snapshot_free(sequence);
2050         }
2051 }
2052
2053 /**
2054  * Attempt to resolve all stashed operations that might synchronize with a
2055  * release sequence for a given location. This implements the "lazy" portion of
2056  * determining whether or not a release sequence was contiguous, since not all
2057  * modification order information is present at the time an action occurs.
2058  *
2059  * @param location The location/object that should be checked for release
2060  * sequence resolutions. A NULL value means to check all locations.
2061  * @param work_queue The work queue to which to add work items as they are
2062  * generated
2063  * @return True if any updates occurred (new synchronization, new mo_graph
2064  * edges)
2065  */
2066 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2067 {
2068         bool updated = false;
2069         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2070         while (it != pending_rel_seqs->end()) {
2071                 struct release_seq *pending = *it;
2072                 ModelAction *acquire = pending->acquire;
2073                 const ModelAction *read = pending->read;
2074
2075                 /* Only resolve sequences on the given location, if provided */
2076                 if (location && read->get_location() != location) {
2077                         it++;
2078                         continue;
2079                 }
2080
2081                 const ModelAction *rf = read->get_reads_from();
2082                 rel_heads_list_t release_heads;
2083                 bool complete;
2084                 complete = release_seq_heads(rf, &release_heads, pending);
2085                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2086                         if (!acquire->has_synchronized_with(release_heads[i])) {
2087                                 if (acquire->synchronize_with(release_heads[i]))
2088                                         updated = true;
2089                                 else
2090                                         set_bad_synchronization();
2091                         }
2092                 }
2093
2094                 if (updated) {
2095                         /* Re-check all pending release sequences */
2096                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2097                         /* Re-check read-acquire for mo_graph edges */
2098                         if (acquire->is_read())
2099                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2100
2101                         /* propagate synchronization to later actions */
2102                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2103                         for (; (*rit) != acquire; rit++) {
2104                                 ModelAction *propagate = *rit;
2105                                 if (acquire->happens_before(propagate)) {
2106                                         propagate->synchronize_with(acquire);
2107                                         /* Re-check 'propagate' for mo_graph edges */
2108                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2109                                 }
2110                         }
2111                 }
2112                 if (complete) {
2113                         it = pending_rel_seqs->erase(it);
2114                         snapshot_free(pending);
2115                 } else {
2116                         it++;
2117                 }
2118         }
2119
2120         // If we resolved any release sequences (new synchronization), check whether we have realized a data race.
2121         checkDataRaces();
2122
2123         return updated;
2124 }
2125
2126 /**
2127  * Performs various bookkeeping operations for the current ModelAction. For
2128  * instance, adds action to the per-object, per-thread action vector and to the
2129  * action trace list of all thread actions.
2130  *
2131  * @param act is the ModelAction to add.
2132  */
2133 void ModelChecker::add_action_to_lists(ModelAction *act)
2134 {
2135         int tid = id_to_int(act->get_tid());
2136         ModelAction *uninit = NULL;
2137         int uninit_id = -1;
2138         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2139         if (list->empty() && act->is_atomic_var()) {
2140                 uninit = new_uninitialized_action(act->get_location());
2141                 uninit_id = id_to_int(uninit->get_tid());
2142                 list->push_back(uninit);
2143         }
2144         list->push_back(act);
2145
2146         action_trace->push_back(act);
2147         if (uninit)
2148                 action_trace->push_front(uninit);
2149
2150         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2151         if (tid >= (int)vec->size())
2152                 vec->resize(priv->next_thread_id);
2153         (*vec)[tid].push_back(act);
2154         if (uninit)
2155                 (*vec)[uninit_id].push_front(uninit);
2156
2157         if ((int)thrd_last_action->size() <= tid)
2158                 thrd_last_action->resize(get_num_threads());
2159         (*thrd_last_action)[tid] = act;
2160         if (uninit)
2161                 (*thrd_last_action)[uninit_id] = uninit;
2162
2163         if (act->is_fence() && act->is_release()) {
2164                 if ((int)thrd_last_fence_release->size() <= tid)
2165                         thrd_last_fence_release->resize(get_num_threads());
2166                 (*thrd_last_fence_release)[tid] = act;
2167         }
2168
2169         if (act->is_wait()) {
2170                 void *mutex_loc = (void *) act->get_value();
2171                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2172
2173                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2174                 if (tid >= (int)vec->size())
2175                         vec->resize(priv->next_thread_id);
2176                 (*vec)[tid].push_back(act);
2177         }
2178 }
2179
2180 /**
2181  * @brief Get the last action performed by a particular Thread
2182  * @param tid The thread ID of the Thread in question
2183  * @return The last action in the thread
2184  */
2185 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2186 {
2187         int threadid = id_to_int(tid);
2188         if (threadid < (int)thrd_last_action->size())
2189                 return (*thrd_last_action)[id_to_int(tid)];
2190         else
2191                 return NULL;
2192 }
2193
2194 /**
2195  * @brief Get the last fence release performed by a particular Thread
2196  * @param tid The thread ID of the Thread in question
2197  * @return The last fence release in the thread, if one exists; NULL otherwise
2198  */
2199 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2200 {
2201         int threadid = id_to_int(tid);
2202         if (threadid < (int)thrd_last_fence_release->size())
2203                 return (*thrd_last_fence_release)[id_to_int(tid)];
2204         else
2205                 return NULL;
2206 }
2207
2208 /**
2209  * Gets the last memory_order_seq_cst write (in the total global sequence)
2210  * performed on a particular object (i.e., memory location), not including the
2211  * current action.
2212  * @param curr The current ModelAction; also denotes the object location to
2213  * check
2214  * @return The last seq_cst write
2215  */
2216 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2217 {
2218         void *location = curr->get_location();
2219         action_list_t *list = get_safe_ptr_action(obj_map, location);
2220         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2221         action_list_t::reverse_iterator rit;
2222         for (rit = list->rbegin(); rit != list->rend(); rit++)
2223                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2224                         return *rit;
2225         return NULL;
2226 }
2227
2228 /**
2229  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2230  * performed in a particular thread, prior to a particular fence.
2231  * @param tid The ID of the thread to check
2232  * @param before_fence The fence from which to begin the search; if NULL, then
2233  * search for the most recent fence in the thread.
2234  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2235  */
2236 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2237 {
2238         /* All fences should have NULL location */
2239         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2240         action_list_t::reverse_iterator rit = list->rbegin();
2241
2242         if (before_fence) {
2243                 for (; rit != list->rend(); rit++)
2244                         if (*rit == before_fence)
2245                                 break;
2246
2247                 ASSERT(*rit == before_fence);
2248                 rit++;
2249         }
2250
2251         for (; rit != list->rend(); rit++)
2252                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2253                         return *rit;
2254         return NULL;
2255 }
2256
2257 /**
2258  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2259  * location). This function identifies the mutex according to the current
2260  * action, which is presumed to operate on the same mutex.
2261  * @param curr The current ModelAction; also denotes the object location to
2262  * check
2263  * @return The last unlock operation
2264  */
2265 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2266 {
2267         void *location = curr->get_location();
2268         action_list_t *list = get_safe_ptr_action(obj_map, location);
2269         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2270         action_list_t::reverse_iterator rit;
2271         for (rit = list->rbegin(); rit != list->rend(); rit++)
2272                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2273                         return *rit;
2274         return NULL;
2275 }
2276
2277 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2278 {
2279         ModelAction *parent = get_last_action(tid);
2280         if (!parent)
2281                 parent = get_thread(tid)->get_creation();
2282         return parent;
2283 }
2284
2285 /**
2286  * Returns the clock vector for a given thread.
2287  * @param tid The thread whose clock vector we want
2288  * @return Desired clock vector
2289  */
2290 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2291 {
2292         return get_parent_action(tid)->get_cv();
2293 }
2294
2295 /**
2296  * Resolve a set of Promises with a current write. The set is provided in the
2297  * Node corresponding to @a write.
2298  * @param write The ModelAction that is fulfilling Promises
2299  * @return True if promises were resolved; false otherwise
2300  */
2301 bool ModelChecker::resolve_promises(ModelAction *write)
2302 {
2303         bool resolved = false;
2304         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2305
2306         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2307                 Promise *promise = (*promises)[promise_index];
2308                 if (write->get_node()->get_promise(i)) {
2309                         ModelAction *read = promise->get_action();
2310                         if (read->is_rmw()) {
2311                                 mo_graph->addRMWEdge(write, read);
2312                         }
2313                         read_from(read, write);
2314                         //First fix up the modification order for actions that happened
2315                         //before the read
2316                         r_modification_order(read, write);
2317                         //Next fix up the modification order for actions that happened
2318                         //after the read.
2319                         post_r_modification_order(read, write);
2320                         //Make sure the promise's value matches the write's value
2321                         ASSERT(promise->get_value() == write->get_value());
2322                         delete(promise);
2323
2324                         promises->erase(promises->begin() + promise_index);
2325                         threads_to_check.push_back(read->get_tid());
2326
2327                         resolved = true;
2328                 } else
2329                         promise_index++;
2330         }
2331
2332         //Check whether reading these writes has made threads unable to
2333         //resolve promises
2334
2335         for (unsigned int i = 0; i < threads_to_check.size(); i++)
2336                 mo_check_promises(threads_to_check[i], write);
2337
2338         return resolved;
2339 }
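
/*
 * Illustrative sketch (not part of this file): the load-buffering litmus
 * test in which promises arise.  Either load may be given a "future value"
 * from the store in the other thread; the matching store later resolves
 * that promise via the machinery above.  Names are hypothetical.
 */
#if 0
#include <atomic>
#include <thread>

static std::atomic<int> x(0), y(0);

static void t1()
{
	int r1 = x.load(std::memory_order_relaxed);	/* may read 1 from t2 */
	y.store(1, std::memory_order_relaxed);
	(void)r1;
}

static void t2()
{
	int r2 = y.load(std::memory_order_relaxed);	/* may read 1 from t1 */
	x.store(1, std::memory_order_relaxed);
	(void)r2;
}

int main()
{
	std::thread a(t1), b(t2);
	a.join(); b.join();
	return 0;
}
#endif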
2340
2341 /**
2342  * Compute the set of promises that could potentially be satisfied by this
2343  * action. Note that the set computation actually appears in the Node, not in
2344  * ModelChecker.
2345  * @param curr The ModelAction that may satisfy promises
2346  */
2347 void ModelChecker::compute_promises(ModelAction *curr)
2348 {
2349         for (unsigned int i = 0; i < promises->size(); i++) {
2350                 Promise *promise = (*promises)[i];
2351                 const ModelAction *act = promise->get_action();
2352                 if (!act->happens_before(curr) &&
2353                                 act->is_read() &&
2354                                 !act->could_synchronize_with(curr) &&
2355                                 !act->same_thread(curr) &&
2356                                 act->get_location() == curr->get_location() &&
2357                                 promise->get_value() == curr->get_value()) {
2358                         curr->get_node()->set_promise(i, act->is_rmw());
2359                 }
2360         }
2361 }
2362
2363 /** Checks promises in response to a change in a thread's ClockVector. */
2364 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2365 {
2366         for (unsigned int i = 0; i < promises->size(); i++) {
2367                 Promise *promise = (*promises)[i];
2368                 const ModelAction *act = promise->get_action();
2369                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2370                                 merge_cv->synchronized_since(act)) {
2371                         if (promise->increment_threads(tid)) {
2372                                 //Promise has failed
2373                                 priv->failed_promise = true;
2374                                 return;
2375                         }
2376                 }
2377         }
2378 }
2379
2380 void ModelChecker::check_promises_thread_disabled() {
2381         for (unsigned int i = 0; i < promises->size(); i++) {
2382                 Promise *promise = (*promises)[i];
2383                 if (promise->check_promise()) {
2384                         priv->failed_promise = true;
2385                         return;
2386                 }
2387         }
2388 }
2389
2390 /** Checks promises in response to addition to modification order for threads.
2391  * Definitions:
2392  * pthread is the thread that performed the read that created the promise
2393  *
2394  * pread is the read that created the promise
2395  *
2396  * pwrite is either the first write to the same location as pread by
2397  * pthread that is sequenced after pread, or the value read by the
2398  * first read to the same location as pread by pthread that is
2399  * sequenced after pread.
2400  *
2401  * 1. If tid=pthread, then we check what other threads are reachable
2402  * through the modification order starting with pwrite.  Those threads cannot
2403  * perform a write that will resolve the promise due to modification
2404  * order constraints.
2405  *
2406  * 2. If the tid is not pthread, we check whether pwrite can reach the
2407  * action write through the modification order.  If so, that thread
2408  * cannot perform a future write that will resolve the promise due to
2409  * modification order constraints.
2410  *
2411  * @param tid The thread that either read from the model action
2412  * write, or actually did the model action write.
2413  *
2414  * @param write The ModelAction representing the relevant write.
2415  */
2416 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2417 {
2418         void *location = write->get_location();
2419         for (unsigned int i = 0; i < promises->size(); i++) {
2420                 Promise *promise = (*promises)[i];
2421                 const ModelAction *act = promise->get_action();
2422
2423                 //Is this promise on the same location?
2424                 if (act->get_location() != location)
2425                         continue;
2426
2427                 //same thread as the promise
2428                 if (act->get_tid() == tid) {
2429
2430                         //do we have a pwrite for the promise? If not, set it
2431                         if (promise->get_write() == NULL) {
2432                                 promise->set_write(write);
2433                                 //The pwrite cannot happen before the promise
2434                                 if (write->happens_before(act) && (write != act)) {
2435                                         priv->failed_promise = true;
2436                                         return;
2437                                 }
2438                         }
2439                         if (mo_graph->checkPromise(write, promise)) {
2440                                 priv->failed_promise = true;
2441                                 return;
2442                         }
2443                 }
2444
2445                 //Don't do any lookups twice for the same thread
2446                 if (promise->has_sync_thread(tid))
2447                         continue;
2448
2449                 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2450                         if (promise->increment_threads(tid)) {
2451                                 priv->failed_promise = true;
2452                                 return;
2453                         }
2454                 }
2455         }
2456 }
2457
2458 /**
2459  * Compute the set of writes that may break the current pending release
2460  * sequence. This information is extracted from previous release sequence
2461  * calculations.
2462  *
2463  * @param curr The current ModelAction. Must be a release sequence fixup
2464  * action.
2465  */
2466 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2467 {
2468         if (pending_rel_seqs->empty())
2469                 return;
2470
2471         struct release_seq *pending = pending_rel_seqs->back();
2472         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2473                 const ModelAction *write = pending->writes[i];
2474                 curr->get_node()->add_relseq_break(write);
2475         }
2476
2477         /* NULL means don't break the sequence; just synchronize */
2478         curr->get_node()->add_relseq_break(NULL);
2479 }
2480
2481 /**
2482  * Build up an initial set of all past writes that this 'read' action may read
2483  * from. This set is determined by the clock vector's "happens before"
2484  * relationship.
2485  * @param curr is the current ModelAction that we are exploring; it must be a
2486  * 'read' operation.
2487  */
2488 void ModelChecker::build_reads_from_past(ModelAction *curr)
2489 {
2490         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2491         unsigned int i;
2492         ASSERT(curr->is_read());
2493
2494         ModelAction *last_sc_write = NULL;
2495
2496         if (curr->is_seqcst())
2497                 last_sc_write = get_last_seq_cst_write(curr);
2498
2499         /* Iterate over all threads */
2500         for (i = 0; i < thrd_lists->size(); i++) {
2501                 /* Iterate over actions in thread, starting from most recent */
2502                 action_list_t *list = &(*thrd_lists)[i];
2503                 action_list_t::reverse_iterator rit;
2504                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2505                         ModelAction *act = *rit;
2506
2507                         /* Only consider 'write' actions */
2508                         if (!act->is_write() || act == curr)
2509                                 continue;
2510
2511                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2512                         bool allow_read = true;
2513
2514                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2515                                 allow_read = false;
2516                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2517                                 allow_read = false;
2518
2519                         if (allow_read)
2520                                 curr->get_node()->add_read_from(act);
2521
2522                         /* Include at most one act per-thread that "happens before" curr */
2523                         if (act->happens_before(curr))
2524                                 break;
2525                 }
2526         }
2527
2528         if (DBG_ENABLED()) {
2529                 model_print("Reached read action:\n");
2530                 curr->print();
2531                 model_print("Printing may_read_from\n");
2532                 curr->get_node()->print_may_read_from();
2533                 model_print("End printing may_read_from\n");
2534         }
2535 }
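
/*
 * Worked example (a sketch using illustrative user code, not from this
 * file): suppose thread 1 performs a relaxed store of 1 and then a release
 * store of 2 to some atomic x, while thread 2 performs an acquire load of x
 * followed by a relaxed load of x. In an execution where the acquire load
 * reads 2, the reverse walk over thread 1's writes adds the store of 2 to
 * the second load's may_read_from set and then stops, because that store
 * happens before the load via the release/acquire synchronization; the
 * store of 1 is never added. Writes in threads that have no happens-before
 * relation to the load remain candidates.
 */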
2536
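/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Walks backward along the RMW reads-from chain starting at @a write. The
 * read is allowed if the chain reaches an uninitialized write (or an RMW
 * with no recorded reads-from), or if some write in the chain is a release
 * performed while the reading thread was on the sleep set; otherwise it is
 * disallowed as soon as a non-RMW write fails those tests.
 *
 * @param curr The 'read' action whose thread is on the sleep set
 * @param write The candidate write that @a curr might read from
 * @return True if the sleeping read may read from @a write
 */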
2537 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2538 {
2539         while (true) {
2540                 /* UNINIT actions don't have a Node, and they never sleep */
2541                 if (write->is_uninitialized())
2542                         return true;
2543                 Node *prevnode = write->get_node()->get_parent();
2544
2545                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2546                 if (write->is_release() && thread_sleep)
2547                         return true;
2548                 if (!write->is_rmw()) {
2549                         return false;
2550                 }
2551                 if (write->get_reads_from() == NULL)
2552                         return true;
2553                 write = write->get_reads_from();
2554         }
2555 }
2556
2557 /**
2558  * @brief Create a new action representing an uninitialized atomic
2559  * @param location The memory location of the atomic object
2560  * @return A pointer to a new ModelAction
2561  */
2562 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2563 {
2564         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2565         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2566         act->create_cv(NULL);
2567         return act;
2568 }
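
/*
 * The allocation above pairs snapshot_malloc() with placement new (from
 * <new>) so the action is constructed in snapshotted memory. A minimal
 * sketch of the same idiom, with purely illustrative names:
 *
 *   void *mem = snapshot_malloc(sizeof(ModelAction));
 *   ModelAction *a = new (mem) ModelAction(ATOMIC_UNINIT,
 *                   std::memory_order_relaxed, location, 0, model_thread);
 *   // teardown would invoke the destructor explicitly before freeing mem
 */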
2569
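/**
 * @brief Print each action in a list along with a hash of the whole trace
 * @param list The action list to print
 * @param exec_num The execution number used to label the output, or a
 * negative value to omit the label
 */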
2570 static void print_list(action_list_t *list, int exec_num = -1)
2571 {
2572         action_list_t::iterator it;
2573
2574         model_print("---------------------------------------------------------------------\n");
2575         if (exec_num >= 0)
2576                 model_print("Execution %d:\n", exec_num);
2577
2578         unsigned int hash = 0;
2579
2580         for (it = list->begin(); it != list->end(); it++) {
2581                 (*it)->print();
2582                 hash = hash^(hash<<3)^((*it)->hash());
2583         }
2584         model_print("HASH %u\n", hash);
2585         model_print("---------------------------------------------------------------------\n");
2586 }
2587
2588 #if SUPPORT_MOD_ORDER_DUMP
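/**
 * @brief Write the current trace's modification-order graph, reads-from
 * edges, and per-thread sequenced-before edges as a Graphviz dot file
 * @param filename Base name for the output file; ".dot" is appended
 */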
2589 void ModelChecker::dumpGraph(char *filename) const
2590 {
2591         char buffer[200];
2592         sprintf(buffer, "%s.dot", filename);
2593         FILE *file = fopen(buffer, "w");
2594         fprintf(file, "digraph %s {\n", filename);
2595         mo_graph->dumpNodes(file);
2596         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2597
2598         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2599                 ModelAction *action = *it;
2600                 if (action->is_read()) {
2601                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2602                         if (action->get_reads_from() != NULL)
2603                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2604                 }
2605                 if (thread_array[action->get_tid()] != NULL) {
2606                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2607                 }
2608
2609                 thread_array[action->get_tid()] = action;
2610         }
2611         fprintf(file, "}\n");
2612         model_free(thread_array);
2613         fclose(file);
2614 }
2615 #endif
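
/*
 * The files produced by dumpGraph()/dumpGraphToFile() are plain Graphviz
 * sources. As an illustrative way to render one (assuming Graphviz is
 * installed; the file name is just an example):
 *
 *   dot -Tpng graph0000.dot -o graph0000.png
 */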
2616
2617 /** @brief Prints an execution trace summary. */
2618 void ModelChecker::print_summary() const
2619 {
2620 #if SUPPORT_MOD_ORDER_DUMP
2621         scheduler->print();
2622         char buffername[100];
2623         sprintf(buffername, "exec%04u", stats.num_total);
2624         mo_graph->dumpGraphToFile(buffername);
2625         sprintf(buffername, "graph%04u", stats.num_total);
2626         dumpGraph(buffername);
2627 #endif
2628
2629         if (!isfeasibleprefix())
2630                 model_print("INFEASIBLE EXECUTION!\n");
2631         print_list(action_trace, stats.num_total);
2632         model_print("\n");
2633 }
2634
2635 /**
2636  * Add a Thread to the system for the first time. Should only be called once
2637  * per thread.
2638  * @param t The Thread to add
2639  */
2640 void ModelChecker::add_thread(Thread *t)
2641 {
2642         thread_map->put(id_to_int(t->get_id()), t);
2643         scheduler->add_thread(t);
2644 }
2645
2646 /**
2647  * Removes a thread from the scheduler.
2648  * @param t The Thread to remove
2649  */
2650 void ModelChecker::remove_thread(Thread *t)
2651 {
2652         scheduler->remove_thread(t);
2653 }
2654
2655 /**
2656  * @brief Get a Thread reference by its ID
2657  * @param tid The Thread's ID
2658  * @return A Thread reference
2659  */
2660 Thread * ModelChecker::get_thread(thread_id_t tid) const
2661 {
2662         return thread_map->get(id_to_int(tid));
2663 }
2664
2665 /**
2666  * @brief Get a reference to the Thread in which a ModelAction was executed
2667  * @param act The ModelAction
2668  * @return A Thread reference
2669  */
2670 Thread * ModelChecker::get_thread(ModelAction *act) const
2671 {
2672         return get_thread(act->get_tid());
2673 }
2674
2675 /**
2676  * @brief Check if a Thread is currently enabled
2677  * @param t The Thread to check
2678  * @return True if the Thread is currently enabled
2679  */
2680 bool ModelChecker::is_enabled(Thread *t) const
2681 {
2682         return scheduler->is_enabled(t);
2683 }
2684
2685 /**
2686  * @brief Check if a Thread is currently enabled
2687  * @param tid The ID of the Thread to check
2688  * @return True if the Thread is currently enabled
2689  */
2690 bool ModelChecker::is_enabled(thread_id_t tid) const
2691 {
2692         return scheduler->is_enabled(tid);
2693 }
2694
2695 /**
2696  * Switch from a user-context to the "master thread" context (a.k.a. system
2697  * context). This switch is made with the intention of exploring a particular
2698  * model-checking action (described by a ModelAction object). Must be called
2699  * from a user-thread context.
2700  *
2701  * @param act The current action that will be explored. May be NULL only if
2702  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2703  * ModelChecker::has_asserted).
2704  * @return The value returned by the current action
2705  */
2706 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2707 {
2708         DBG();
2709         Thread *old = thread_current();
2710         set_current_action(act);
2711         old->set_state(THREAD_READY);
2712         if (Thread::swap(old, &system_context) < 0) {
2713                 perror("swap threads");
2714                 exit(EXIT_FAILURE);
2715         }
2716         return old->get_return_value();
2717 }
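
/*
 * Illustrative caller sketch (hypothetical names; the real user-context
 * wrappers live outside this file): an atomic-load shim packages its
 * operation as a ModelAction, yields to the master context, and returns
 * whatever value the checker selected for it:
 *
 *   // inside a user-context wrapper, for some location 'obj':
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, std::memory_order_acquire, obj));
 */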
2718
2719 /**
2720  * Takes the next step in the execution, if possible.
2721  * @param curr The current step to take
2722  * @return True if a step was taken; false otherwise
2723  */
2724 bool ModelChecker::take_step(ModelAction *curr)
2725 {
2726         if (has_asserted())
2727                 return false;
2728
2729         Thread *curr_thrd = get_thread(curr);
2730         ASSERT(curr_thrd->get_state() == THREAD_READY);
2731
2732         curr = check_current_action(curr);
2733
2734         /* Infeasible -> don't take any more steps */
2735         if (is_infeasible())
2736                 return false;
2737         else if (isfeasibleprefix() && have_bug_reports()) {
2738                 set_assert();
2739                 return false;
2740         }
2741
2742         if (params.bound != 0)
2743                 if (priv->used_sequence_numbers > params.bound)
2744                         return false;
2745
2746         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2747                 scheduler->remove_thread(curr_thrd);
2748
2749         Thread *next_thrd = get_next_thread(curr);
2750         next_thrd = scheduler->next_thread(next_thrd);
2751
2752         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2753                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2754
2755         /*
2756          * Launch end-of-execution release sequence fixups only when there are:
2757          *
2758          * (1) no more user threads to run (or when execution replay chooses
2759          *     the 'model_thread')
2760          * (2) pending release sequences
2761          * (3) pending assertions (i.e., data races)
2762          * (4) no pending promises
2763          */
2764         if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2765                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2766                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2767                                 pending_rel_seqs->size());
2768                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2769                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2770                                 model_thread);
2771                 set_current_action(fixup);
2772                 return true;
2773         }
2774
2775         /* next_thrd == NULL -> don't take any more steps */
2776         if (!next_thrd)
2777                 return false;
2778
2779         next_thrd->set_state(THREAD_RUNNING);
2780
2781         if (next_thrd->get_pending() != NULL) {
2782                 /* restart a pending action */
2783                 set_current_action(next_thrd->get_pending());
2784                 next_thrd->set_pending(NULL);
2785                 next_thrd->set_state(THREAD_READY);
2786                 return true;
2787         }
2788
2789         /* Return false only if swap fails with an error */
2790         return (Thread::swap(&system_context, next_thrd) == 0);
2791 }
2792
2793 /** Wrapper to run the user's main function, with appropriate arguments */
2794 void user_main_wrapper(void *)
2795 {
2796         user_main(model->params.argc, model->params.argv);
2797 }
2798
2799 /** @brief Run ModelChecker for the user program */
2800 void ModelChecker::run()
2801 {
2802         do {
2803                 thrd_t user_thread;
2804                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2805
2806                 add_thread(t);
2807
2808                 /* Run user thread up to its first action */
2809                 scheduler->next_thread(t);
2810                 Thread::swap(&system_context, t);
2811
2812                 /* Wait for all threads to complete */
2813                 while (take_step(priv->current_action));
2814         } while (next_execution());
2815
2816         print_stats();
2817 }
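
/*
 * Illustrative entry sequence (a sketch; the real driver lives in the
 * checker's main translation unit): construct the global checker with
 * parsed parameters, then let run() iterate executions until
 * next_execution() returns false:
 *
 *   struct model_params params;
 *   // ... fill params from the command line ...
 *   model = new ModelChecker(params);
 *   model->run();
 */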