model: set thread state during 'swap' calls
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 current_action(NULL),
43                 /* First thread created will have id INITIAL_THREAD_ID */
44                 next_thread_id(INITIAL_THREAD_ID),
45                 used_sequence_numbers(0),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 no_valid_reads(false),
52                 bad_synchronization(false),
53                 asserted(false)
54         { }
55
56         ~model_snapshot_members() {
57                 for (unsigned int i = 0; i < bugs.size(); i++)
58                         delete bugs[i];
59                 bugs.clear();
60         }
61
62         ModelAction *current_action;
63         unsigned int next_thread_id;
64         modelclock_t used_sequence_numbers;
65         ModelAction *next_backtrack;
66         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
67         struct execution_stats stats;
68         bool failed_promise;
69         bool too_many_reads;
70         bool no_valid_reads;
71         /** @brief Incorrectly-ordered synchronization was made */
72         bool bad_synchronization;
73         bool asserted;
74
75         SNAPSHOTALLOC
76 };
77
78 /** @brief Constructor */
79 ModelChecker::ModelChecker(struct model_params params) :
80         /* Initialize default scheduler */
81         params(params),
82         scheduler(new Scheduler()),
83         diverge(NULL),
84         earliest_diverge(NULL),
85         action_trace(new action_list_t()),
86         thread_map(new HashTable<int, Thread *, int>()),
87         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
90         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
91         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
92         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
93         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
94         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
95         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
96         node_stack(new NodeStack()),
97         priv(new struct model_snapshot_members()),
98         mo_graph(new CycleGraph())
99 {
100         /* Initialize a model-checker thread, for special ModelActions */
101         model_thread = new Thread(get_next_id());
102         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 }
104
105 /** @brief Destructor */
106 ModelChecker::~ModelChecker()
107 {
108         for (unsigned int i = 0; i < get_num_threads(); i++)
109                 delete thread_map->get(i);
110         delete thread_map;
111
112         delete obj_thrd_map;
113         delete obj_map;
114         delete lock_waiters_map;
115         delete condvar_waiters_map;
116         delete action_trace;
117
118         for (unsigned int i = 0; i < promises->size(); i++)
119                 delete (*promises)[i];
120         delete promises;
121
122         delete pending_rel_seqs;
123
124         delete thrd_last_action;
125         delete thrd_last_fence_release;
126         delete node_stack;
127         delete scheduler;
128         delete mo_graph;
129         delete priv;
130 }
131
132 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
133 {
134         action_list_t *tmp = hash->get(ptr);
135         if (tmp == NULL) {
136                 tmp = new action_list_t();
137                 hash->put(ptr, tmp);
138         }
139         return tmp;
140 }
141
142 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
143 {
144         std::vector<action_list_t> *tmp = hash->get(ptr);
145         if (tmp == NULL) {
146                 tmp = new std::vector<action_list_t>();
147                 hash->put(ptr, tmp);
148         }
149         return tmp;
150 }
151
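/*
 * Usage sketch (added for illustration; not part of the original file): these
 * helpers lazily create the per-location list/vector on first lookup, so
 * callers can use the result immediately without a NULL check.  A typical
 * call, following the pattern used later in this file, looks like:
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);   // 'list' is guaranteed to be non-NULL here
 */
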
152 /**
153  * Restores user program to initial state and resets all model-checker data
154  * structures.
155  */
156 void ModelChecker::reset_to_initial_state()
157 {
158         DEBUG("+++ Resetting to initial state +++\n");
159         node_stack->reset_execution();
160
161         /* Print all model-checker output before rollback */
162         fflush(model_out);
163
164         snapshot_backtrack_before(0);
165 }
166
167 /** @return a thread ID for a new Thread */
168 thread_id_t ModelChecker::get_next_id()
169 {
170         return priv->next_thread_id++;
171 }
172
173 /** @return the number of user threads created during this execution */
174 unsigned int ModelChecker::get_num_threads() const
175 {
176         return priv->next_thread_id;
177 }
178
179 /**
180  * Must be called from user-thread context (e.g., through the global
181  * thread_current() interface)
182  *
183  * @return The currently executing Thread.
184  */
185 Thread * ModelChecker::get_current_thread() const
186 {
187         return scheduler->get_current_thread();
188 }
189
190 /** @return a sequence number for a new ModelAction */
191 modelclock_t ModelChecker::get_next_seq_num()
192 {
193         return ++priv->used_sequence_numbers;
194 }
195
196 Node * ModelChecker::get_curr_node() const
197 {
198         return node_stack->get_head();
199 }
200
201 /**
202  * @brief Choose the next thread to execute.
203  *
204  * This function chooses the next thread that should execute. It can force the
205  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
206  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
207  * The model-checker may have no preference regarding the next thread (i.e.,
208  * when exploring a new execution ordering), in which case this will return
209  * NULL.
210  * @param curr The current ModelAction. This action might guide the choice of
211  * next thread.
212  * @return The next thread to run. If the model-checker has no preference, NULL.
213  */
214 Thread * ModelChecker::get_next_thread(ModelAction *curr)
215 {
216         thread_id_t tid;
217
218         if (curr != NULL) {
219                 /* Do not split atomic actions. */
220                 if (curr->is_rmwr())
221                         return thread_current();
222                 else if (curr->get_type() == THREAD_CREATE)
223                         return curr->get_thread_operand();
224         }
225
226         /* Have we completed exploring the preselected path? */
227         if (diverge == NULL)
228                 return NULL;
229
230         /* Else, we are trying to replay an execution */
231         ModelAction *next = node_stack->get_next()->get_action();
232
233         if (next == diverge) {
234                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
235                         earliest_diverge = diverge;
236
237                 Node *nextnode = next->get_node();
238                 Node *prevnode = nextnode->get_parent();
239                 scheduler->update_sleep_set(prevnode);
240
241                 /* Reached divergence point */
242                 if (nextnode->increment_misc()) {
243                         /* The next node will try to satisfy a different misc_index value. */
244                         tid = next->get_tid();
245                         node_stack->pop_restofstack(2);
246                 } else if (nextnode->increment_promise()) {
247                         /* The next node will try to satisfy a different set of promises. */
248                         tid = next->get_tid();
249                         node_stack->pop_restofstack(2);
250                 } else if (nextnode->increment_read_from()) {
251                         /* The next node will read from a different value. */
252                         tid = next->get_tid();
253                         node_stack->pop_restofstack(2);
254                 } else if (nextnode->increment_future_value()) {
255                         /* The next node will try to read from a different future value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_relseq_break()) {
259                         /* The next node will try to resolve a release sequence differently */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else {
263                         ASSERT(prevnode);
264                         /* Make a different thread execute for next step */
265                         scheduler->add_sleep(get_thread(next->get_tid()));
266                         tid = prevnode->get_next_backtrack();
267                         /* Make sure the backtracked thread isn't sleeping. */
268                         node_stack->pop_restofstack(1);
269                         if (diverge == earliest_diverge) {
270                                 earliest_diverge = prevnode->get_action();
271                         }
272                 }
273                 /* The correct sleep set is in the parent node. */
274                 execute_sleep_set();
275
276                 DEBUG("*** Divergence point ***\n");
277
278                 diverge = NULL;
279         } else {
280                 tid = next->get_tid();
281         }
282         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
283         ASSERT(tid != THREAD_ID_T_NONE);
284         return thread_map->get(id_to_int(tid));
285 }
286
287 /**
288  * We need to know what the next action of each thread in the sleep
289  * set will be.  This method computes those actions and stores each one
290  * as the corresponding Thread object's pending action.
291  */
292
293 void ModelChecker::execute_sleep_set()
294 {
295         for (unsigned int i = 0; i < get_num_threads(); i++) {
296                 thread_id_t tid = int_to_id(i);
297                 Thread *thr = get_thread(tid);
298                 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
299                         scheduler->next_thread(thr);
300                         Thread::swap(&system_context, thr);
301                         priv->current_action->set_sleep_flag();
302                         thr->set_pending(priv->current_action);
303                 }
304         }
305 }
306
307 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
308 {
309         for (unsigned int i = 0; i < get_num_threads(); i++) {
310                 Thread *thr = get_thread(int_to_id(i));
311                 if (scheduler->is_sleep_set(thr)) {
312                         ModelAction *pending_act = thr->get_pending();
313                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
314                                 //Remove this thread from sleep set
315                                 scheduler->remove_sleep(thr);
316                 }
317         }
318 }
319
320 /** @brief Alert the model-checker that an incorrectly-ordered
321  * synchronization was made */
322 void ModelChecker::set_bad_synchronization()
323 {
324         priv->bad_synchronization = true;
325 }
326
327 bool ModelChecker::has_asserted() const
328 {
329         return priv->asserted;
330 }
331
332 void ModelChecker::set_assert()
333 {
334         priv->asserted = true;
335 }
336
337 /**
338  * Check if we are in a deadlock. Should only be called at the end of an
339  * execution, although it should not give false positives in the middle of an
340  * execution (there should be some ENABLED thread).
341  *
342  * @return True if program is in a deadlock; false otherwise
343  */
344 bool ModelChecker::is_deadlocked() const
345 {
346         bool blocking_threads = false;
347         for (unsigned int i = 0; i < get_num_threads(); i++) {
348                 thread_id_t tid = int_to_id(i);
349                 if (is_enabled(tid))
350                         return false;
351                 Thread *t = get_thread(tid);
352                 if (!t->is_model_thread() && t->get_pending())
353                         blocking_threads = true;
354         }
355         return blocking_threads;
356 }
357
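/*
 * Illustrative example (added; not from the original sources): a user program
 * that this check would report as deadlocked.  Here std::mutex refers to the
 * model checker's instrumented mutex, as used throughout this file.
 *
 *   std::mutex a, b;
 *   void thread_1() { a.lock(); b.lock(); b.unlock(); a.unlock(); }
 *   void thread_2() { b.lock(); a.lock(); a.unlock(); b.unlock(); }
 *
 * In the interleaving where thread_1 holds 'a' and thread_2 holds 'b', both
 * pending lock operations stay blocked, no thread is ENABLED, and
 * is_deadlocked() returns true at the end of the execution.
 */
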
358 /**
359  * Check if this is a complete execution. That is, have all threads completed
360  * execution (rather than exiting because sleep sets have forced a redundant
361  * execution).
362  *
363  * @return True if the execution is complete.
364  */
365 bool ModelChecker::is_complete_execution() const
366 {
367         for (unsigned int i = 0; i < get_num_threads(); i++)
368                 if (is_enabled(int_to_id(i)))
369                         return false;
370         return true;
371 }
372
373 /**
374  * @brief Assert a bug in the executing program.
375  *
376  * Use this function to assert any sort of bug in the user program. If the
377  * current trace is feasible (actually, a prefix of some feasible execution),
378  * then this execution will be aborted, printing the appropriate message. If
379  * the current trace is not yet feasible, the error message will be stashed and
380  * printed if the execution ever becomes feasible.
381  *
382  * @param msg Descriptive message for the bug (do not include newline char)
383  * @return True if bug is immediately-feasible
384  */
385 bool ModelChecker::assert_bug(const char *msg)
386 {
387         priv->bugs.push_back(new bug_message(msg));
388
389         if (isfeasibleprefix()) {
390                 set_assert();
391                 return true;
392         }
393         return false;
394 }
395
396 /**
397  * @brief Assert a bug in the executing program, asserted by a user thread
398  * @see ModelChecker::assert_bug
399  * @param msg Descriptive message for the bug (do not include newline char)
400  */
401 void ModelChecker::assert_user_bug(const char *msg)
402 {
403         /* If feasible bug, bail out now */
404         if (assert_bug(msg))
405                 switch_to_master(NULL);
406 }
407
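/*
 * Usage sketch (added for illustration): internal model-checker checks report
 * bugs by calling assert_bug() directly, for example the deadlock check in
 * next_execution() below:
 *
 *   if (is_deadlocked())
 *           assert_bug("Deadlock detected");
 *
 * Bugs detected from user-thread context go through assert_user_bug(), which
 * additionally switches back to the model-checker ("master") context when the
 * bug is immediately feasible.
 */
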
408 /** @return True, if any bugs have been reported for this execution */
409 bool ModelChecker::have_bug_reports() const
410 {
411         return priv->bugs.size() != 0;
412 }
413
414 /** @brief Print bug report listing for this execution (if any bugs exist) */
415 void ModelChecker::print_bugs() const
416 {
417         if (have_bug_reports()) {
418                 model_print("Bug report: %zu bug%s detected\n",
419                                 priv->bugs.size(),
420                                 priv->bugs.size() > 1 ? "s" : "");
421                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
422                         priv->bugs[i]->print();
423         }
424 }
425
426 /**
427  * @brief Record end-of-execution stats
428  *
429  * Must be run when exiting an execution. Records various stats.
430  * @see struct execution_stats
431  */
432 void ModelChecker::record_stats()
433 {
434         stats.num_total++;
435         if (!isfeasibleprefix())
436                 stats.num_infeasible++;
437         else if (have_bug_reports())
438                 stats.num_buggy_executions++;
439         else if (is_complete_execution())
440                 stats.num_complete++;
441         else
442                 stats.num_redundant++;
443 }
444
445 /** @brief Print execution stats */
446 void ModelChecker::print_stats() const
447 {
448         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
449         model_print("Number of redundant executions: %d\n", stats.num_redundant);
450         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
451         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
452         model_print("Total executions: %d\n", stats.num_total);
453         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
454 }
455
456 /**
457  * @brief End-of-execution print
458  * @param printbugs Should any existing bugs be printed?
459  */
460 void ModelChecker::print_execution(bool printbugs) const
461 {
462         print_program_output();
463
464         if (DBG_ENABLED() || params.verbose) {
465                 model_print("Earliest divergence point since last feasible execution:\n");
466                 if (earliest_diverge)
467                         earliest_diverge->print();
468                 else
469                         model_print("(Not set)\n");
470
471                 model_print("\n");
472                 print_stats();
473         }
474
475         /* Don't print invalid bugs */
476         if (printbugs)
477                 print_bugs();
478
479         model_print("\n");
480         print_summary();
481 }
482
483 /**
484  * Queries the model-checker for more executions to explore and, if one
485  * exists, resets the model-checker state to execute a new execution.
486  *
487  * @return If there are more executions to explore, return true. Otherwise,
488  * return false.
489  */
490 bool ModelChecker::next_execution()
491 {
492         DBG();
493         /* Is this execution a feasible execution that's worth bug-checking? */
494         bool complete = isfeasibleprefix() && (is_complete_execution() ||
495                         have_bug_reports());
496
497         /* End-of-execution bug checks */
498         if (complete) {
499                 if (is_deadlocked())
500                         assert_bug("Deadlock detected");
501
502                 checkDataRaces();
503         }
504
505         record_stats();
506
507         /* Output */
508         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
509                 print_execution(complete);
510         else
511                 clear_program_output();
512
513         if (complete)
514                 earliest_diverge = NULL;
515
516         if ((diverge = get_next_backtrack()) == NULL)
517                 return false;
518
519         if (DBG_ENABLED()) {
520                 model_print("Next execution will diverge at:\n");
521                 diverge->print();
522         }
523
524         reset_to_initial_state();
525         return true;
526 }
527
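/*
 * Illustrative sketch (hypothetical; the real driver lives outside this
 * file): next_execution() is intended to be called in a loop that runs the
 * user program once per iteration until no backtracking points remain:
 *
 *   do {
 *           run_user_program_once();        // hypothetical helper
 *   } while (model->next_execution());      // false when exploration is done
 */
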
528 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
529 {
530         switch (act->get_type()) {
531         case ATOMIC_FENCE:
532         case ATOMIC_READ:
533         case ATOMIC_WRITE:
534         case ATOMIC_RMW: {
535                 /* Optimization: relaxed operations don't need backtracking */
536                 if (act->is_relaxed())
537                         return NULL;
538                 /* linear search: from most recent to oldest */
539                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
540                 action_list_t::reverse_iterator rit;
541                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
542                         ModelAction *prev = *rit;
543                         if (prev->could_synchronize_with(act))
544                                 return prev;
545                 }
546                 break;
547         }
548         case ATOMIC_LOCK:
549         case ATOMIC_TRYLOCK: {
550                 /* linear search: from most recent to oldest */
551                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
552                 action_list_t::reverse_iterator rit;
553                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
554                         ModelAction *prev = *rit;
555                         if (act->is_conflicting_lock(prev))
556                                 return prev;
557                 }
558                 break;
559         }
560         case ATOMIC_UNLOCK: {
561                 /* linear search: from most recent to oldest */
562                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
563                 action_list_t::reverse_iterator rit;
564                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
565                         ModelAction *prev = *rit;
566                         if (!act->same_thread(prev) && prev->is_failed_trylock())
567                                 return prev;
568                 }
569                 break;
570         }
571         case ATOMIC_WAIT: {
572                 /* linear search: from most recent to oldest */
573                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
574                 action_list_t::reverse_iterator rit;
575                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
576                         ModelAction *prev = *rit;
577                         if (!act->same_thread(prev) && prev->is_failed_trylock())
578                                 return prev;
579                         if (!act->same_thread(prev) && prev->is_notify())
580                                 return prev;
581                 }
582                 break;
583         }
584
585         case ATOMIC_NOTIFY_ALL:
586         case ATOMIC_NOTIFY_ONE: {
587                 /* linear search: from most recent to oldest */
588                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
589                 action_list_t::reverse_iterator rit;
590                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
591                         ModelAction *prev = *rit;
592                         if (!act->same_thread(prev) && prev->is_wait())
593                                 return prev;
594                 }
595                 break;
596         }
597         default:
598                 break;
599         }
600         return NULL;
601 }
602
603 /** This method finds backtracking points: previous conflicting actions
604  * against which the parameter ModelAction should be reordered.
605  *
606  * @param act The ModelAction to find backtracking points for.
607  */
608 void ModelChecker::set_backtracking(ModelAction *act)
609 {
610         Thread *t = get_thread(act);
611         ModelAction *prev = get_last_conflict(act);
612         if (prev == NULL)
613                 return;
614
615         Node *node = prev->get_node()->get_parent();
616
617         int low_tid, high_tid;
618         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
619                 low_tid = id_to_int(act->get_tid());
620                 high_tid = low_tid + 1;
621         } else {
622                 low_tid = 0;
623                 high_tid = get_num_threads();
624         }
625
626         for (int i = low_tid; i < high_tid; i++) {
627                 thread_id_t tid = int_to_id(i);
628
629                 /* Make sure this thread can be enabled here. */
630                 if (i >= node->get_num_threads())
631                         break;
632
633                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
634                 if (node->enabled_status(tid) != THREAD_ENABLED)
635                         continue;
636
637                 /* Check if this has been explored already */
638                 if (node->has_been_explored(tid))
639                         continue;
640
641                 /* See if fairness allows */
642                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
643                         bool unfair = false;
644                         for (int t = 0; t < node->get_num_threads(); t++) {
645                                 thread_id_t tother = int_to_id(t);
646                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
647                                         unfair = true;
648                                         break;
649                                 }
650                         }
651                         if (unfair)
652                                 continue;
653                 }
654                 /* Cache the latest backtracking point */
655                 set_latest_backtrack(prev);
656
657                 /* If this is a new backtracking point, mark the tree */
658                 if (!node->set_backtrack(tid))
659                         continue;
660                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
661                                         id_to_int(prev->get_tid()),
662                                         id_to_int(t->get_id()));
663                 if (DBG_ENABLED()) {
664                         prev->print();
665                         act->print();
666                 }
667         }
668 }
669
670 /**
671  * @brief Cache a backtracking point as the "most recent", if eligible
672  *
673  * Note that this does not prepare the NodeStack for this backtracking
674  * operation; it only caches the action on a per-execution basis.
675  *
676  * @param act The operation at which we should explore a different next action
677  * (i.e., backtracking point)
678  * @return True, if this action is now the most recent backtracking point;
679  * false otherwise
680  */
681 bool ModelChecker::set_latest_backtrack(ModelAction *act)
682 {
683         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
684                 priv->next_backtrack = act;
685                 return true;
686         }
687         return false;
688 }
689
690 /**
691  * Returns the last backtracking point. The model checker will explore a different
692  * path for this point in the next execution.
693  * @return The ModelAction at which the next execution should diverge.
694  */
695 ModelAction * ModelChecker::get_next_backtrack()
696 {
697         ModelAction *next = priv->next_backtrack;
698         priv->next_backtrack = NULL;
699         return next;
700 }
701
702 /**
703  * Processes a read or RMW model action.
704  * @param curr is the read model action to process.
705  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
706  * @return True if processing this read updates the mo_graph.
707  */
708 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
709 {
710         uint64_t value = VALUE_NONE;
711         bool updated = false;
712         while (true) {
713                 const ModelAction *reads_from = curr->get_node()->get_read_from();
714                 if (reads_from != NULL) {
715                         mo_graph->startChanges();
716
717                         value = reads_from->get_value();
718                         bool r_status = false;
719
720                         if (!second_part_of_rmw) {
721                                 check_recency(curr, reads_from);
722                                 r_status = r_modification_order(curr, reads_from);
723                         }
724
725                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
726                                 mo_graph->rollbackChanges();
727                                 priv->too_many_reads = false;
728                                 continue;
729                         }
730
731                         read_from(curr, reads_from);
732                         mo_graph->commitChanges();
733                         mo_check_promises(curr, true);
734
735                         updated |= r_status;
736                 } else if (!second_part_of_rmw) {
737                         /* Read from future value */
738                         struct future_value fv = curr->get_node()->get_future_value();
739                         Promise *promise = new Promise(curr, fv);
740                         value = fv.value;
741                         curr->set_read_from_promise(promise);
742                         promises->push_back(promise);
743                         mo_graph->startChanges();
744                         updated = r_modification_order(curr, promise);
745                         mo_graph->commitChanges();
746                 }
747                 get_thread(curr)->set_return_value(value);
748                 return updated;
749         }
750 }
751
752 /**
753  * Processes a lock, trylock, unlock, wait, or notify model action.
754  * @param curr is the mutex operation to process.
755  *
756  * The trylock operation checks whether the lock is already taken.  If not,
757  * it falls through to the normal lock case.  If so, it fails and the
758  * thread's return value is set to 0.
759  *
760  * The lock operation has already been checked to be enabled, so it simply
761  * grabs the lock and synchronizes with the previous unlock.
762  *
763  * The unlock operation has to re-enable all of the threads that are
764  * waiting on the lock.
765  *
766  * @return True if synchronization was updated; false otherwise
767  */
768 bool ModelChecker::process_mutex(ModelAction *curr)
769 {
770         std::mutex *mutex = NULL;
771         struct std::mutex_state *state = NULL;
772
773         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
774                 mutex = (std::mutex *)curr->get_location();
775                 state = mutex->get_state();
776         } else if (curr->is_wait()) {
777                 mutex = (std::mutex *)curr->get_value();
778                 state = mutex->get_state();
779         }
780
781         switch (curr->get_type()) {
782         case ATOMIC_TRYLOCK: {
783                 bool success = !state->islocked;
784                 curr->set_try_lock(success);
785                 if (!success) {
786                         get_thread(curr)->set_return_value(0);
787                         break;
788                 }
789                 get_thread(curr)->set_return_value(1);
790         }
791                 //otherwise fall through to the lock case
792         case ATOMIC_LOCK: {
793                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
794                         assert_bug("Lock access before initialization");
795                 state->islocked = true;
796                 ModelAction *unlock = get_last_unlock(curr);
797                 //synchronize with the previous unlock statement
798                 if (unlock != NULL) {
799                         curr->synchronize_with(unlock);
800                         return true;
801                 }
802                 break;
803         }
804         case ATOMIC_UNLOCK: {
805                 //unlock the lock
806                 state->islocked = false;
807                 //wake up the other threads
808                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
809                 //activate all the waiting threads
810                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
811                         scheduler->wake(get_thread(*rit));
812                 }
813                 waiters->clear();
814                 break;
815         }
816         case ATOMIC_WAIT: {
817                 //unlock the lock
818                 state->islocked = false;
819                 //wake up the other threads
820                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
821                 //activate all the waiting threads
822                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
823                         scheduler->wake(get_thread(*rit));
824                 }
825                 waiters->clear();
826                 //check whether we should go to sleep or not...simulate spurious failures
827                 if (curr->get_node()->get_misc() == 0) {
828                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
829                         //disable us
830                         scheduler->sleep(get_thread(curr));
831                 }
832                 break;
833         }
834         case ATOMIC_NOTIFY_ALL: {
835                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
836                 //activate all the waiting threads
837                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
838                         scheduler->wake(get_thread(*rit));
839                 }
840                 waiters->clear();
841                 break;
842         }
843         case ATOMIC_NOTIFY_ONE: {
844                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
845                 int wakeupthread = curr->get_node()->get_misc();
846                 action_list_t::iterator it = waiters->begin();
847                 advance(it, wakeupthread);
848                 scheduler->wake(get_thread(*it));
849                 waiters->erase(it);
850                 break;
851         }
852
853         default:
854                 ASSERT(0);
855         }
856         return false;
857 }
858
859 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
860 {
861         /* Do more ambitious checks now that mo is more complete */
862         if (mo_may_allow(writer, reader)) {
863                 Node *node = reader->get_node();
864
865                 /* Find an ancestor thread which exists at the time of the reader */
866                 Thread *write_thread = get_thread(writer);
867                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
868                         write_thread = write_thread->get_parent();
869
870                 struct future_value fv = {
871                         writer->get_value(),
872                         writer->get_seq_number() + params.maxfuturedelay,
873                         write_thread->get_id(),
874                 };
875                 if (node->add_future_value(fv))
876                         set_latest_backtrack(reader);
877         }
878 }
879
880 /**
881  * Process a write ModelAction
882  * @param curr The ModelAction to process
883  * @return True if the mo_graph was updated or promises were resolved
884  */
885 bool ModelChecker::process_write(ModelAction *curr)
886 {
887         bool updated_mod_order = w_modification_order(curr);
888         bool updated_promises = resolve_promises(curr);
889
890         if (promises->size() == 0) {
891                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
892                         struct PendingFutureValue pfv = (*futurevalues)[i];
893                         add_future_value(pfv.writer, pfv.act);
894                 }
895                 futurevalues->clear();
896         }
897
898         mo_graph->commitChanges();
899         mo_check_promises(curr, false);
900
901         get_thread(curr)->set_return_value(VALUE_NONE);
902         return updated_mod_order || updated_promises;
903 }
904
905 /**
906  * Process a fence ModelAction
907  * @param curr The ModelAction to process
908  * @return True if synchronization was updated
909  */
910 bool ModelChecker::process_fence(ModelAction *curr)
911 {
912         /*
913          * fence-relaxed: no-op
914          * fence-release: only log the occurrence (not in this function), for
915          *   use in later synchronization
916          * fence-acquire (this function): search for hypothetical release
917          *   sequences
918          */
919         bool updated = false;
920         if (curr->is_acquire()) {
921                 action_list_t *list = action_trace;
922                 action_list_t::reverse_iterator rit;
923                 /* Find X : is_read(X) && X --sb-> curr */
924                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
925                         ModelAction *act = *rit;
926                         if (act == curr)
927                                 continue;
928                         if (act->get_tid() != curr->get_tid())
929                                 continue;
930                         /* Stop at the beginning of the thread */
931                         if (act->is_thread_start())
932                                 break;
933                         /* Stop once we reach a prior fence-acquire */
934                         if (act->is_fence() && act->is_acquire())
935                                 break;
936                         if (!act->is_read())
937                                 continue;
938                         /* read-acquire will find its own release sequences */
939                         if (act->is_acquire())
940                                 continue;
941
942                         /* Establish hypothetical release sequences */
943                         rel_heads_list_t release_heads;
944                         get_release_seq_heads(curr, act, &release_heads);
945                         for (unsigned int i = 0; i < release_heads.size(); i++)
946                                 if (!curr->synchronize_with(release_heads[i]))
947                                         set_bad_synchronization();
948                         if (release_heads.size() != 0)
949                                 updated = true;
950                 }
951         }
952         return updated;
953 }
954
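/*
 * Illustrative example (added; standard C11/C++11 fence semantics, not code
 * from this file) of the pattern the fence-acquire processing above targets.
 * Assume 'std::atomic<int> flag(0); int data, r;':
 *
 *   // Thread 1:
 *   data = 42;
 *   flag.store(1, std::memory_order_release);
 *
 *   // Thread 2:
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;
 *   std::atomic_thread_fence(std::memory_order_acquire);
 *   r = data;   // guaranteed to observe 42
 *
 * The relaxed load of 'flag' is the "X : is_read(X) && X --sb-> curr" above;
 * the acquire fence synchronizes with the release store on its behalf.
 */
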
955 /**
956  * @brief Process the current action for thread-related activity
957  *
958  * Performs current-action processing for a THREAD_* ModelAction. Processing
959  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
960  * synchronization, etc.  This function is a no-op for non-THREAD actions
961  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
962  *
963  * @param curr The current action
964  * @return True if synchronization was updated or a thread completed
965  */
966 bool ModelChecker::process_thread_action(ModelAction *curr)
967 {
968         bool updated = false;
969
970         switch (curr->get_type()) {
971         case THREAD_CREATE: {
972                 Thread *th = curr->get_thread_operand();
973                 th->set_creation(curr);
974                 /* Promises can be satisfied by children */
975                 for (unsigned int i = 0; i < promises->size(); i++) {
976                         Promise *promise = (*promises)[i];
977                         if (promise->thread_is_available(curr->get_tid()))
978                                 promise->add_thread(th->get_id());
979                 }
980                 break;
981         }
982         case THREAD_JOIN: {
983                 Thread *blocking = curr->get_thread_operand();
984                 ModelAction *act = get_last_action(blocking->get_id());
985                 curr->synchronize_with(act);
986                 updated = true; /* trigger rel-seq checks */
987                 break;
988         }
989         case THREAD_FINISH: {
990                 Thread *th = get_thread(curr);
991                 while (!th->wait_list_empty()) {
992                         ModelAction *act = th->pop_wait_list();
993                         scheduler->wake(get_thread(act));
994                 }
995                 th->complete();
996                 /* Completed thread can't satisfy promises */
997                 for (unsigned int i = 0; i < promises->size(); i++) {
998                         Promise *promise = (*promises)[i];
999                         if (promise->thread_is_available(th->get_id()))
1000                                 if (promise->eliminate_thread(th->get_id()))
1001                                         priv->failed_promise = true;
1002                 }
1003                 updated = true; /* trigger rel-seq checks */
1004                 break;
1005         }
1006         case THREAD_START: {
1007                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1008                 break;
1009         }
1010         default:
1011                 break;
1012         }
1013
1014         return updated;
1015 }
1016
1017 /**
1018  * @brief Process the current action for release sequence fixup activity
1019  *
1020  * Performs model-checker release sequence fixups for the current action,
1021  * forcing a single pending release sequence to break (with a given, potential
1022  * "loose" write) or to complete (i.e., synchronize). If a pending release
1023  * sequence forms a complete release sequence, then we must perform the fixup
1024  * synchronization, mo_graph additions, etc.
1025  *
1026  * @param curr The current action; must be a release sequence fixup action
1027  * @param work_queue The work queue to which to add work items as they are
1028  * generated
1029  */
1030 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1031 {
1032         const ModelAction *write = curr->get_node()->get_relseq_break();
1033         struct release_seq *sequence = pending_rel_seqs->back();
1034         pending_rel_seqs->pop_back();
1035         ASSERT(sequence);
1036         ModelAction *acquire = sequence->acquire;
1037         const ModelAction *rf = sequence->rf;
1038         const ModelAction *release = sequence->release;
1039         ASSERT(acquire);
1040         ASSERT(release);
1041         ASSERT(rf);
1042         ASSERT(release->same_thread(rf));
1043
1044         if (write == NULL) {
1045                 /**
1046                  * @todo Forcing a synchronization requires that we set
1047                  * modification order constraints. For instance, we can't allow
1048                  * a fixup sequence in which two separate read-acquire
1049                  * operations read from the same sequence, where the first one
1050                  * synchronizes and the other doesn't. Essentially, we can't
1051                  * allow any writes to insert themselves between 'release' and
1052                  * 'rf'
1053                  */
1054
1055                 /* Must synchronize */
1056                 if (!acquire->synchronize_with(release)) {
1057                         set_bad_synchronization();
1058                         return;
1059                 }
1060                 /* Re-check all pending release sequences */
1061                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1062                 /* Re-check act for mo_graph edges */
1063                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1064
1065                 /* propagate synchronization to later actions */
1066                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1067                 for (; (*rit) != acquire; rit++) {
1068                         ModelAction *propagate = *rit;
1069                         if (acquire->happens_before(propagate)) {
1070                                 propagate->synchronize_with(acquire);
1071                                 /* Re-check 'propagate' for mo_graph edges */
1072                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1073                         }
1074                 }
1075         } else {
1076                 /* Break release sequence with new edges:
1077                  *   release --mo--> write --mo--> rf */
1078                 mo_graph->addEdge(release, write);
1079                 mo_graph->addEdge(write, rf);
1080         }
1081
1082         /* See if we have realized a data race */
1083         checkDataRaces();
1084 }
1085
1086 /**
1087  * Initialize the current action by performing one or more of the following
1088  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1089  * in the NodeStack, manipulating backtracking sets, allocating and
1090  * initializing clock vectors, and computing the promises to fulfill.
1091  *
1092  * @param curr The current action, as passed from the user context; may be
1093  * freed/invalidated after the execution of this function, with a different
1094  * action "returned" in its place (pass-by-reference)
1095  * @return True if curr is a newly-explored action; false otherwise
1096  */
1097 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1098 {
1099         ModelAction *newcurr;
1100
1101         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1102                 newcurr = process_rmw(*curr);
1103                 delete *curr;
1104
1105                 if (newcurr->is_rmw())
1106                         compute_promises(newcurr);
1107
1108                 *curr = newcurr;
1109                 return false;
1110         }
1111
1112         (*curr)->set_seq_number(get_next_seq_num());
1113
1114         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1115         if (newcurr) {
1116                 /* First restore type and order in case of RMW operation */
1117                 if ((*curr)->is_rmwr())
1118                         newcurr->copy_typeandorder(*curr);
1119
1120                 ASSERT((*curr)->get_location() == newcurr->get_location());
1121                 newcurr->copy_from_new(*curr);
1122
1123                 /* Discard duplicate ModelAction; use action from NodeStack */
1124                 delete *curr;
1125
1126                 /* Always compute new clock vector */
1127                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1128
1129                 *curr = newcurr;
1130                 return false; /* Action was explored previously */
1131         } else {
1132                 newcurr = *curr;
1133
1134                 /* Always compute new clock vector */
1135                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1136
1137                 /* Assign most recent release fence */
1138                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1139
1140                 /*
1141                  * Perform one-time actions when pushing new ModelAction onto
1142                  * NodeStack
1143                  */
1144                 if (newcurr->is_write())
1145                         compute_promises(newcurr);
1146                 else if (newcurr->is_relseq_fixup())
1147                         compute_relseq_breakwrites(newcurr);
1148                 else if (newcurr->is_wait())
1149                         newcurr->get_node()->set_misc_max(2);
1150                 else if (newcurr->is_notify_one()) {
1151                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1152                 }
1153                 return true; /* This was a new ModelAction */
1154         }
1155 }
1156
1157 /**
1158  * @brief Establish reads-from relation between two actions
1159  *
1160  * Perform basic operations involved with establishing a concrete rf relation,
1161  * including setting the ModelAction data and checking for release sequences.
1162  *
1163  * @param act The action that is reading (must be a read)
1164  * @param rf The action from which we are reading (must be a write)
1165  *
1166  * @return True if this read established synchronization
1167  */
1168 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1169 {
1170         act->set_read_from(rf);
1171         if (rf != NULL && act->is_acquire()) {
1172                 rel_heads_list_t release_heads;
1173                 get_release_seq_heads(act, act, &release_heads);
1174                 int num_heads = release_heads.size();
1175                 for (unsigned int i = 0; i < release_heads.size(); i++)
1176                         if (!act->synchronize_with(release_heads[i])) {
1177                                 set_bad_synchronization();
1178                                 num_heads--;
1179                         }
1180                 return num_heads > 0;
1181         }
1182         return false;
1183 }
1184
1185 /**
1186  * @brief Check whether a model action is enabled.
1187  *
1188  * Checks whether a lock or join operation would be successful (i.e., is the
1189  * lock already locked, or is the joined thread already complete). If not, put
1190  * the action in a waiter list.
1191  *
1192  * @param curr is the ModelAction to check whether it is enabled.
1193  * @return a bool that indicates whether the action is enabled.
1194  */
1195 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1196         if (curr->is_lock()) {
1197                 std::mutex *lock = (std::mutex *)curr->get_location();
1198                 struct std::mutex_state *state = lock->get_state();
1199                 if (state->islocked) {
1200                         //Stick the action in the appropriate waiting queue
1201                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1202                         return false;
1203                 }
1204         } else if (curr->get_type() == THREAD_JOIN) {
1205                 Thread *blocking = (Thread *)curr->get_location();
1206                 if (!blocking->is_complete()) {
1207                         blocking->push_wait_list(curr);
1208                         return false;
1209                 }
1210         }
1211
1212         return true;
1213 }
1214
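/*
 * Note (added for illustration): when this returns false, the caller
 * (check_current_action() below) defers the operation instead of running it:
 *
 *   get_thread(curr)->set_pending(curr);   // retry this same action later
 *   scheduler->sleep(get_thread(curr));    // disable the thread until woken
 *
 * The thread is woken again by ATOMIC_UNLOCK handling in process_mutex() (for
 * blocked locks) or by THREAD_FINISH handling (for blocked joins).
 */
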
1215 /**
1216  * Stores the ModelAction for the current thread action.  Call this
1217  * immediately before switching from user- to system-context to pass
1218  * data between them.
1219  * @param act The ModelAction created by the user-thread action
1220  */
1221 void ModelChecker::set_current_action(ModelAction *act) {
1222         priv->current_action = act;
1223 }
1224
1225 /**
1226  * This is the heart of the model checker routine. It performs model-checking
1227  * actions corresponding to a given "current action." Among other processes, it
1228  * calculates reads-from relationships, updates synchronization clock vectors,
1229  * forms a memory_order constraints graph, and handles replay/backtrack
1230  * execution when running permutations of previously-observed executions.
1231  *
1232  * @param curr The current action to process
1233  * @return The ModelAction that is actually executed; may be different than
1234  * curr; may be NULL, if the current action is not enabled to run
1235  */
1236 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1237 {
1238         ASSERT(curr);
1239         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1240
1241         if (!check_action_enabled(curr)) {
1242                 /* Make the execution look like we chose to run this action
1243                  * much later, when a lock/join can succeed */
1244                 get_thread(curr)->set_pending(curr);
1245                 scheduler->sleep(get_thread(curr));
1246                 return NULL;
1247         }
1248
1249         bool newly_explored = initialize_curr_action(&curr);
1250
1251         DBG();
1252         if (DBG_ENABLED())
1253                 curr->print();
1254
1255         wake_up_sleeping_actions(curr);
1256
1257         /* Add the action to lists before any other model-checking tasks */
1258         if (!second_part_of_rmw)
1259                 add_action_to_lists(curr);
1260
1261         /* Build may_read_from set for newly-created actions */
1262         if (newly_explored && curr->is_read())
1263                 build_reads_from_past(curr);
1264
1265         /* Initialize work_queue with the "current action" work */
1266         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1267         while (!work_queue.empty() && !has_asserted()) {
1268                 WorkQueueEntry work = work_queue.front();
1269                 work_queue.pop_front();
1270
1271                 switch (work.type) {
1272                 case WORK_CHECK_CURR_ACTION: {
1273                         ModelAction *act = work.action;
1274                         bool update = false; /* update this location's release seq's */
1275                         bool update_all = false; /* update all release seq's */
1276
1277                         if (process_thread_action(curr))
1278                                 update_all = true;
1279
1280                         if (act->is_read() && process_read(act, second_part_of_rmw))
1281                                 update = true;
1282
1283                         if (act->is_write() && process_write(act))
1284                                 update = true;
1285
1286                         if (act->is_fence() && process_fence(act))
1287                                 update_all = true;
1288
1289                         if (act->is_mutex_op() && process_mutex(act))
1290                                 update_all = true;
1291
1292                         if (act->is_relseq_fixup())
1293                                 process_relseq_fixup(curr, &work_queue);
1294
1295                         if (update_all)
1296                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1297                         else if (update)
1298                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1299                         break;
1300                 }
1301                 case WORK_CHECK_RELEASE_SEQ:
1302                         resolve_release_sequences(work.location, &work_queue);
1303                         break;
1304                 case WORK_CHECK_MO_EDGES: {
1305                         /** @todo Complete verification of work_queue */
1306                         ModelAction *act = work.action;
1307                         bool updated = false;
1308
1309                         if (act->is_read()) {
1310                                 const ModelAction *rf = act->get_reads_from();
1311                                 const Promise *promise = act->get_reads_from_promise();
1312                                 if (rf) {
1313                                         if (r_modification_order(act, rf))
1314                                                 updated = true;
1315                                 } else if (promise) {
1316                                         if (r_modification_order(act, promise))
1317                                                 updated = true;
1318                                 }
1319                         }
1320                         if (act->is_write()) {
1321                                 if (w_modification_order(act))
1322                                         updated = true;
1323                         }
1324                         mo_graph->commitChanges();
1325
1326                         if (updated)
1327                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1328                         break;
1329                 }
1330                 default:
1331                         ASSERT(false);
1332                         break;
1333                 }
1334         }
1335
1336         check_curr_backtracking(curr);
1337         set_backtracking(curr);
1338         return curr;
1339 }
1340
1341 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1342 {
1343         Node *currnode = curr->get_node();
1344         Node *parnode = currnode->get_parent();
1345
1346         if ((parnode && !parnode->backtrack_empty()) ||
1347                          !currnode->misc_empty() ||
1348                          !currnode->read_from_empty() ||
1349                          !currnode->future_value_empty() ||
1350                          !currnode->promise_empty() ||
1351                          !currnode->relseq_break_empty()) {
1352                 set_latest_backtrack(curr);
1353         }
1354 }
1355
1356 bool ModelChecker::promises_expired() const
1357 {
1358         for (unsigned int i = 0; i < promises->size(); i++) {
1359                 Promise *promise = (*promises)[i];
1360                 if (promise->get_expiration() < priv->used_sequence_numbers)
1361                         return true;
1362         }
1363         return false;
1364 }
1365
1366 /**
1367  * This is the strongest feasibility check available.
1368  * @return whether the current trace (partial or complete) must be a prefix of
1369  * a feasible trace.
1370  */
1371 bool ModelChecker::isfeasibleprefix() const
1372 {
1373         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1374 }
1375
1376 /**
1377  * Print diagnostic information about an infeasible execution
1378  * @param prefix A string to prefix the output with; if NULL, then a default
1379  * message prefix will be provided
1380  */
1381 void ModelChecker::print_infeasibility(const char *prefix) const
1382 {
1383         char buf[100];
1384         char *ptr = buf;
1385         if (mo_graph->checkForCycles())
1386                 ptr += sprintf(ptr, "[mo cycle]");
1387         if (priv->failed_promise)
1388                 ptr += sprintf(ptr, "[failed promise]");
1389         if (priv->too_many_reads)
1390                 ptr += sprintf(ptr, "[too many reads]");
1391         if (priv->no_valid_reads)
1392                 ptr += sprintf(ptr, "[no valid reads-from]");
1393         if (priv->bad_synchronization)
1394                 ptr += sprintf(ptr, "[bad sw ordering]");
1395         if (promises_expired())
1396                 ptr += sprintf(ptr, "[promise expired]");
1397         if (promises->size() != 0)
1398                 ptr += sprintf(ptr, "[unresolved promise]");
1399         if (ptr != buf)
1400                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1401 }
1402
1403 /**
1404  * Returns whether the current completed trace is feasible, except for pending
1405  * release sequences.
1406  */
1407 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1408 {
1409         return !is_infeasible() && promises->size() == 0;
1410 }
1411
1412 /**
1413  * Check if the current partial trace is infeasible. Does not check any
1414  * end-of-execution flags, which might rule out the execution. Thus, this is
1415  * useful only for ruling an execution as infeasible.
1416  * @return whether the current partial trace is infeasible.
1417  */
1418 bool ModelChecker::is_infeasible() const
1419 {
1420         return mo_graph->checkForCycles() ||
1421                 priv->no_valid_reads ||
1422                 priv->failed_promise ||
1423                 priv->too_many_reads ||
1424                 priv->bad_synchronization ||
1425                 promises_expired();
1426 }
1427
1428 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1429 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1430         ModelAction *lastread = get_last_action(act->get_tid());
1431         lastread->process_rmw(act);
1432         if (act->is_rmw()) {
1433                 if (lastread->get_reads_from())
1434                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1435                 else
1436                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1437                 mo_graph->commitChanges();
1438         }
1439         return lastread;
1440 }
1441
1442 /**
1443  * Checks whether a thread has read from the same write too many times
1444  * without seeing the effects of a later write.
1445  *
1446  * Basic idea:
1447  * 1) there must be a different write that we could read from that would satisfy the modification order,
1448  * 2) we must have read from the same write in excess of maxreads times, and
1449  * 3) that other write must have been in the reads_from set for maxreads times.
1450  *
1451  * If so, we decide that the execution is no longer feasible.
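 *
 * A minimal sketch of the pattern this targets (assumes a user program with
 * a std::atomic<int> 'flag', initially 0, that another thread eventually
 * sets with a release store):
 * @code
 *   while (flag.load(std::memory_order_acquire) == 0)
 *           ;  // each iteration may read from the same old store
 * @endcode
 * Without a bound, the checker could explore traces in which this loop
 * re-reads the stale store indefinitely; maxreads cuts such traces off.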
1452  */
1453 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1454 {
1455         if (params.maxreads != 0) {
1456                 if (curr->get_node()->get_read_from_size() <= 1)
1457                         return;
1458                 //Must make sure that execution is currently feasible...  We could
1459                 //accidentally clear the infeasibility flags by rolling back
1460                 if (is_infeasible())
1461                         return;
1462                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1463                 int tid = id_to_int(curr->get_tid());
1464
1465                 /* Skip checks */
1466                 if ((int)thrd_lists->size() <= tid)
1467                         return;
1468                 action_list_t *list = &(*thrd_lists)[tid];
1469
1470                 action_list_t::reverse_iterator rit = list->rbegin();
1471                 /* Skip past curr */
1472                 for (; (*rit) != curr; rit++)
1473                         ;
1474                 /* go past curr now */
1475                 rit++;
1476
1477                 action_list_t::reverse_iterator ritcopy = rit;
1478                 //See if we have enough reads from the same value
1479                 int count = 0;
1480                 for (; count < params.maxreads; rit++, count++) {
1481                         if (rit == list->rend())
1482                                 return;
1483                         ModelAction *act = *rit;
1484                         if (!act->is_read())
1485                                 return;
1486
1487                         if (act->get_reads_from() != rf)
1488                                 return;
1489                         if (act->get_node()->get_read_from_size() <= 1)
1490                                 return;
1491                 }
1492                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1493                         /* Get write */
1494                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1495
1496                         /* Need a different write */
1497                         if (write == rf)
1498                                 continue;
1499
1500                         /* Test to see whether this is a feasible write to read from */
1501                         /** NOTE: all members of read-from set should be
1502                          *  feasible, so we no longer check it here **/
1503
1504                         rit = ritcopy;
1505
1506                         bool feasiblewrite = true;
1507                         //now we need to see if this write works for everyone
1508
1509                         for (int loop = count; loop > 0; loop--, rit++) {
1510                                 ModelAction *act = *rit;
1511                                 bool foundvalue = false;
1512                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1513                                         if (act->get_node()->get_read_from_at(j) == write) {
1514                                                 foundvalue = true;
1515                                                 break;
1516                                         }
1517                                 }
1518                                 if (!foundvalue) {
1519                                         feasiblewrite = false;
1520                                         break;
1521                                 }
1522                         }
1523                         if (feasiblewrite) {
1524                                 priv->too_many_reads = true;
1525                                 return;
1526                         }
1527                 }
1528         }
1529 }
1530
1531 /**
1532  * Updates the mo_graph with the constraints imposed from the current
1533  * read.
1534  *
1535  * Basic idea is the following: Go through each other thread and find
1536  * the last action that happened before our read.  Two cases:
1537  *
1538  * (1) The action is a write => that write must either occur before
1539  * the write we read from or be the write we read from.
1540  *
1541  * (2) The action is a read => the write that that action read from
1542  * must occur before the write we read from or be the same write.
1543  *
1544  * @param curr The current action. Must be a read.
1545  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1546  * @return True if modification order edges were added; false otherwise
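 *
 * A minimal sketch (assumes a user program with a std::atomic<int> 'x',
 * all three statements in one thread):
 * @code
 *   x.store(1, std::memory_order_relaxed);      // W1
 *   x.store(2, std::memory_order_relaxed);      // W2
 *   int r = x.load(std::memory_order_relaxed);  // curr
 * @endcode
 * If curr were to read from W1, case (1) adds the edge W2 --mo--> W1; with
 * the existing W1 --mo--> W2 edge this forms a cycle, so the stale read is
 * ruled infeasible.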
1547  */
1548 template <typename rf_type>
1549 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1550 {
1551         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1552         unsigned int i;
1553         bool added = false;
1554         ASSERT(curr->is_read());
1555
1556         /* Last SC fence in the current thread */
1557         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1558
1559         /* Iterate over all threads */
1560         for (i = 0; i < thrd_lists->size(); i++) {
1561                 /* Last SC fence in thread i */
1562                 ModelAction *last_sc_fence_thread_local = NULL;
1563                 if (int_to_id((int)i) != curr->get_tid())
1564                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1565
1566                 /* Last SC fence in thread i, before last SC fence in current thread */
1567                 ModelAction *last_sc_fence_thread_before = NULL;
1568                 if (last_sc_fence_local)
1569                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1570
1571                 /* Iterate over actions in thread, starting from most recent */
1572                 action_list_t *list = &(*thrd_lists)[i];
1573                 action_list_t::reverse_iterator rit;
1574                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1575                         ModelAction *act = *rit;
1576
1577                         if (act->is_write() && !act->equals(rf) && act != curr) {
1578                                 /* C++, Section 29.3 statement 5 */
1579                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1580                                                 *act < *last_sc_fence_thread_local) {
1581                                         added = mo_graph->addEdge(act, rf) || added;
1582                                         break;
1583                                 }
1584                                 /* C++, Section 29.3 statement 4 */
1585                                 else if (act->is_seqcst() && last_sc_fence_local &&
1586                                                 *act < *last_sc_fence_local) {
1587                                         added = mo_graph->addEdge(act, rf) || added;
1588                                         break;
1589                                 }
1590                                 /* C++, Section 29.3 statement 6 */
1591                                 else if (last_sc_fence_thread_before &&
1592                                                 *act < *last_sc_fence_thread_before) {
1593                                         added = mo_graph->addEdge(act, rf) || added;
1594                                         break;
1595                                 }
1596                         }
1597
1598                         /*
1599                          * Include at most one act per-thread that "happens
1600                          * before" curr. Don't consider reflexively.
1601                          */
1602                         if (act->happens_before(curr) && act != curr) {
1603                                 if (act->is_write()) {
1604                                         if (!act->equals(rf)) {
1605                                                 added = mo_graph->addEdge(act, rf) || added;
1606                                         }
1607                                 } else {
1608                                         const ModelAction *prevreadfrom = act->get_reads_from();
1609                                         //if the previous read is unresolved, keep going...
1610                                         if (prevreadfrom == NULL)
1611                                                 continue;
1612
1613                                         if (!prevreadfrom->equals(rf)) {
1614                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1615                                         }
1616                                 }
1617                                 break;
1618                         }
1619                 }
1620         }
1621
1622         /*
1623          * All compatible, thread-exclusive promises must be ordered after any
1624          * concrete loads from the same thread
1625          */
1626         for (unsigned int i = 0; i < promises->size(); i++)
1627                 if ((*promises)[i]->is_compatible_exclusive(curr))
1628                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1629
1630         return added;
1631 }
1632
1633 /**
1634  * Updates the mo_graph with the constraints imposed from the current write.
1635  *
1636  * Basic idea is the following: Go through each other thread and find
1637  * the latest action that happened before our write.  Two cases:
1638  *
1639  * (1) The action is a write => that write must occur before
1640  * the current write
1641  *
1642  * (2) The action is a read => the write that that action read from
1643  * must occur before the current write.
1644  *
1645  * This method also handles two other issues:
1646  *
1647  * (I) Sequential Consistency: Making sure that if the current write is
1648  * seq_cst, that it occurs after the previous seq_cst write.
1649  *
1650  * (II) Sending the write back to non-synchronizing reads.
1651  *
1652  * @param curr The current action. Must be a write.
1653  * @return True if modification order edges were added; false otherwise
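 *
 * A minimal sketch of case (2) (assumes a user program with a
 * std::atomic<int> 'x' and a store W1 performed by some other thread):
 * @code
 *   int r = x.load(std::memory_order_acquire);  // reads from W1
 *   x.store(2, std::memory_order_release);      // curr
 * @endcode
 * Because a read sequenced before curr observed W1, the edge
 * W1 --mo--> curr is added.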
1654  */
1655 bool ModelChecker::w_modification_order(ModelAction *curr)
1656 {
1657         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1658         unsigned int i;
1659         bool added = false;
1660         ASSERT(curr->is_write());
1661
1662         if (curr->is_seqcst()) {
1663                 /* We have to at least see the last sequentially consistent write,
1664                          so we are initialized. */
1665                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1666                 if (last_seq_cst != NULL) {
1667                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1668                 }
1669         }
1670
1671         /* Last SC fence in the current thread */
1672         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1673
1674         /* Iterate over all threads */
1675         for (i = 0; i < thrd_lists->size(); i++) {
1676                 /* Last SC fence in thread i, before last SC fence in current thread */
1677                 ModelAction *last_sc_fence_thread_before = NULL;
1678                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1679                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1680
1681                 /* Iterate over actions in thread, starting from most recent */
1682                 action_list_t *list = &(*thrd_lists)[i];
1683                 action_list_t::reverse_iterator rit;
1684                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1685                         ModelAction *act = *rit;
1686                         if (act == curr) {
1687                                 /*
1688                                  * 1) If RMW and it actually read from something, then we
1689                                  * already have all relevant edges, so just skip to next
1690                                  * thread.
1691                                  *
1692                                  * 2) If RMW and it didn't read from anything, we should take
1693                                  * whatever edge we can get to speed up convergence.
1694                                  *
1695                                  * 3) If normal write, we need to look at earlier actions, so
1696                                  * continue processing list.
1697                                  */
1698                                 if (curr->is_rmw()) {
1699                                         if (curr->get_reads_from() != NULL)
1700                                                 break;
1701                                         else
1702                                                 continue;
1703                                 } else
1704                                         continue;
1705                         }
1706
1707                         /* C++, Section 29.3 statement 7 */
1708                         if (last_sc_fence_thread_before && act->is_write() &&
1709                                         *act < *last_sc_fence_thread_before) {
1710                                 added = mo_graph->addEdge(act, curr) || added;
1711                                 break;
1712                         }
1713
1714                         /*
1715                          * Include at most one act per-thread that "happens
1716                          * before" curr
1717                          */
1718                         if (act->happens_before(curr)) {
1719                                 /*
1720                                  * Note: if act is RMW, just add edge:
1721                                  *   act --mo--> curr
1722                                  * The following edge should be handled elsewhere:
1723                                  *   readfrom(act) --mo--> act
1724                                  */
1725                                 if (act->is_write())
1726                                         added = mo_graph->addEdge(act, curr) || added;
1727                                 else if (act->is_read()) {
1728                                         //if the previous read is still unresolved (NULL reads-from), just keep going
1729                                         if (act->get_reads_from() == NULL)
1730                                                 continue;
1731                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1732                                 }
1733                                 break;
1734                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1735                                                      !act->same_thread(curr)) {
1736                                 /* We have an action that:
1737                                    (1) did not happen before us
1738                                    (2) is a read and we are a write
1739                                    (3) cannot synchronize with us
1740                                    (4) is in a different thread
1741                                    =>
1742                                    that read could potentially read from our write.  Note that
1743                                    these checks are overly conservative at this point, we'll
1744                                    do more checks before actually removing the
1745                                    pendingfuturevalue.
1746
1747                                  */
1748                                 if (thin_air_constraint_may_allow(curr, act)) {
1749                                         if (!is_infeasible())
1750                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1751                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1752                                                 add_future_value(curr, act);
1753                                 }
1754                         }
1755                 }
1756         }
1757
1758         /*
1759          * All compatible, thread-exclusive promises must be ordered after any
1760          * concrete stores from the same thread, or else they can be merged with
1761          * this store later
1762          */
1763         for (unsigned int i = 0; i < promises->size(); i++)
1764                 if ((*promises)[i]->is_compatible_exclusive(curr))
1765                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1766
1767         return added;
1768 }
1769
1770 /** Arbitrary reads from the future are not allowed.  Section 29.3
1771  * part 9 places some constraints.  This method checks one result of this
1772  * constraint.  Others require compiler support. */
1773 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1774 {
1775         if (!writer->is_rmw())
1776                 return true;
1777
1778         if (!reader->is_rmw())
1779                 return true;
1780
1781         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1782                 if (search == reader)
1783                         return false;
1784                 if (search->get_tid() == reader->get_tid() &&
1785                                 search->happens_before(reader))
1786                         break;
1787         }
1788
1789         return true;
1790 }
1791
1792 /**
1793  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1794  * some constraints. This method checks the following constraint (others
1795  * require compiler support):
1796  *
1797  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
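 *
 * A minimal sketch (assumes a user program with a std::atomic<int> 'x';
 * Z is some store in another thread that is mo-after Y):
 * @code
 *   int r = x.load(std::memory_order_relaxed);  // X, the reader
 *   x.store(1, std::memory_order_relaxed);      // Y, sequenced after X
 * @endcode
 * Letting X read a future value from Z would then give X --hb-> Y --mo-> Z
 * with X reading from Z, so this method returns false for that pairing.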
1798  */
1799 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1800 {
1801         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1802         unsigned int i;
1803         /* Iterate over all threads */
1804         for (i = 0; i < thrd_lists->size(); i++) {
1805                 const ModelAction *write_after_read = NULL;
1806
1807                 /* Iterate over actions in thread, starting from most recent */
1808                 action_list_t *list = &(*thrd_lists)[i];
1809                 action_list_t::reverse_iterator rit;
1810                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1811                         ModelAction *act = *rit;
1812
1813                         /* Don't disallow due to act == reader */
1814                         if (!reader->happens_before(act) || reader == act)
1815                                 break;
1816                         else if (act->is_write())
1817                                 write_after_read = act;
1818                         else if (act->is_read() && act->get_reads_from() != NULL)
1819                                 write_after_read = act->get_reads_from();
1820                 }
1821
1822                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1823                         return false;
1824         }
1825         return true;
1826 }
1827
1828 /**
1829  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1830  * The ModelAction under consideration is expected to be taking part in
1831  * release/acquire synchronization as an object of the "reads from" relation.
1832  * Note that this can only provide release sequence support for RMW chains
1833  * which do not read from the future, as those actions cannot be traced until
1834  * their "promise" is fulfilled. Similarly, we may not even establish the
1835  * presence of a release sequence with certainty, as some modification order
1836  * constraints may be decided further in the future. Thus, this function
1837  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1838  * and a boolean representing certainty.
1839  *
1840  * @param rf The action that might be part of a release sequence. Must be a
1841  * write.
1842  * @param release_heads A pass-by-reference style return parameter. After
1843  * execution of this function, release_heads will contain the heads of all the
1844  * relevant release sequences, if any exists with certainty
1845  * @param pending A pass-by-reference style return parameter which is only used
1846  * when returning false (i.e., uncertain). Returns most information regarding
1847  * an uncertain release sequence, including any write operations that might
1848  * break the sequence.
1849  * @return true, if the ModelChecker is certain that release_heads is complete;
1850  * false otherwise
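 *
 * A minimal sketch (assumes a user program with a std::atomic<int> 'x'):
 * @code
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);      // release head
 *   x.fetch_add(1, std::memory_order_relaxed);  // RMW continues the sequence
 *   // Thread 2:
 *   int r = x.load(std::memory_order_acquire);  // may read from the fetch_add
 * @endcode
 * If the acquire load reads from the relaxed RMW, walking the RMW's
 * reads-from chain reaches the release store, which is reported (with
 * certainty) in release_heads.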
1851  */
1852 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1853                 rel_heads_list_t *release_heads,
1854                 struct release_seq *pending) const
1855 {
1856         /* Only check for release sequences if there are no cycles */
1857         if (mo_graph->checkForCycles())
1858                 return false;
1859
1860         while (rf) {
1861                 ASSERT(rf->is_write());
1862
1863                 if (rf->is_release())
1864                         release_heads->push_back(rf);
1865                 else if (rf->get_last_fence_release())
1866                         release_heads->push_back(rf->get_last_fence_release());
1867                 if (!rf->is_rmw())
1868                         break; /* End of RMW chain */
1869
1870                 /** @todo Need to be smarter here...  In the linux lock
1871                  * example, this will run to the beginning of the program for
1872                  * every acquire. */
1873                 /** @todo The way to be smarter here is to keep going until 1
1874                  * thread has a release preceded by an acquire and you've seen
1875                  *       both. */
1876
1877                 /* acq_rel RMW is a sufficient stopping condition */
1878                 if (rf->is_acquire() && rf->is_release())
1879                         return true; /* complete */
1880
1881                 rf = rf->get_reads_from();
1882         }
1883         if (!rf) {
1884                 /* read from future: need to settle this later */
1885                 pending->rf = NULL;
1886                 return false; /* incomplete */
1887         }
1888
1889         if (rf->is_release())
1890                 return true; /* complete */
1891
1892         /* else relaxed write
1893          * - check for fence-release in the same thread (29.8, stmt. 3)
1894          * - check modification order for contiguous subsequence
1895          *   -> rf must be same thread as release */
1896
1897         const ModelAction *fence_release = rf->get_last_fence_release();
1898         /* Synchronize with a fence-release unconditionally; we don't need to
1899          * find any more "contiguous subsequence..." for it */
1900         if (fence_release)
1901                 release_heads->push_back(fence_release);
1902
1903         int tid = id_to_int(rf->get_tid());
1904         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1905         action_list_t *list = &(*thrd_lists)[tid];
1906         action_list_t::const_reverse_iterator rit;
1907
1908         /* Find rf in the thread list */
1909         rit = std::find(list->rbegin(), list->rend(), rf);
1910         ASSERT(rit != list->rend());
1911
1912         /* Find the last {write,fence}-release */
1913         for (; rit != list->rend(); rit++) {
1914                 if (fence_release && *(*rit) < *fence_release)
1915                         break;
1916                 if ((*rit)->is_release())
1917                         break;
1918         }
1919         if (rit == list->rend()) {
1920                 /* No write-release in this thread */
1921                 return true; /* complete */
1922         } else if (fence_release && *(*rit) < *fence_release) {
1923                 /* The fence-release is more recent (and so, "stronger") than
1924                  * the most recent write-release */
1925                 return true; /* complete */
1926         } /* else, need to establish contiguous release sequence */
1927         ModelAction *release = *rit;
1928
1929         ASSERT(rf->same_thread(release));
1930
1931         pending->writes.clear();
1932
1933         bool certain = true;
1934         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1935                 if (id_to_int(rf->get_tid()) == (int)i)
1936                         continue;
1937                 list = &(*thrd_lists)[i];
1938
1939                 /* Can we ensure no future writes from this thread may break
1940                  * the release seq? */
1941                 bool future_ordered = false;
1942
1943                 ModelAction *last = get_last_action(int_to_id(i));
1944                 Thread *th = get_thread(int_to_id(i));
1945                 if ((last && rf->happens_before(last)) ||
1946                                 !is_enabled(th) ||
1947                                 th->is_complete())
1948                         future_ordered = true;
1949
1950                 ASSERT(!th->is_model_thread() || future_ordered);
1951
1952                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1953                         const ModelAction *act = *rit;
1954                         /* Reached synchronization -> this thread is complete */
1955                         if (act->happens_before(release))
1956                                 break;
1957                         if (rf->happens_before(act)) {
1958                                 future_ordered = true;
1959                                 continue;
1960                         }
1961
1962                         /* Only non-RMW writes can break release sequences */
1963                         if (!act->is_write() || act->is_rmw())
1964                                 continue;
1965
1966                         /* Check modification order */
1967                         if (mo_graph->checkReachable(rf, act)) {
1968                                 /* rf --mo--> act */
1969                                 future_ordered = true;
1970                                 continue;
1971                         }
1972                         if (mo_graph->checkReachable(act, release))
1973                                 /* act --mo--> release */
1974                                 break;
1975                         if (mo_graph->checkReachable(release, act) &&
1976                                       mo_graph->checkReachable(act, rf)) {
1977                                 /* release --mo-> act --mo--> rf */
1978                                 return true; /* complete */
1979                         }
1980                         /* act may break release sequence */
1981                         pending->writes.push_back(act);
1982                         certain = false;
1983                 }
1984                 if (!future_ordered)
1985                         certain = false; /* This thread is uncertain */
1986         }
1987
1988         if (certain) {
1989                 release_heads->push_back(release);
1990                 pending->writes.clear();
1991         } else {
1992                 pending->release = release;
1993                 pending->rf = rf;
1994         }
1995         return certain;
1996 }
1997
1998 /**
1999  * An interface for getting the release sequence head(s) with which a
2000  * given ModelAction must synchronize. This function only returns a non-empty
2001  * result when it can locate a release sequence head with certainty. Otherwise,
2002  * it may mark the internal state of the ModelChecker so that it will handle
2003  * the release sequence at a later time, causing @a acquire to update its
2004  * synchronization at some later point in execution.
2005  *
2006  * @param acquire The 'acquire' action that may synchronize with a release
2007  * sequence
2008  * @param read The read action that may read from a release sequence; this may
2009  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2010  * when 'acquire' is a fence-acquire)
2011  * @param release_heads A pass-by-reference return parameter. Will be filled
2012  * with the head(s) of the release sequence(s), if they exist with certainty.
2013  * @see ModelChecker::release_seq_heads
2014  */
2015 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2016                 ModelAction *read, rel_heads_list_t *release_heads)
2017 {
2018         const ModelAction *rf = read->get_reads_from();
2019         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2020         sequence->acquire = acquire;
2021         sequence->read = read;
2022
2023         if (!release_seq_heads(rf, release_heads, sequence)) {
2024                 /* add act to 'lazy checking' list */
2025                 pending_rel_seqs->push_back(sequence);
2026         } else {
2027                 snapshot_free(sequence);
2028         }
2029 }
2030
2031 /**
2032  * Attempt to resolve all stashed operations that might synchronize with a
2033  * release sequence for a given location. This implements the "lazy" portion of
2034  * determining whether or not a release sequence was contiguous, since not all
2035  * modification order information is present at the time an action occurs.
2036  *
2037  * @param location The location/object that should be checked for release
2038  * sequence resolutions. A NULL value means to check all locations.
2039  * @param work_queue The work queue to which to add work items as they are
2040  * generated
2041  * @return True if any updates occurred (new synchronization, new mo_graph
2042  * edges)
2043  */
2044 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2045 {
2046         bool updated = false;
2047         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2048         while (it != pending_rel_seqs->end()) {
2049                 struct release_seq *pending = *it;
2050                 ModelAction *acquire = pending->acquire;
2051                 const ModelAction *read = pending->read;
2052
2053                 /* Only resolve sequences on the given location, if provided */
2054                 if (location && read->get_location() != location) {
2055                         it++;
2056                         continue;
2057                 }
2058
2059                 const ModelAction *rf = read->get_reads_from();
2060                 rel_heads_list_t release_heads;
2061                 bool complete;
2062                 complete = release_seq_heads(rf, &release_heads, pending);
2063                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2064                         if (!acquire->has_synchronized_with(release_heads[i])) {
2065                                 if (acquire->synchronize_with(release_heads[i]))
2066                                         updated = true;
2067                                 else
2068                                         set_bad_synchronization();
2069                         }
2070                 }
2071
2072                 if (updated) {
2073                         /* Re-check all pending release sequences */
2074                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2075                         /* Re-check read-acquire for mo_graph edges */
2076                         if (acquire->is_read())
2077                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2078
2079                         /* propagate synchronization to later actions */
2080                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2081                         for (; (*rit) != acquire; rit++) {
2082                                 ModelAction *propagate = *rit;
2083                                 if (acquire->happens_before(propagate)) {
2084                                         propagate->synchronize_with(acquire);
2085                                         /* Re-check 'propagate' for mo_graph edges */
2086                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2087                                 }
2088                         }
2089                 }
2090                 if (complete) {
2091                         it = pending_rel_seqs->erase(it);
2092                         snapshot_free(pending);
2093                 } else {
2094                         it++;
2095                 }
2096         }
2097
2098         // If we resolved any release sequences (adding synchronization), see if we have realized a data race.
2099         checkDataRaces();
2100
2101         return updated;
2102 }
2103
2104 /**
2105  * Performs various bookkeeping operations for the current ModelAction. For
2106  * instance, adds action to the per-object, per-thread action vector and to the
2107  * action trace list of all thread actions.
2108  *
2109  * @param act is the ModelAction to add.
2110  */
2111 void ModelChecker::add_action_to_lists(ModelAction *act)
2112 {
2113         int tid = id_to_int(act->get_tid());
2114         ModelAction *uninit = NULL;
2115         int uninit_id = -1;
2116         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2117         if (list->empty() && act->is_atomic_var()) {
2118                 uninit = new_uninitialized_action(act->get_location());
2119                 uninit_id = id_to_int(uninit->get_tid());
2120                 list->push_back(uninit);
2121         }
2122         list->push_back(act);
2123
2124         action_trace->push_back(act);
2125         if (uninit)
2126                 action_trace->push_front(uninit);
2127
2128         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2129         if (tid >= (int)vec->size())
2130                 vec->resize(priv->next_thread_id);
2131         (*vec)[tid].push_back(act);
2132         if (uninit)
2133                 (*vec)[uninit_id].push_front(uninit);
2134
2135         if ((int)thrd_last_action->size() <= tid)
2136                 thrd_last_action->resize(get_num_threads());
2137         (*thrd_last_action)[tid] = act;
2138         if (uninit)
2139                 (*thrd_last_action)[uninit_id] = uninit;
2140
2141         if (act->is_fence() && act->is_release()) {
2142                 if ((int)thrd_last_fence_release->size() <= tid)
2143                         thrd_last_fence_release->resize(get_num_threads());
2144                 (*thrd_last_fence_release)[tid] = act;
2145         }
2146
2147         if (act->is_wait()) {
2148                 void *mutex_loc = (void *) act->get_value();
2149                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2150
2151                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2152                 if (tid >= (int)vec->size())
2153                         vec->resize(priv->next_thread_id);
2154                 (*vec)[tid].push_back(act);
2155         }
2156 }
2157
2158 /**
2159  * @brief Get the last action performed by a particular Thread
2160  * @param tid The thread ID of the Thread in question
2161  * @return The last action in the thread
2162  */
2163 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2164 {
2165         int threadid = id_to_int(tid);
2166         if (threadid < (int)thrd_last_action->size())
2167                 return (*thrd_last_action)[id_to_int(tid)];
2168         else
2169                 return NULL;
2170 }
2171
2172 /**
2173  * @brief Get the last fence release performed by a particular Thread
2174  * @param tid The thread ID of the Thread in question
2175  * @return The last fence release in the thread, if one exists; NULL otherwise
2176  */
2177 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2178 {
2179         int threadid = id_to_int(tid);
2180         if (threadid < (int)thrd_last_fence_release->size())
2181                 return (*thrd_last_fence_release)[id_to_int(tid)];
2182         else
2183                 return NULL;
2184 }
2185
2186 /**
2187  * Gets the last memory_order_seq_cst write (in the total global sequence)
2188  * performed on a particular object (i.e., memory location), not including the
2189  * current action.
2190  * @param curr The current ModelAction; also denotes the object location to
2191  * check
2192  * @return The last seq_cst write
2193  */
2194 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2195 {
2196         void *location = curr->get_location();
2197         action_list_t *list = get_safe_ptr_action(obj_map, location);
2198         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2199         action_list_t::reverse_iterator rit;
2200         for (rit = list->rbegin(); rit != list->rend(); rit++)
2201                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2202                         return *rit;
2203         return NULL;
2204 }
2205
2206 /**
2207  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2208  * performed in a particular thread, prior to a particular fence.
2209  * @param tid The ID of the thread to check
2210  * @param before_fence The fence from which to begin the search; if NULL, then
2211  * search for the most recent fence in the thread.
2212  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2213  */
2214 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2215 {
2216         /* All fences should have NULL location */
2217         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2218         action_list_t::reverse_iterator rit = list->rbegin();
2219
2220         if (before_fence) {
2221                 for (; rit != list->rend(); rit++)
2222                         if (*rit == before_fence)
2223                                 break;
2224
2225                 ASSERT(*rit == before_fence);
2226                 rit++;
2227         }
2228
2229         for (; rit != list->rend(); rit++)
2230                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2231                         return *rit;
2232         return NULL;
2233 }
2234
2235 /**
2236  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2237  * location). This function identifies the mutex according to the current
2238  * action, which is presumed to operate on the same mutex.
2239  * @param curr The current ModelAction; also denotes the object location to
2240  * check
2241  * @return The last unlock operation
2242  */
2243 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2244 {
2245         void *location = curr->get_location();
2246         action_list_t *list = get_safe_ptr_action(obj_map, location);
2247         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2248         action_list_t::reverse_iterator rit;
2249         for (rit = list->rbegin(); rit != list->rend(); rit++)
2250                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2251                         return *rit;
2252         return NULL;
2253 }
2254
2255 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2256 {
2257         ModelAction *parent = get_last_action(tid);
2258         if (!parent)
2259                 parent = get_thread(tid)->get_creation();
2260         return parent;
2261 }
2262
2263 /**
2264  * Returns the clock vector for a given thread.
2265  * @param tid The thread whose clock vector we want
2266  * @return Desired clock vector
2267  */
2268 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2269 {
2270         return get_parent_action(tid)->get_cv();
2271 }
2272
2273 /**
2274  * Resolve a set of Promises with a current write. The set is provided in the
2275  * Node corresponding to @a write.
2276  * @param write The ModelAction that is fulfilling Promises
2277  * @return True if promises were resolved; false otherwise
2278  */
2279 bool ModelChecker::resolve_promises(ModelAction *write)
2280 {
2281         bool haveResolved = false;
2282         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2283         promise_list_t mustResolve, resolved;
2284
2285         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2286                 Promise *promise = (*promises)[promise_index];
2287                 if (write->get_node()->get_promise(i)) {
2288                         ModelAction *read = promise->get_action();
2289                         read_from(read, write);
2290                         //Make sure the promise's value matches the write's value
2291                         ASSERT(promise->is_compatible(write));
2292                         mo_graph->resolvePromise(read, write, &mustResolve);
2293
2294                         resolved.push_back(promise);
2295                         promises->erase(promises->begin() + promise_index);
2296                         actions_to_check.push_back(read);
2297
2298                         haveResolved = true;
2299                 } else
2300                         promise_index++;
2301         }
2302
2303         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2304                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2305                                 == resolved.end())
2306                         priv->failed_promise = true;
2307         }
2308         for (unsigned int i = 0; i < resolved.size(); i++)
2309                 delete resolved[i];
2310         //Check whether reading these writes has made threads unable to
2311         //resolve promises
2312
2313         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2314                 ModelAction *read = actions_to_check[i];
2315                 mo_check_promises(read, true);
2316         }
2317
2318         return haveResolved;
2319 }
2320
2321 /**
2322  * Compute the set of promises that could potentially be satisfied by this
2323  * action. Note that the set computation actually appears in the Node, not in
2324  * ModelChecker.
2325  * @param curr The ModelAction that may satisfy promises
2326  */
2327 void ModelChecker::compute_promises(ModelAction *curr)
2328 {
2329         for (unsigned int i = 0; i < promises->size(); i++) {
2330                 Promise *promise = (*promises)[i];
2331                 const ModelAction *act = promise->get_action();
2332                 if (!act->happens_before(curr) &&
2333                                 act->is_read() &&
2334                                 !act->could_synchronize_with(curr) &&
2335                                 !act->same_thread(curr) &&
2336                                 act->get_location() == curr->get_location() &&
2337                                 promise->get_value() == curr->get_value()) {
2338                         curr->get_node()->set_promise(i, act->is_rmw());
2339                 }
2340         }
2341 }
2342
2343 /** Checks promises in response to a change in a thread's ClockVector. */
2344 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2345 {
2346         for (unsigned int i = 0; i < promises->size(); i++) {
2347                 Promise *promise = (*promises)[i];
2348                 const ModelAction *act = promise->get_action();
2349                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2350                                 merge_cv->synchronized_since(act)) {
2351                         if (promise->eliminate_thread(tid)) {
2352                                 //Promise has failed
2353                                 priv->failed_promise = true;
2354                                 return;
2355                         }
2356                 }
2357         }
2358 }
2359
2360 void ModelChecker::check_promises_thread_disabled()
2361 {
2362         for (unsigned int i = 0; i < promises->size(); i++) {
2363                 Promise *promise = (*promises)[i];
2364                 if (promise->has_failed()) {
2365                         priv->failed_promise = true;
2366                         return;
2367                 }
2368         }
2369 }
2370
2371 /**
2372  * @brief Checks promises in response to addition to modification order for
2373  * threads.
2374  *
2375  * We test whether threads are still available for satisfying promises after an
2376  * addition to our modification order constraints. Those that are unavailable
2377  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2378  * that promise has failed.
2379  *
2380  * @param act The ModelAction which updated the modification order
2381  * @param is_read_check Should be true if act is a read and we must check for
2382  * updates to the store from which it read (there is a distinction here for
2383  * RMW's, which are both a load and a store)
2384  */
2385 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2386 {
2387         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2388
2389         for (unsigned int i = 0; i < promises->size(); i++) {
2390                 Promise *promise = (*promises)[i];
2391                 const ModelAction *pread = promise->get_action();
2392
2393                 // Is this promise on the same location?
2394                 if (!pread->same_var(write))
2395                         continue;
2396
2397                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2398                         priv->failed_promise = true;
2399                         return;
2400                 }
2401
2402                 // Don't do any lookups twice for the same thread
2403                 if (!promise->thread_is_available(act->get_tid()))
2404                         continue;
2405
2406                 if (mo_graph->checkReachable(promise, write)) {
2407                         if (mo_graph->checkPromise(write, promise)) {
2408                                 priv->failed_promise = true;
2409                                 return;
2410                         }
2411                 }
2412         }
2413 }
2414
2415 /**
2416  * Compute the set of writes that may break the current pending release
2417  * sequence. This information is extracted from previous release sequence
2418  * calculations.
2419  *
2420  * @param curr The current ModelAction. Must be a release sequence fixup
2421  * action.
2422  */
2423 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2424 {
2425         if (pending_rel_seqs->empty())
2426                 return;
2427
2428         struct release_seq *pending = pending_rel_seqs->back();
2429         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2430                 const ModelAction *write = pending->writes[i];
2431                 curr->get_node()->add_relseq_break(write);
2432         }
2433
2434         /* NULL means don't break the sequence; just synchronize */
2435         curr->get_node()->add_relseq_break(NULL);
2436 }
2437
2438 /**
2439  * Build up an initial set of all past writes that this 'read' action may read
2440  * from. This set is determined by the clock vector's "happens before"
2441  * relationship.
2442  * @param curr is the current ModelAction that we are exploring; it must be a
2443  * 'read' operation.
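 *
 * A minimal sketch (assumes a user program with a std::atomic<int> 'x' and
 * no synchronization between the two threads):
 * @code
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);      // W1
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);  // curr
 * @endcode
 * Neither W1 nor the location's initial store happens after curr, so both
 * become candidates in curr's may-read-from set, subject to the
 * per-candidate feasibility check below.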
2444  */
2445 void ModelChecker::build_reads_from_past(ModelAction *curr)
2446 {
2447         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2448         unsigned int i;
2449         ASSERT(curr->is_read());
2450
2451         ModelAction *last_sc_write = NULL;
2452
2453         if (curr->is_seqcst())
2454                 last_sc_write = get_last_seq_cst_write(curr);
2455
2456         /* Iterate over all threads */
2457         for (i = 0; i < thrd_lists->size(); i++) {
2458                 /* Iterate over actions in thread, starting from most recent */
2459                 action_list_t *list = &(*thrd_lists)[i];
2460                 action_list_t::reverse_iterator rit;
2461                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2462                         ModelAction *act = *rit;
2463
2464                         /* Only consider 'write' actions */
2465                         if (!act->is_write() || act == curr)
2466                                 continue;
2467
2468                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2469                         bool allow_read = true;
2470
2471                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2472                                 allow_read = false;
2473                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2474                                 allow_read = false;
2475
2476                         if (allow_read) {
2477                                 /* Only add feasible reads */
2478                                 mo_graph->startChanges();
2479                                 r_modification_order(curr, act);
2480                                 if (!is_infeasible())
2481                                         curr->get_node()->add_read_from(act);
2482                                 mo_graph->rollbackChanges();
2483                         }
2484
2485                         /* Include at most one act per-thread that "happens before" curr */
2486                         if (act->happens_before(curr))
2487                                 break;
2488                 }
2489         }
2490         /* We may find no valid may-read-from only if the execution is doomed */
2491         if (!curr->get_node()->get_read_from_size()) {
2492                 priv->no_valid_reads = true;
2493                 set_assert();
2494         }
2495
2496         if (DBG_ENABLED()) {
2497                 model_print("Reached read action:\n");
2498                 curr->print();
2499                 model_print("Printing may_read_from\n");
2500                 curr->get_node()->print_may_read_from();
2501                 model_print("End printing may_read_from\n");
2502         }
2503 }
2504
2505 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2506 {
2507         while (true) {
2508                 /* UNINIT actions don't have a Node, and they never sleep */
2509                 if (write->is_uninitialized())
2510                         return true;
2511                 Node *prevnode = write->get_node()->get_parent();
2512
2513                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2514                 if (write->is_release() && thread_sleep)
2515                         return true;
2516                 if (!write->is_rmw()) {
2517                         return false;
2518                 }
2519                 if (write->get_reads_from() == NULL)
2520                         return true;
2521                 write = write->get_reads_from();
2522         }
2523 }
2524
2525 /**
2526  * @brief Create a new action representing an uninitialized atomic
2527  * @param location The memory location of the atomic object
2528  * @return A pointer to a new ModelAction
2529  */
2530 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2531 {
2532         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2533         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2534         act->create_cv(NULL);
2535         return act;
2536 }
2537
2538 static void print_list(action_list_t *list)
2539 {
2540         action_list_t::iterator it;
2541
2542         model_print("---------------------------------------------------------------------\n");
2543
2544         unsigned int hash = 0;
2545
2546         for (it = list->begin(); it != list->end(); it++) {
2547                 (*it)->print();
2548                 hash = hash^(hash<<3)^((*it)->hash());
2549         }
2550         model_print("HASH %u\n", hash);
2551         model_print("---------------------------------------------------------------------\n");
2552 }
2553
2554 #if SUPPORT_MOD_ORDER_DUMP
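/**
 * @brief Dump the modification order graph plus reads-from (rf) and
 * sequenced-before (sb) edges for the current trace in Graphviz format
 */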
2555 void ModelChecker::dumpGraph(char *filename) const
2556 {
2557         char buffer[200];
2558         sprintf(buffer, "%s.dot", filename);
2559         FILE *file = fopen(buffer, "w");
        if (!file) {
                perror("fopen");
                return;
        }
2560         fprintf(file, "digraph %s {\n", filename);
2561         mo_graph->dumpNodes(file);
2562         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2563
2564         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2565                 ModelAction *action = *it;
2566                 if (action->is_read()) {
2567                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2568                         if (action->get_reads_from() != NULL)
2569                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2570                 }
2571                 if (thread_array[action->get_tid()] != NULL) {
2572                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2573                 }
2574
2575                 thread_array[action->get_tid()] = action;
2576         }
2577         fprintf(file, "}\n");
2578         model_free(thread_array);
2579         fclose(file);
2580 }
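/*
 * The output is ordinary Graphviz; assuming the `dot` tool is installed,
 * a sketch of how one might render a dump:
 *
 *   dot -Tpng graph0000.dot -o graph0000.png
 */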
2581 #endif
2582
2583 /** @brief Prints an execution trace summary. */
2584 void ModelChecker::print_summary() const
2585 {
2586 #if SUPPORT_MOD_ORDER_DUMP
2587         char buffername[100];
2588         sprintf(buffername, "exec%04u", stats.num_total);
2589         mo_graph->dumpGraphToFile(buffername);
2590         sprintf(buffername, "graph%04u", stats.num_total);
2591         dumpGraph(buffername);
2592 #endif
2593
2594         model_print("Execution %d:", stats.num_total);
2595         if (isfeasibleprefix())
2596                 model_print("\n");
2597         else
2598                 print_infeasibility(" INFEASIBLE");
2599         print_list(action_trace);
2600         model_print("\n");
2601 }
2602
2603 /**
2604  * Add a Thread to the system for the first time. Should only be called once
2605  * per thread.
2606  * @param t The Thread to add
2607  */
2608 void ModelChecker::add_thread(Thread *t)
2609 {
2610         thread_map->put(id_to_int(t->get_id()), t);
2611         scheduler->add_thread(t);
2612 }
2613
2614 /**
2615  * Removes a thread from the scheduler.
2616  * @param t The Thread to remove
2617  */
2618 void ModelChecker::remove_thread(Thread *t)
2619 {
2620         scheduler->remove_thread(t);
2621 }
2622
2623 /**
2624  * @brief Get a Thread reference by its ID
2625  * @param tid The Thread's ID
2626  * @return A Thread reference
2627  */
2628 Thread * ModelChecker::get_thread(thread_id_t tid) const
2629 {
2630         return thread_map->get(id_to_int(tid));
2631 }
2632
2633 /**
2634  * @brief Get a reference to the Thread in which a ModelAction was executed
2635  * @param act The ModelAction
2636  * @return A Thread reference
2637  */
2638 Thread * ModelChecker::get_thread(const ModelAction *act) const
2639 {
2640         return get_thread(act->get_tid());
2641 }
2642
2643 /**
2644  * @brief Check if a Thread is currently enabled
2645  * @param t The Thread to check
2646  * @return True if the Thread is currently enabled
2647  */
2648 bool ModelChecker::is_enabled(Thread *t) const
2649 {
2650         return scheduler->is_enabled(t);
2651 }
2652
2653 /**
2654  * @brief Check if a Thread is currently enabled
2655  * @param tid The ID of the Thread to check
2656  * @return True if the Thread is currently enabled
2657  */
2658 bool ModelChecker::is_enabled(thread_id_t tid) const
2659 {
2660         return scheduler->is_enabled(tid);
2661 }
2662
2663 /**
2664  * Switch from a user-context to the "master thread" context (a.k.a. system
2665  * context). This switch is made with the intention of exploring a particular
2666  * model-checking action (described by a ModelAction object). Must be called
2667  * from a user-thread context.
2668  *
2669  * @param act The current action that will be explored. May be NULL only if
2670  * trace is exiting via an assertion (see ModelChecker::set_assert and
2671  * ModelChecker::has_asserted).
2672  * @return The value returned by the current action
2673  */
2674 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2675 {
2676         DBG();
2677         Thread *old = thread_current();
2678         set_current_action(act);
2679         if (Thread::swap(old, &system_context) < 0) {
2680                 perror("swap threads");
2681                 exit(EXIT_FAILURE);
2682         }
2683         return old->get_return_value();
2684 }
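/*
 * A sketch of a typical call site (hypothetical wrapper; the real atomic
 * front ends live outside this file): a user thread packages its pending
 * operation as a ModelAction, hands it to the master, and picks up the
 * result once it is scheduled again.
 *
 *   uint64_t example_atomic_load(void *obj, memory_order order)
 *   {
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, order, obj));
 *   }
 */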
2685
2686 /**
2687  * Takes the next step in the execution, if possible.
2688  * @param curr The current step to take
2689  * @return True if a step was taken (success), false otherwise
2690  */
2691 bool ModelChecker::take_step(ModelAction *curr)
2692 {
2693         if (has_asserted())
2694                 return false;
2695
2696         Thread *curr_thrd = get_thread(curr);
2697         ASSERT(curr_thrd->get_state() == THREAD_READY);
2698
2699         curr = check_current_action(curr);
2700
2701         /* Infeasible -> don't take any more steps */
2702         if (is_infeasible())
2703                 return false;
2704         else if (isfeasibleprefix() && have_bug_reports()) {
2705                 set_assert();
2706                 return false;
2707         }
2708
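        /*
         * Optional step bound: params.bound == 0 means unbounded; otherwise,
         * stop this execution once it has consumed more sequence numbers
         * than the bound allows.
         */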
2709         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
                return false;
2712
2713         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2714                 scheduler->remove_thread(curr_thrd);
2715
2716         Thread *next_thrd = get_next_thread(curr);
2717         next_thrd = scheduler->next_thread(next_thrd);
2718
2719         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2720                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2721
2722         /*
2723          * Launch end-of-execution release sequence fixups only when there are:
2724          *
2725          * (1) no more user threads to run (or when execution replay chooses
2726          *     the 'model_thread')
2727          * (2) pending release sequences
2728          * (3) pending assertions (i.e., data races)
2729          * (4) no pending promises
2730          */
2731         if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2732                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2733                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2734                                 pending_rel_seqs->size());
2735                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2736                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2737                                 model_thread);
2738                 set_current_action(fixup);
2739                 return true;
2740         }
2741
2742         /* next_thrd == NULL -> don't take any more steps */
2743         if (!next_thrd)
2744                 return false;
2745
2746         if (next_thrd->get_pending() != NULL) {
2747                 /* restart a pending action */
2748                 set_current_action(next_thrd->get_pending());
2749                 next_thrd->set_pending(NULL);
2750                 return true;
2751         }
2752
2753         /* Return false only if swap fails with an error */
2754         return (Thread::swap(&system_context, next_thrd) == 0);
2755 }
2756
2757 /** Wrapper to run the user's main function, with appropriate arguments */
2758 void user_main_wrapper(void *)
2759 {
2760         user_main(model->params.argc, model->params.argv);
2761 }
2762
2763 /** @brief Run ModelChecker for the user program */
2764 void ModelChecker::run()
2765 {
2766         do {
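                /*
                 * Each iteration explores one execution: create the initial
                 * user thread, drive the program via take_step() until it
                 * returns false, then let next_execution() decide whether
                 * another execution remains to be explored.
                 */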
2767                 thrd_t user_thread;
2768                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2769
2770                 add_thread(t);
2771
2772                 /* Run user thread up to its first action */
2773                 scheduler->next_thread(t);
2774                 Thread::swap(&system_context, t);
2775
2776                 /* Wait for all threads to complete */
2777                 while (take_step(priv->current_action));
2778         } while (next_execution());
2779
2780         print_stats();
2781 }