model: stash all pending actions immediately
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
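/** @brief Global pointer to the single ModelChecker instance */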
21 ModelChecker *model;
22
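/** @brief Snapshot-allocated wrapper for one formatted bug-report message */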
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
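/** @brief Get the action list for a memory location, creating an empty list if none exists yet */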
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
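/** @brief Same as get_safe_ptr_action(), but for the per-thread vectors of action lists */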
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         snapshot_backtrack_before(0);
163 }
164
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
167 {
168         return priv->next_thread_id++;
169 }
170
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
173 {
174         return priv->next_thread_id;
175 }
176
177 /**
178  * Must be called from user-thread context (e.g., through the global
179  * thread_current() interface)
180  *
181  * @return The currently executing Thread.
182  */
183 Thread * ModelChecker::get_current_thread() const
184 {
185         return scheduler->get_current_thread();
186 }
187
188 /** @return a sequence number for a new ModelAction */
189 modelclock_t ModelChecker::get_next_seq_num()
190 {
191         return ++priv->used_sequence_numbers;
192 }
193
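/** @return the current Node, i.e., the head of the NodeStack */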
194 Node * ModelChecker::get_curr_node() const
195 {
196         return node_stack->get_head();
197 }
198
199 /**
200  * @brief Choose the next thread to execute.
201  *
202  * This function chooses the next thread that should execute. It can force the
203  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
204  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
205  * The model-checker may have no preference regarding the next thread (i.e.,
206  * when exploring a new execution ordering), in which case this will return
207  * NULL.
208  * @param curr The current ModelAction. This action might guide the choice of
209  * next thread.
210  * @return The next thread to run. If the model-checker has no preference, NULL.
211  */
212 Thread * ModelChecker::get_next_thread(ModelAction *curr)
213 {
214         thread_id_t tid;
215
216         if (curr != NULL) {
217                 /* Do not split atomic actions. */
218                 if (curr->is_rmwr())
219                         return get_thread(curr);
220                 else if (curr->get_type() == THREAD_CREATE)
221                         return curr->get_thread_operand();
222         }
223
224         /* Have we completed exploring the preselected path? */
225         if (diverge == NULL)
226                 return NULL;
227
228         /* Else, we are trying to replay an execution */
229         ModelAction *next = node_stack->get_next()->get_action();
230
231         if (next == diverge) {
232                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
233                         earliest_diverge = diverge;
234
235                 Node *nextnode = next->get_node();
236                 Node *prevnode = nextnode->get_parent();
237                 scheduler->update_sleep_set(prevnode);
238
239                 /* Reached divergence point */
240                 if (nextnode->increment_misc()) {
241                         /* The next node will try to satisfy a different misc_index value. */
242                         tid = next->get_tid();
243                         node_stack->pop_restofstack(2);
244                 } else if (nextnode->increment_promise()) {
245                         /* The next node will try to satisfy a different set of promises. */
246                         tid = next->get_tid();
247                         node_stack->pop_restofstack(2);
248                 } else if (nextnode->increment_read_from()) {
249                         /* The next node will read from a different value. */
250                         tid = next->get_tid();
251                         node_stack->pop_restofstack(2);
252                 } else if (nextnode->increment_future_value()) {
253                         /* The next node will try to read from a different future value. */
254                         tid = next->get_tid();
255                         node_stack->pop_restofstack(2);
256                 } else if (nextnode->increment_relseq_break()) {
257                         /* The next node will try to resolve a release sequence differently */
258                         tid = next->get_tid();
259                         node_stack->pop_restofstack(2);
260                 } else {
261                         ASSERT(prevnode);
262                         /* Make a different thread execute for next step */
263                         scheduler->add_sleep(get_thread(next->get_tid()));
264                         tid = prevnode->get_next_backtrack();
265                         /* Make sure the backtracked thread isn't sleeping. */
266                         node_stack->pop_restofstack(1);
267                         if (diverge == earliest_diverge) {
268                                 earliest_diverge = prevnode->get_action();
269                         }
270                 }
271                 /* The correct sleep set is in the parent node. */
272                 execute_sleep_set();
273
274                 DEBUG("*** Divergence point ***\n");
275
276                 diverge = NULL;
277         } else {
278                 tid = next->get_tid();
279         }
280         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
281         ASSERT(tid != THREAD_ID_T_NONE);
282         return thread_map->get(id_to_int(tid));
283 }
284
285 /**
286  * We need to know what the next actions of all threads in the sleep
287  * set will be.  These actions are already stashed as each Thread's
288  * pending action, so this method simply marks them with the sleep flag.
289  */
290
291 void ModelChecker::execute_sleep_set()
292 {
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *thr = get_thread(tid);
296                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
297                         thr->get_pending()->set_sleep_flag();
298                 }
299         }
300 }
301
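/**
 * @brief Wake up any sleeping threads made relevant by the current action
 *
 * A thread is removed from the sleep set if its pending action could
 * synchronize with @a curr (skipped when @a curr is the read half of an RMW).
 */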
302 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
303 {
304         for (unsigned int i = 0; i < get_num_threads(); i++) {
305                 Thread *thr = get_thread(int_to_id(i));
306                 if (scheduler->is_sleep_set(thr)) {
307                         ModelAction *pending_act = thr->get_pending();
308                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
309                                 //Remove this thread from sleep set
310                                 scheduler->remove_sleep(thr);
311                 }
312         }
313 }
314
315 /** @brief Alert the model-checker that an incorrectly-ordered
316  * synchronization was made */
317 void ModelChecker::set_bad_synchronization()
318 {
319         priv->bad_synchronization = true;
320 }
321
322 bool ModelChecker::has_asserted() const
323 {
324         return priv->asserted;
325 }
326
327 void ModelChecker::set_assert()
328 {
329         priv->asserted = true;
330 }
331
332 /**
333  * Check if we are in a deadlock. Should only be called at the end of an
334  * execution, although it should not give false positives in the middle of an
335  * execution (there should be some ENABLED thread).
336  *
337  * @return True if program is in a deadlock; false otherwise
338  */
339 bool ModelChecker::is_deadlocked() const
340 {
341         bool blocking_threads = false;
342         for (unsigned int i = 0; i < get_num_threads(); i++) {
343                 thread_id_t tid = int_to_id(i);
344                 if (is_enabled(tid))
345                         return false;
346                 Thread *t = get_thread(tid);
347                 if (!t->is_model_thread() && t->get_pending())
348                         blocking_threads = true;
349         }
350         return blocking_threads;
351 }
352
353 /**
354  * Check if this is a complete execution. That is, have all threads completed
355  * execution (rather than exiting because sleep sets have forced a redundant
356  * execution).
357  *
358  * @return True if the execution is complete.
359  */
360 bool ModelChecker::is_complete_execution() const
361 {
362         for (unsigned int i = 0; i < get_num_threads(); i++)
363                 if (is_enabled(int_to_id(i)))
364                         return false;
365         return true;
366 }
367
368 /**
369  * @brief Assert a bug in the executing program.
370  *
371  * Use this function to assert any sort of bug in the user program. If the
372  * current trace is feasible (actually, a prefix of some feasible execution),
373  * then this execution will be aborted, printing the appropriate message. If
374  * the current trace is not yet feasible, the error message will be stashed and
375  * printed if the execution ever becomes feasible.
376  *
377  * @param msg Descriptive message for the bug (do not include newline char)
378  * @return True if bug is immediately-feasible
379  */
380 bool ModelChecker::assert_bug(const char *msg)
381 {
382         priv->bugs.push_back(new bug_message(msg));
383
384         if (isfeasibleprefix()) {
385                 set_assert();
386                 return true;
387         }
388         return false;
389 }
390
391 /**
392  * @brief Assert a bug in the executing program, asserted by a user thread
393  * @see ModelChecker::assert_bug
394  * @param msg Descriptive message for the bug (do not include newline char)
395  */
396 void ModelChecker::assert_user_bug(const char *msg)
397 {
398         /* If feasible bug, bail out now */
399         if (assert_bug(msg))
400                 switch_to_master(NULL);
401 }
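/*
 * Example (hypothetical user-side usage): instrumented code can report a bug
 * through the global model pointer:
 *
 *   model->assert_user_bug("invalid state reached");
 *
 * If the current trace is feasible, the execution aborts and the message is
 * printed; otherwise the message is stashed for later, as described above.
 */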
402
403 /** @return True, if any bugs have been reported for this execution */
404 bool ModelChecker::have_bug_reports() const
405 {
406         return priv->bugs.size() != 0;
407 }
408
409 /** @brief Print bug report listing for this execution (if any bugs exist) */
410 void ModelChecker::print_bugs() const
411 {
412         if (have_bug_reports()) {
413                 model_print("Bug report: %zu bug%s detected\n",
414                                 priv->bugs.size(),
415                                 priv->bugs.size() > 1 ? "s" : "");
416                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
417                         priv->bugs[i]->print();
418         }
419 }
420
421 /**
422  * @brief Record end-of-execution stats
423  *
424  * Must be run when exiting an execution. Records various stats.
425  * @see struct execution_stats
426  */
427 void ModelChecker::record_stats()
428 {
429         stats.num_total++;
430         if (!isfeasibleprefix())
431                 stats.num_infeasible++;
432         else if (have_bug_reports())
433                 stats.num_buggy_executions++;
434         else if (is_complete_execution())
435                 stats.num_complete++;
436         else
437                 stats.num_redundant++;
438 }
439
440 /** @brief Print execution stats */
441 void ModelChecker::print_stats() const
442 {
443         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
444         model_print("Number of redundant executions: %d\n", stats.num_redundant);
445         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
446         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
447         model_print("Total executions: %d\n", stats.num_total);
448         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
449 }
450
451 /**
452  * @brief End-of-execution print
453  * @param printbugs Should any existing bugs be printed?
454  */
455 void ModelChecker::print_execution(bool printbugs) const
456 {
457         print_program_output();
458
459         if (DBG_ENABLED() || params.verbose) {
460                 model_print("Earliest divergence point since last feasible execution:\n");
461                 if (earliest_diverge)
462                         earliest_diverge->print();
463                 else
464                         model_print("(Not set)\n");
465
466                 model_print("\n");
467                 print_stats();
468         }
469
470         /* Don't print invalid bugs */
471         if (printbugs)
472                 print_bugs();
473
474         model_print("\n");
475         print_summary();
476 }
477
478 /**
479  * Queries the model-checker for more executions to explore and, if one
480  * exists, resets the model-checker state to execute a new execution.
481  *
482  * @return If there are more executions to explore, return true. Otherwise,
483  * return false.
484  */
485 bool ModelChecker::next_execution()
486 {
487         DBG();
488         /* Is this execution a feasible execution that's worth bug-checking? */
489         bool complete = isfeasibleprefix() && (is_complete_execution() ||
490                         have_bug_reports());
491
492         /* End-of-execution bug checks */
493         if (complete) {
494                 if (is_deadlocked())
495                         assert_bug("Deadlock detected");
496
497                 checkDataRaces();
498         }
499
500         record_stats();
501
502         /* Output */
503         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
504                 print_execution(complete);
505         else
506                 clear_program_output();
507
508         if (complete)
509                 earliest_diverge = NULL;
510
511         if ((diverge = get_next_backtrack()) == NULL)
512                 return false;
513
514         if (DBG_ENABLED()) {
515                 model_print("Next execution will diverge at:\n");
516                 diverge->print();
517         }
518
519         reset_to_initial_state();
520         return true;
521 }
522
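/**
 * @brief Find the most recent prior action that conflicts with @a act
 *
 * Used when computing backtracking points. Returns NULL if there is no
 * conflicting action or if @a act does not require backtracking.
 */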
523 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 {
525         switch (act->get_type()) {
526         case ATOMIC_FENCE:
527         case ATOMIC_READ:
528         case ATOMIC_WRITE:
529         case ATOMIC_RMW: {
530                 /* Optimization: relaxed operations don't need backtracking */
531                 if (act->is_relaxed())
532                         return NULL;
533                 /* linear search: from most recent to oldest */
534                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
535                 action_list_t::reverse_iterator rit;
536                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
537                         ModelAction *prev = *rit;
538                         if (prev->could_synchronize_with(act))
539                                 return prev;
540                 }
541                 break;
542         }
543         case ATOMIC_LOCK:
544         case ATOMIC_TRYLOCK: {
545                 /* linear search: from most recent to oldest */
546                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
547                 action_list_t::reverse_iterator rit;
548                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
549                         ModelAction *prev = *rit;
550                         if (act->is_conflicting_lock(prev))
551                                 return prev;
552                 }
553                 break;
554         }
555         case ATOMIC_UNLOCK: {
556                 /* linear search: from most recent to oldest */
557                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
558                 action_list_t::reverse_iterator rit;
559                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
560                         ModelAction *prev = *rit;
561                         if (!act->same_thread(prev) && prev->is_failed_trylock())
562                                 return prev;
563                 }
564                 break;
565         }
566         case ATOMIC_WAIT: {
567                 /* linear search: from most recent to oldest */
568                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
569                 action_list_t::reverse_iterator rit;
570                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
571                         ModelAction *prev = *rit;
572                         if (!act->same_thread(prev) && prev->is_failed_trylock())
573                                 return prev;
574                         if (!act->same_thread(prev) && prev->is_notify())
575                                 return prev;
576                 }
577                 break;
578         }
579
580         case ATOMIC_NOTIFY_ALL:
581         case ATOMIC_NOTIFY_ONE: {
582                 /* linear search: from most recent to oldest */
583                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
584                 action_list_t::reverse_iterator rit;
585                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
586                         ModelAction *prev = *rit;
587                         if (!act->same_thread(prev) && prev->is_wait())
588                                 return prev;
589                 }
590                 break;
591         }
592         default:
593                 break;
594         }
595         return NULL;
596 }
597
598 /** This method finds backtracking points at which we should try to
599  * reorder other actions against the parameter ModelAction.
600  *
601  * @param act The ModelAction to find backtracking points for.
602  */
603 void ModelChecker::set_backtracking(ModelAction *act)
604 {
605         Thread *t = get_thread(act);
606         ModelAction *prev = get_last_conflict(act);
607         if (prev == NULL)
608                 return;
609
610         Node *node = prev->get_node()->get_parent();
611
612         int low_tid, high_tid;
613         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
614                 low_tid = id_to_int(act->get_tid());
615                 high_tid = low_tid + 1;
616         } else {
617                 low_tid = 0;
618                 high_tid = get_num_threads();
619         }
620
621         for (int i = low_tid; i < high_tid; i++) {
622                 thread_id_t tid = int_to_id(i);
623
624                 /* Make sure this thread can be enabled here. */
625                 if (i >= node->get_num_threads())
626                         break;
627
628                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
629                 if (node->enabled_status(tid) != THREAD_ENABLED)
630                         continue;
631
632                 /* Check if this has been explored already */
633                 if (node->has_been_explored(tid))
634                         continue;
635
636                 /* See if fairness allows */
637                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
638                         bool unfair = false;
639                         for (int t = 0; t < node->get_num_threads(); t++) {
640                                 thread_id_t tother = int_to_id(t);
641                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
642                                         unfair = true;
643                                         break;
644                                 }
645                         }
646                         if (unfair)
647                                 continue;
648                 }
649                 /* Cache the latest backtracking point */
650                 set_latest_backtrack(prev);
651
652                 /* If this is a new backtracking point, mark the tree */
653                 if (!node->set_backtrack(tid))
654                         continue;
655                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
656                                         id_to_int(prev->get_tid()),
657                                         id_to_int(t->get_id()));
658                 if (DBG_ENABLED()) {
659                         prev->print();
660                         act->print();
661                 }
662         }
663 }
664
665 /**
666  * @brief Cache a backtracking point as the "most recent", if eligible
667  *
668  * Note that this does not prepare the NodeStack for this backtracking
669  * operation; it only caches the action on a per-execution basis.
670  *
671  * @param act The operation at which we should explore a different next action
672  * (i.e., backtracking point)
673  * @return True, if this action is now the most recent backtracking point;
674  * false otherwise
675  */
676 bool ModelChecker::set_latest_backtrack(ModelAction *act)
677 {
678         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
679                 priv->next_backtrack = act;
680                 return true;
681         }
682         return false;
683 }
684
685 /**
686  * Returns last backtracking point. The model checker will explore a different
687  * path for this point in the next execution.
688  * @return The ModelAction at which the next execution should diverge.
689  */
690 ModelAction * ModelChecker::get_next_backtrack()
691 {
692         ModelAction *next = priv->next_backtrack;
693         priv->next_backtrack = NULL;
694         return next;
695 }
696
697 /**
698  * Processes a read or rmw model action.
699  * @param curr is the read model action to process.
700  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
701  * @return True if processing this read updates the mo_graph.
702  */
703 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
704 {
705         uint64_t value = VALUE_NONE;
706         bool updated = false;
707         while (true) {
708                 const ModelAction *reads_from = curr->get_node()->get_read_from();
709                 if (reads_from != NULL) {
710                         mo_graph->startChanges();
711
712                         value = reads_from->get_value();
713                         bool r_status = false;
714
715                         if (!second_part_of_rmw) {
716                                 check_recency(curr, reads_from);
717                                 r_status = r_modification_order(curr, reads_from);
718                         }
719
720                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
721                                 mo_graph->rollbackChanges();
722                                 priv->too_many_reads = false;
723                                 continue;
724                         }
725
726                         read_from(curr, reads_from);
727                         mo_graph->commitChanges();
728                         mo_check_promises(curr, true);
729
730                         updated |= r_status;
731                 } else if (!second_part_of_rmw) {
732                         /* Read from future value */
733                         struct future_value fv = curr->get_node()->get_future_value();
734                         Promise *promise = new Promise(curr, fv);
735                         value = fv.value;
736                         curr->set_read_from_promise(promise);
737                         promises->push_back(promise);
738                         mo_graph->startChanges();
739                         updated = r_modification_order(curr, promise);
740                         mo_graph->commitChanges();
741                 }
742                 get_thread(curr)->set_return_value(value);
743                 return updated;
744         }
745 }
746
747 /**
748  * Processes a lock, trylock, unlock, wait, or notify model action.
749  * @param curr is the mutex operation to process.
750  *
751  * The trylock operation checks whether the lock is taken.  If not,
752  * it falls through to the normal lock case.  If so, it fails and
753  * returns 0 to the caller.
754  *
755  * The lock operation has already been checked that it is enabled, so
756  * it just grabs the lock and synchronizes with the previous unlock.
757  *
758  * The unlock operation has to re-enable all of the threads that are
759  * waiting on the lock.
760  *
761  * @return True if synchronization was updated; false otherwise
762  */
763 bool ModelChecker::process_mutex(ModelAction *curr)
764 {
765         std::mutex *mutex = NULL;
766         struct std::mutex_state *state = NULL;
767
768         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
769                 mutex = (std::mutex *)curr->get_location();
770                 state = mutex->get_state();
771         } else if (curr->is_wait()) {
772                 mutex = (std::mutex *)curr->get_value();
773                 state = mutex->get_state();
774         }
775
776         switch (curr->get_type()) {
777         case ATOMIC_TRYLOCK: {
778                 bool success = !state->islocked;
779                 curr->set_try_lock(success);
780                 if (!success) {
781                         get_thread(curr)->set_return_value(0);
782                         break;
783                 }
784                 get_thread(curr)->set_return_value(1);
785         }
786                 //otherwise fall into the lock case
787         case ATOMIC_LOCK: {
788                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
789                         assert_bug("Lock access before initialization");
790                 state->islocked = true;
791                 ModelAction *unlock = get_last_unlock(curr);
792                 //synchronize with the previous unlock statement
793                 if (unlock != NULL) {
794                         curr->synchronize_with(unlock);
795                         return true;
796                 }
797                 break;
798         }
799         case ATOMIC_UNLOCK: {
800                 //unlock the lock
801                 state->islocked = false;
802                 //wake up the other threads
803                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
804                 //activate all the waiting threads
805                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
806                         scheduler->wake(get_thread(*rit));
807                 }
808                 waiters->clear();
809                 break;
810         }
811         case ATOMIC_WAIT: {
812                 //unlock the lock
813                 state->islocked = false;
814                 //wake up the other threads
815                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
816                 //activate all the waiting threads
817                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
818                         scheduler->wake(get_thread(*rit));
819                 }
820                 waiters->clear();
821                 //check whether we should go to sleep or not...simulate spurious failures
822                 if (curr->get_node()->get_misc() == 0) {
823                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
824                         //disable us
825                         scheduler->sleep(get_thread(curr));
826                 }
827                 break;
828         }
829         case ATOMIC_NOTIFY_ALL: {
830                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
831                 //activate all the waiting threads
832                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
833                         scheduler->wake(get_thread(*rit));
834                 }
835                 waiters->clear();
836                 break;
837         }
838         case ATOMIC_NOTIFY_ONE: {
839                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
840                 int wakeupthread = curr->get_node()->get_misc();
841                 action_list_t::iterator it = waiters->begin();
842                 advance(it, wakeupthread);
843                 scheduler->wake(get_thread(*it));
844                 waiters->erase(it);
845                 break;
846         }
847
848         default:
849                 ASSERT(0);
850         }
851         return false;
852 }
853
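/**
 * @brief Offer a writer's value to @a reader as a potential future value
 *
 * The value is recorded in the reader's Node only if the modification order
 * may allow the read; a backtracking point is cached when a new value is added.
 */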
854 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
855 {
856         /* Do more ambitious checks now that mo is more complete */
857         if (mo_may_allow(writer, reader)) {
858                 Node *node = reader->get_node();
859
860                 /* Find an ancestor thread which exists at the time of the reader */
861                 Thread *write_thread = get_thread(writer);
862                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
863                         write_thread = write_thread->get_parent();
864
865                 struct future_value fv = {
866                         writer->get_value(),
867                         writer->get_seq_number() + params.maxfuturedelay,
868                         write_thread->get_id(),
869                 };
870                 if (node->add_future_value(fv))
871                         set_latest_backtrack(reader);
872         }
873 }
874
875 /**
876  * Process a write ModelAction
877  * @param curr The ModelAction to process
878  * @return True if the mo_graph was updated or promises were resolved
879  */
880 bool ModelChecker::process_write(ModelAction *curr)
881 {
882         bool updated_mod_order = w_modification_order(curr);
883         bool updated_promises = resolve_promises(curr);
884
885         if (promises->size() == 0) {
886                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
887                         struct PendingFutureValue pfv = (*futurevalues)[i];
888                         add_future_value(pfv.writer, pfv.act);
889                 }
890                 futurevalues->clear();
891         }
892
893         mo_graph->commitChanges();
894         mo_check_promises(curr, false);
895
896         get_thread(curr)->set_return_value(VALUE_NONE);
897         return updated_mod_order || updated_promises;
898 }
899
900 /**
901  * Process a fence ModelAction
902  * @param curr The ModelAction to process
903  * @return True if synchronization was updated
904  */
905 bool ModelChecker::process_fence(ModelAction *curr)
906 {
907         /*
908          * fence-relaxed: no-op
909          * fence-release: only log the occurrence (not in this function), for
910          *   use in later synchronization
911          * fence-acquire (this function): search for hypothetical release
912          *   sequences
913          */
914         bool updated = false;
915         if (curr->is_acquire()) {
916                 action_list_t *list = action_trace;
917                 action_list_t::reverse_iterator rit;
918                 /* Find X : is_read(X) && X --sb-> curr */
919                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
920                         ModelAction *act = *rit;
921                         if (act == curr)
922                                 continue;
923                         if (act->get_tid() != curr->get_tid())
924                                 continue;
925                         /* Stop at the beginning of the thread */
926                         if (act->is_thread_start())
927                                 break;
928                         /* Stop once we reach a prior fence-acquire */
929                         if (act->is_fence() && act->is_acquire())
930                                 break;
931                         if (!act->is_read())
932                                 continue;
933                         /* read-acquire will find its own release sequences */
934                         if (act->is_acquire())
935                                 continue;
936
937                         /* Establish hypothetical release sequences */
938                         rel_heads_list_t release_heads;
939                         get_release_seq_heads(curr, act, &release_heads);
940                         for (unsigned int i = 0; i < release_heads.size(); i++)
941                                 if (!curr->synchronize_with(release_heads[i]))
942                                         set_bad_synchronization();
943                         if (release_heads.size() != 0)
944                                 updated = true;
945                 }
946         }
947         return updated;
948 }
949
950 /**
951  * @brief Process the current action for thread-related activity
952  *
953  * Performs current-action processing for a THREAD_* ModelAction. Processing
954  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
955  * synchronization, etc.  This function is a no-op for non-THREAD actions
956  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
957  *
958  * @param curr The current action
959  * @return True if synchronization was updated or a thread completed
960  */
961 bool ModelChecker::process_thread_action(ModelAction *curr)
962 {
963         bool updated = false;
964
965         switch (curr->get_type()) {
966         case THREAD_CREATE: {
967                 thrd_t *thrd = (thrd_t *)curr->get_location();
968                 struct thread_params *params = (struct thread_params *)curr->get_value();
969                 Thread *th = new Thread(thrd, params->func, params->arg);
970                 add_thread(th);
971                 th->set_creation(curr);
972                 /* Promises can be satisfied by children */
973                 for (unsigned int i = 0; i < promises->size(); i++) {
974                         Promise *promise = (*promises)[i];
975                         if (promise->thread_is_available(curr->get_tid()))
976                                 promise->add_thread(th->get_id());
977                 }
978                 break;
979         }
980         case THREAD_JOIN: {
981                 Thread *blocking = curr->get_thread_operand();
982                 ModelAction *act = get_last_action(blocking->get_id());
983                 curr->synchronize_with(act);
984                 updated = true; /* trigger rel-seq checks */
985                 break;
986         }
987         case THREAD_FINISH: {
988                 Thread *th = get_thread(curr);
989                 while (!th->wait_list_empty()) {
990                         ModelAction *act = th->pop_wait_list();
991                         scheduler->wake(get_thread(act));
992                 }
993                 th->complete();
994                 /* Completed thread can't satisfy promises */
995                 for (unsigned int i = 0; i < promises->size(); i++) {
996                         Promise *promise = (*promises)[i];
997                         if (promise->thread_is_available(th->get_id()))
998                                 if (promise->eliminate_thread(th->get_id()))
999                                         priv->failed_promise = true;
1000                 }
1001                 updated = true; /* trigger rel-seq checks */
1002                 break;
1003         }
1004         case THREAD_START: {
1005                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1006                 break;
1007         }
1008         default:
1009                 break;
1010         }
1011
1012         return updated;
1013 }
1014
1015 /**
1016  * @brief Process the current action for release sequence fixup activity
1017  *
1018  * Performs model-checker release sequence fixups for the current action,
1019  * forcing a single pending release sequence to break (with a given, potential
1020  * "loose" write) or to complete (i.e., synchronize). If a pending release
1021  * sequence forms a complete release sequence, then we must perform the fixup
1022  * synchronization, mo_graph additions, etc.
1023  *
1024  * @param curr The current action; must be a release sequence fixup action
1025  * @param work_queue The work queue to which to add work items as they are
1026  * generated
1027  */
1028 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1029 {
1030         const ModelAction *write = curr->get_node()->get_relseq_break();
1031         struct release_seq *sequence = pending_rel_seqs->back();
1032         pending_rel_seqs->pop_back();
1033         ASSERT(sequence);
1034         ModelAction *acquire = sequence->acquire;
1035         const ModelAction *rf = sequence->rf;
1036         const ModelAction *release = sequence->release;
1037         ASSERT(acquire);
1038         ASSERT(release);
1039         ASSERT(rf);
1040         ASSERT(release->same_thread(rf));
1041
1042         if (write == NULL) {
1043                 /**
1044                  * @todo Forcing a synchronization requires that we set
1045                  * modification order constraints. For instance, we can't allow
1046                  * a fixup sequence in which two separate read-acquire
1047                  * operations read from the same sequence, where the first one
1048                  * synchronizes and the other doesn't. Essentially, we can't
1049                  * allow any writes to insert themselves between 'release' and
1050                  * 'rf'
1051                  */
1052
1053                 /* Must synchronize */
1054                 if (!acquire->synchronize_with(release)) {
1055                         set_bad_synchronization();
1056                         return;
1057                 }
1058                 /* Re-check all pending release sequences */
1059                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1060                 /* Re-check act for mo_graph edges */
1061                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1062
1063                 /* propagate synchronization to later actions */
1064                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1065                 for (; (*rit) != acquire; rit++) {
1066                         ModelAction *propagate = *rit;
1067                         if (acquire->happens_before(propagate)) {
1068                                 propagate->synchronize_with(acquire);
1069                                 /* Re-check 'propagate' for mo_graph edges */
1070                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1071                         }
1072                 }
1073         } else {
1074                 /* Break release sequence with new edges:
1075                  *   release --mo--> write --mo--> rf */
1076                 mo_graph->addEdge(release, write);
1077                 mo_graph->addEdge(write, rf);
1078         }
1079
1080         /* See if we have realized a data race */
1081         checkDataRaces();
1082 }
1083
1084 /**
1085  * Initialize the current action by performing one or more of the following
1086  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1087  * in the NodeStack, manipulating backtracking sets, allocating and
1088  * initializing clock vectors, and computing the promises to fulfill.
1089  *
1090  * @param curr The current action, as passed from the user context; may be
1091  * freed/invalidated after the execution of this function, with a different
1092  * action "returned" its place (pass-by-reference)
1093  * action "returned" in its place (pass-by-reference)
1094  */
1095 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1096 {
1097         ModelAction *newcurr;
1098
1099         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1100                 newcurr = process_rmw(*curr);
1101                 delete *curr;
1102
1103                 if (newcurr->is_rmw())
1104                         compute_promises(newcurr);
1105
1106                 *curr = newcurr;
1107                 return false;
1108         }
1109
1110         (*curr)->set_seq_number(get_next_seq_num());
1111
1112         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1113         if (newcurr) {
1114                 /* First restore type and order in case of RMW operation */
1115                 if ((*curr)->is_rmwr())
1116                         newcurr->copy_typeandorder(*curr);
1117
1118                 ASSERT((*curr)->get_location() == newcurr->get_location());
1119                 newcurr->copy_from_new(*curr);
1120
1121                 /* Discard duplicate ModelAction; use action from NodeStack */
1122                 delete *curr;
1123
1124                 /* Always compute new clock vector */
1125                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1126
1127                 *curr = newcurr;
1128                 return false; /* Action was explored previously */
1129         } else {
1130                 newcurr = *curr;
1131
1132                 /* Always compute new clock vector */
1133                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1134
1135                 /* Assign most recent release fence */
1136                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1137
1138                 /*
1139                  * Perform one-time actions when pushing new ModelAction onto
1140                  * NodeStack
1141                  */
1142                 if (newcurr->is_write())
1143                         compute_promises(newcurr);
1144                 else if (newcurr->is_relseq_fixup())
1145                         compute_relseq_breakwrites(newcurr);
1146                 else if (newcurr->is_wait())
1147                         newcurr->get_node()->set_misc_max(2);
1148                 else if (newcurr->is_notify_one()) {
1149                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1150                 }
1151                 return true; /* This was a new ModelAction */
1152         }
1153 }
1154
1155 /**
1156  * @brief Establish reads-from relation between two actions
1157  *
1158  * Perform basic operations involved with establishing a concrete rf relation,
1159  * including setting the ModelAction data and checking for release sequences.
1160  *
1161  * @param act The action that is reading (must be a read)
1162  * @param rf The action from which we are reading (must be a write)
1163  *
1164  * @return True if this read established synchronization
1165  */
1166 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1167 {
1168         act->set_read_from(rf);
1169         if (rf != NULL && act->is_acquire()) {
1170                 rel_heads_list_t release_heads;
1171                 get_release_seq_heads(act, act, &release_heads);
1172                 int num_heads = release_heads.size();
1173                 for (unsigned int i = 0; i < release_heads.size(); i++)
1174                         if (!act->synchronize_with(release_heads[i])) {
1175                                 set_bad_synchronization();
1176                                 num_heads--;
1177                         }
1178                 return num_heads > 0;
1179         }
1180         return false;
1181 }
1182
1183 /**
1184  * @brief Check whether a model action is enabled.
1185  *
1186  * Checks whether a lock or join operation would be successful (i.e., the
1187  * lock is not already held, or the thread being joined has already completed).
1188  * If not, put the action in a waiter list.
1189  *
1190  * @param curr is the ModelAction to check whether it is enabled.
1191  * @return a bool that indicates whether the action is enabled.
1192  */
1193 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1194         if (curr->is_lock()) {
1195                 std::mutex *lock = (std::mutex *)curr->get_location();
1196                 struct std::mutex_state *state = lock->get_state();
1197                 if (state->islocked) {
1198                         //Stick the action in the appropriate waiting queue
1199                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1200                         return false;
1201                 }
1202         } else if (curr->get_type() == THREAD_JOIN) {
1203                 Thread *blocking = (Thread *)curr->get_location();
1204                 if (!blocking->is_complete()) {
1205                         blocking->push_wait_list(curr);
1206                         return false;
1207                 }
1208         }
1209
1210         return true;
1211 }
1212
1213 /**
1214  * This is the heart of the model checker routine. It performs model-checking
1215  * actions corresponding to a given "current action." Among other processes, it
1216  * calculates reads-from relationships, updates synchronization clock vectors,
1217  * forms a memory_order constraints graph, and handles replay/backtrack
1218  * execution when running permutations of previously-observed executions.
1219  *
1220  * @param curr The current action to process
1221  * @return The ModelAction that is actually executed; may be different than
1222  * curr; may be NULL, if the current action is not enabled to run
1223  */
1224 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1225 {
1226         ASSERT(curr);
1227         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1228
1229         if (!check_action_enabled(curr)) {
1230                 /* Make the execution look like we chose to run this action
1231                  * much later, when a lock/join can succeed */
1232                 get_thread(curr)->set_pending(curr);
1233                 scheduler->sleep(get_thread(curr));
1234                 return NULL;
1235         }
1236
1237         bool newly_explored = initialize_curr_action(&curr);
1238
1239         DBG();
1240         if (DBG_ENABLED())
1241                 curr->print();
1242
1243         wake_up_sleeping_actions(curr);
1244
1245         /* Add the action to lists before any other model-checking tasks */
1246         if (!second_part_of_rmw)
1247                 add_action_to_lists(curr);
1248
1249         /* Build may_read_from set for newly-created actions */
1250         if (newly_explored && curr->is_read())
1251                 build_reads_from_past(curr);
1252
1253         /* Initialize work_queue with the "current action" work */
1254         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1255         while (!work_queue.empty() && !has_asserted()) {
1256                 WorkQueueEntry work = work_queue.front();
1257                 work_queue.pop_front();
1258
1259                 switch (work.type) {
1260                 case WORK_CHECK_CURR_ACTION: {
1261                         ModelAction *act = work.action;
1262                         bool update = false; /* update this location's release seq's */
1263                         bool update_all = false; /* update all release seq's */
1264
1265                         if (process_thread_action(curr))
1266                                 update_all = true;
1267
1268                         if (act->is_read() && process_read(act, second_part_of_rmw))
1269                                 update = true;
1270
1271                         if (act->is_write() && process_write(act))
1272                                 update = true;
1273
1274                         if (act->is_fence() && process_fence(act))
1275                                 update_all = true;
1276
1277                         if (act->is_mutex_op() && process_mutex(act))
1278                                 update_all = true;
1279
1280                         if (act->is_relseq_fixup())
1281                                 process_relseq_fixup(curr, &work_queue);
1282
1283                         if (update_all)
1284                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1285                         else if (update)
1286                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1287                         break;
1288                 }
1289                 case WORK_CHECK_RELEASE_SEQ:
1290                         resolve_release_sequences(work.location, &work_queue);
1291                         break;
1292                 case WORK_CHECK_MO_EDGES: {
1293                         /** @todo Complete verification of work_queue */
1294                         ModelAction *act = work.action;
1295                         bool updated = false;
1296
1297                         if (act->is_read()) {
1298                                 const ModelAction *rf = act->get_reads_from();
1299                                 const Promise *promise = act->get_reads_from_promise();
1300                                 if (rf) {
1301                                         if (r_modification_order(act, rf))
1302                                                 updated = true;
1303                                 } else if (promise) {
1304                                         if (r_modification_order(act, promise))
1305                                                 updated = true;
1306                                 }
1307                         }
1308                         if (act->is_write()) {
1309                                 if (w_modification_order(act))
1310                                         updated = true;
1311                         }
1312                         mo_graph->commitChanges();
1313
1314                         if (updated)
1315                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1316                         break;
1317                 }
1318                 default:
1319                         ASSERT(false);
1320                         break;
1321                 }
1322         }
1323
1324         check_curr_backtracking(curr);
1325         set_backtracking(curr);
1326         return curr;
1327 }
1328
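/** @brief Set @a curr as the latest backtracking point if its Node (or the
 *  parent Node) still has unexplored choices */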
1329 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1330 {
1331         Node *currnode = curr->get_node();
1332         Node *parnode = currnode->get_parent();
1333
1334         if ((parnode && !parnode->backtrack_empty()) ||
1335                          !currnode->misc_empty() ||
1336                          !currnode->read_from_empty() ||
1337                          !currnode->future_value_empty() ||
1338                          !currnode->promise_empty() ||
1339                          !currnode->relseq_break_empty()) {
1340                 set_latest_backtrack(curr);
1341         }
1342 }
1343
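/** @return True if any outstanding promise has outlived its expiration
 *  (compared against the used sequence numbers) */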
1344 bool ModelChecker::promises_expired() const
1345 {
1346         for (unsigned int i = 0; i < promises->size(); i++) {
1347                 Promise *promise = (*promises)[i];
1348                 if (promise->get_expiration() < priv->used_sequence_numbers)
1349                         return true;
1350         }
1351         return false;
1352 }
1353
1354 /**
1355  * This is the strongest feasibility check available.
1356  * @return whether the current trace (partial or complete) must be a prefix of
1357  * a feasible trace.
1358  */
1359 bool ModelChecker::isfeasibleprefix() const
1360 {
1361         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1362 }
1363
1364 /**
1365  * Print diagnostic information about an infeasible execution
1366  * @param prefix A string to prefix the output with; if NULL, then a default
1367  * message prefix will be provided
1368  */
1369 void ModelChecker::print_infeasibility(const char *prefix) const
1370 {
1371         char buf[100];
1372         char *ptr = buf;
1373         if (mo_graph->checkForCycles())
1374                 ptr += sprintf(ptr, "[mo cycle]");
1375         if (priv->failed_promise)
1376                 ptr += sprintf(ptr, "[failed promise]");
1377         if (priv->too_many_reads)
1378                 ptr += sprintf(ptr, "[too many reads]");
1379         if (priv->no_valid_reads)
1380                 ptr += sprintf(ptr, "[no valid reads-from]");
1381         if (priv->bad_synchronization)
1382                 ptr += sprintf(ptr, "[bad sw ordering]");
1383         if (promises_expired())
1384                 ptr += sprintf(ptr, "[promise expired]");
1385         if (promises->size() != 0)
1386                 ptr += sprintf(ptr, "[unresolved promise]");
1387         if (ptr != buf)
1388                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1389 }
1390
1391 /**
1392  * Returns whether the current completed trace is feasible, except for pending
1393  * release sequences.
1394  */
1395 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1396 {
1397         return !is_infeasible() && promises->size() == 0;
1398 }
1399
1400 /**
1401  * Check if the current partial trace is infeasible. Does not check any
1402  * end-of-execution flags, which might rule out the execution. Thus, this is
1403  * useful only for ruling an execution as infeasible.
1404  * @return whether the current partial trace is infeasible.
1405  */
1406 bool ModelChecker::is_infeasible() const
1407 {
1408         return mo_graph->checkForCycles() ||
1409                 priv->no_valid_reads ||
1410                 priv->failed_promise ||
1411                 priv->too_many_reads ||
1412                 priv->bad_synchronization ||
1413                 promises_expired();
1414 }
1415
1416 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1417 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1418         ModelAction *lastread = get_last_action(act->get_tid());
1419         lastread->process_rmw(act);
1420         if (act->is_rmw()) {
1421                 if (lastread->get_reads_from())
1422                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1423                 else
1424                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1425                 mo_graph->commitChanges();
1426         }
1427         return lastread;
1428 }
1429
1430 /**
1431  * Checks whether a thread has read from the same write too many times
1432  * without seeing the effects of a later write.
1433  *
1434  * Basic idea:
1435  * 1) there must be a different write that we could read from that would satisfy the modification order,
1436  * 2) we must have read from the same value in excess of maxreads times, and
1437  * 3) that other write must have been in the reads_from set for maxreads times.
1438  *
1439  * If so, we decide that the execution is no longer feasible.
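 *
 * Roughly, for example (illustrative only): with maxreads == 5, if the
 * current load and the five loads immediately preceding it in this thread all
 * read from the same store, while some alternative store remained in each of
 * those loads' may-read-from sets, the execution is flagged as too_many_reads.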
1440  */
1441 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1442 {
1443         if (params.maxreads != 0) {
1444                 if (curr->get_node()->get_read_from_size() <= 1)
1445                         return;
1446                 //Must make sure that the execution is currently feasible... We could
1447                 //accidentally clear the infeasibility by rolling back
1448                 if (is_infeasible())
1449                         return;
1450                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1451                 int tid = id_to_int(curr->get_tid());
1452
1453                 /* Skip checks */
1454                 if ((int)thrd_lists->size() <= tid)
1455                         return;
1456                 action_list_t *list = &(*thrd_lists)[tid];
1457
1458                 action_list_t::reverse_iterator rit = list->rbegin();
1459                 /* Skip past curr */
1460                 for (; (*rit) != curr; rit++)
1461                         ;
1462                 /* go past curr now */
1463                 rit++;
1464
1465                 action_list_t::reverse_iterator ritcopy = rit;
1466                 //See if we have enough reads from the same value
1467                 int count = 0;
1468                 for (; count < params.maxreads; rit++, count++) {
1469                         if (rit == list->rend())
1470                                 return;
1471                         ModelAction *act = *rit;
1472                         if (!act->is_read())
1473                                 return;
1474
1475                         if (act->get_reads_from() != rf)
1476                                 return;
1477                         if (act->get_node()->get_read_from_size() <= 1)
1478                                 return;
1479                 }
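                /* For each alternative store in curr's may-read-from set, check
                 * whether every one of the prior reads counted above could also
                 * have read from it; if some such store works for all of them,
                 * we have read the same value too many times while a valid
                 * alternative existed. */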
1480                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1481                         /* Get write */
1482                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1483
1484                         /* Need a different write */
1485                         if (write == rf)
1486                                 continue;
1487
1488                         /* Test to see whether this is a feasible write to read from */
1489                         /** NOTE: all members of read-from set should be
1490                          *  feasible, so we no longer check it here **/
1491
1492                         rit = ritcopy;
1493
1494                         bool feasiblewrite = true;
1495                         //now we need to see if this write works for everyone
1496
1497                         for (int loop = count; loop > 0; loop--, rit++) {
1498                                 ModelAction *act = *rit;
1499                                 bool foundvalue = false;
1500                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1501                                         if (act->get_node()->get_read_from_at(j) == write) {
1502                                                 foundvalue = true;
1503                                                 break;
1504                                         }
1505                                 }
1506                                 if (!foundvalue) {
1507                                         feasiblewrite = false;
1508                                         break;
1509                                 }
1510                         }
1511                         if (feasiblewrite) {
1512                                 priv->too_many_reads = true;
1513                                 return;
1514                         }
1515                 }
1516         }
1517 }
1518
1519 /**
1520  * Updates the mo_graph with the constraints imposed from the current
1521  * read.
1522  *
1523  * Basic idea is the following: Go through each other thread and find
1524  * the last action that happened before our read.  Two cases:
1525  *
1526  * (1) The action is a write => that write must either occur before
1527  * the write we read from or be the write we read from.
1528  *
1529  * (2) The action is a read => the write that that action read from
1530  * must occur before the write we read from or be the same write.
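 *
 * For example (illustrative only): if thread i's last relevant action before
 * our read is a store B, we add the edge B --mo--> rf (unless B is rf); if it
 * is instead a load that read from store A, we add A --mo--> rf.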
1531  *
1532  * @param curr The current action. Must be a read.
1533  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1534  * @return True if modification order edges were added; false otherwise
1535  */
1536 template <typename rf_type>
1537 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1538 {
1539         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1540         unsigned int i;
1541         bool added = false;
1542         ASSERT(curr->is_read());
1543
1544         /* Last SC fence in the current thread */
1545         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1546
1547         /* Iterate over all threads */
1548         for (i = 0; i < thrd_lists->size(); i++) {
1549                 /* Last SC fence in thread i */
1550                 ModelAction *last_sc_fence_thread_local = NULL;
1551                 if (int_to_id((int)i) != curr->get_tid())
1552                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1553
1554                 /* Last SC fence in thread i, before last SC fence in current thread */
1555                 ModelAction *last_sc_fence_thread_before = NULL;
1556                 if (last_sc_fence_local)
1557                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1558
1559                 /* Iterate over actions in thread, starting from most recent */
1560                 action_list_t *list = &(*thrd_lists)[i];
1561                 action_list_t::reverse_iterator rit;
1562                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1563                         ModelAction *act = *rit;
1564
1565                         if (act->is_write() && !act->equals(rf) && act != curr) {
1566                                 /* C++, Section 29.3 statement 5 */
1567                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1568                                                 *act < *last_sc_fence_thread_local) {
1569                                         added = mo_graph->addEdge(act, rf) || added;
1570                                         break;
1571                                 }
1572                                 /* C++, Section 29.3 statement 4 */
1573                                 else if (act->is_seqcst() && last_sc_fence_local &&
1574                                                 *act < *last_sc_fence_local) {
1575                                         added = mo_graph->addEdge(act, rf) || added;
1576                                         break;
1577                                 }
1578                                 /* C++, Section 29.3 statement 6 */
1579                                 else if (last_sc_fence_thread_before &&
1580                                                 *act < *last_sc_fence_thread_before) {
1581                                         added = mo_graph->addEdge(act, rf) || added;
1582                                         break;
1583                                 }
1584                         }
1585
1586                         /*
1587                          * Include at most one act per-thread that "happens
1588                          * before" curr; skip the reflexive case (act == curr).
1589                          */
1590                         if (act->happens_before(curr) && act != curr) {
1591                                 if (act->is_write()) {
1592                                         if (!act->equals(rf)) {
1593                                                 added = mo_graph->addEdge(act, rf) || added;
1594                                         }
1595                                 } else {
1596                                         const ModelAction *prevreadfrom = act->get_reads_from();
1597                                         //if the previous read is unresolved, keep going...
1598                                         if (prevreadfrom == NULL)
1599                                                 continue;
1600
1601                                         if (!prevreadfrom->equals(rf)) {
1602                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1603                                         }
1604                                 }
1605                                 break;
1606                         }
1607                 }
1608         }
1609
1610         /*
1611          * All compatible, thread-exclusive promises must be ordered after any
1612          * concrete loads from the same thread
1613          */
1614         for (unsigned int i = 0; i < promises->size(); i++)
1615                 if ((*promises)[i]->is_compatible_exclusive(curr))
1616                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1617
1618         return added;
1619 }
1620
1621 /**
1622  * Updates the mo_graph with the constraints imposed from the current write.
1623  *
1624  * Basic idea is the following: Go through each other thread and find
1625  * the latest action that happened before our write.  Two cases:
1626  *
1627  * (1) The action is a write => that write must occur before
1628  * the current write
1629  *
1630  * (2) The action is a read => the write that that action read from
1631  * must occur before the current write.
1632  *
1633  * This method also handles two other issues:
1634  *
1635  * (I) Sequential Consistency: Making sure that if the current write is
1636  * seq_cst, that it occurs after the previous seq_cst write.
1637  *
1638  * (II) Sending the write back to non-synchronizing reads.
1639  *
1640  * @param curr The current action. Must be a write.
1641  * @return True if modification order edges were added; false otherwise
1642  */
1643 bool ModelChecker::w_modification_order(ModelAction *curr)
1644 {
1645         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1646         unsigned int i;
1647         bool added = false;
1648         ASSERT(curr->is_write());
1649
1650         if (curr->is_seqcst()) {
1651                 /* We have to at least see the last sequentially consistent write,
1652                  * so we are initialized. */
1653                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1654                 if (last_seq_cst != NULL) {
1655                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1656                 }
1657         }
1658
1659         /* Last SC fence in the current thread */
1660         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1661
1662         /* Iterate over all threads */
1663         for (i = 0; i < thrd_lists->size(); i++) {
1664                 /* Last SC fence in thread i, before last SC fence in current thread */
1665                 ModelAction *last_sc_fence_thread_before = NULL;
1666                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1667                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1668
1669                 /* Iterate over actions in thread, starting from most recent */
1670                 action_list_t *list = &(*thrd_lists)[i];
1671                 action_list_t::reverse_iterator rit;
1672                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1673                         ModelAction *act = *rit;
1674                         if (act == curr) {
1675                                 /*
1676                                  * 1) If RMW and it actually read from something, then we
1677                                  * already have all relevant edges, so just skip to next
1678                                  * thread.
1679                                  *
1680                                  * 2) If RMW and it didn't read from anything, we should add
1681                                  * whatever edge we can get to speed up convergence.
1682                                  *
1683                                  * 3) If normal write, we need to look at earlier actions, so
1684                                  * continue processing list.
1685                                  */
1686                                 if (curr->is_rmw()) {
1687                                         if (curr->get_reads_from() != NULL)
1688                                                 break;
1689                                         else
1690                                                 continue;
1691                                 } else
1692                                         continue;
1693                         }
1694
1695                         /* C++, Section 29.3 statement 7 */
1696                         if (last_sc_fence_thread_before && act->is_write() &&
1697                                         *act < *last_sc_fence_thread_before) {
1698                                 added = mo_graph->addEdge(act, curr) || added;
1699                                 break;
1700                         }
1701
1702                         /*
1703                          * Include at most one act per-thread that "happens
1704                          * before" curr
1705                          */
1706                         if (act->happens_before(curr)) {
1707                                 /*
1708                                  * Note: if act is RMW, just add edge:
1709                                  *   act --mo--> curr
1710                                  * The following edge should be handled elsewhere:
1711                                  *   readfrom(act) --mo--> act
1712                                  */
1713                                 if (act->is_write())
1714                                         added = mo_graph->addEdge(act, curr) || added;
1715                                 else if (act->is_read()) {
1716                                         //if the previous read's reads-from is still unresolved (NULL), just keep going
1717                                         if (act->get_reads_from() == NULL)
1718                                                 continue;
1719                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1720                                 }
1721                                 break;
1722                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1723                                                      !act->same_thread(curr)) {
1724                                 /* We have an action that:
1725                                    (1) did not happen before us
1726                                    (2) is a read and we are a write
1727                                    (3) cannot synchronize with us
1728                                    (4) is in a different thread
1729                                    =>
1730                                    that read could potentially read from our write.  Note that
1731                                    these checks are overly conservative at this point, we'll
1732                                    do more checks before actually removing the
1733                                    pendingfuturevalue.
1734
1735                                  */
1736                                 if (thin_air_constraint_may_allow(curr, act)) {
1737                                         if (!is_infeasible())
1738                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1739                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1740                                                 add_future_value(curr, act);
1741                                 }
1742                         }
1743                 }
1744         }
1745
1746         /*
1747          * All compatible, thread-exclusive promises must be ordered after any
1748          * concrete stores to the same thread, or else they can be merged with
1749          * this store later
1750          */
1751         for (unsigned int i = 0; i < promises->size(); i++)
1752                 if ((*promises)[i]->is_compatible_exclusive(curr))
1753                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1754
1755         return added;
1756 }
1757
1758 /** Arbitrary reads from the future are not allowed.  Section 29.3
1759  * part 9 places some constraints.  This method checks one consequence of that
1760  * constraint.  Others require compiler support. */
1761 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1762 {
1763         if (!writer->is_rmw())
1764                 return true;
1765
1766         if (!reader->is_rmw())
1767                 return true;
1768
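        /* Walk backward along the writer's reads-from (RMW) chain; if we reach
         * the reader itself, the reader would be reading a value that ultimately
         * depends on its own read -- the "thin air" cycle this constraint
         * forbids. Stop early once the chain reaches an action in the reader's
         * thread that happens before the reader. */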
1769         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1770                 if (search == reader)
1771                         return false;
1772                 if (search->get_tid() == reader->get_tid() &&
1773                                 search->happens_before(reader))
1774                         break;
1775         }
1776
1777         return true;
1778 }
1779
1780 /**
1781  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1782  * some constraints. This method checks the following constraint (others
1783  * require compiler support):
1784  *
1785  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1786  */
1787 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1788 {
1789         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1790         unsigned int i;
1791         /* Iterate over all threads */
1792         for (i = 0; i < thrd_lists->size(); i++) {
1793                 const ModelAction *write_after_read = NULL;
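                /* Scan this thread backward, remembering the earliest write (or
                 * the store a read reads from) that happens after 'reader'; if
                 * such a write is mo-ordered before 'writer', then we would have
                 * reader --hb--> write_after_read --mo--> writer, so reading from
                 * 'writer' would violate the constraint above. */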
1794
1795                 /* Iterate over actions in thread, starting from most recent */
1796                 action_list_t *list = &(*thrd_lists)[i];
1797                 action_list_t::reverse_iterator rit;
1798                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1799                         ModelAction *act = *rit;
1800
1801                         /* Don't disallow due to act == reader */
1802                         if (!reader->happens_before(act) || reader == act)
1803                                 break;
1804                         else if (act->is_write())
1805                                 write_after_read = act;
1806                         else if (act->is_read() && act->get_reads_from() != NULL)
1807                                 write_after_read = act->get_reads_from();
1808                 }
1809
1810                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1811                         return false;
1812         }
1813         return true;
1814 }
1815
1816 /**
1817  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1818  * The ModelAction under consideration is expected to be taking part in
1819  * release/acquire synchronization as an object of the "reads from" relation.
1820  * Note that this can only provide release sequence support for RMW chains
1821  * which do not read from the future, as those actions cannot be traced until
1822  * their "promise" is fulfilled. Similarly, we may not even establish the
1823  * presence of a release sequence with certainty, as some modification order
1824  * constraints may be decided further in the future. Thus, this function
1825  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1826  * and a boolean representing certainty.
1827  *
1828  * @param rf The action that might be part of a release sequence. Must be a
1829  * write.
1830  * @param release_heads A pass-by-reference style return parameter. After
1831  * execution of this function, release_heads will contain the heads of all the
1832  * relevant release sequences, if any exist with certainty
1833  * @param pending A pass-by-reference style return parameter which is only used
1834  * when returning false (i.e., uncertain). Returns most information regarding
1835  * an uncertain release sequence, including any write operations that might
1836  * break the sequence.
1837  * @return true, if the ModelChecker is certain that release_heads is complete;
1838  * false otherwise
1839  */
1840 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1841                 rel_heads_list_t *release_heads,
1842                 struct release_seq *pending) const
1843 {
1844         /* Only check for release sequences if there are no cycles */
1845         if (mo_graph->checkForCycles())
1846                 return false;
1847
1848         while (rf) {
1849                 ASSERT(rf->is_write());
1850
1851                 if (rf->is_release())
1852                         release_heads->push_back(rf);
1853                 else if (rf->get_last_fence_release())
1854                         release_heads->push_back(rf->get_last_fence_release());
1855                 if (!rf->is_rmw())
1856                         break; /* End of RMW chain */
1857
1858                 /** @todo Need to be smarter here...  In the linux lock
1859                  * example, this will run to the beginning of the program for
1860                  * every acquire. */
1861                 /** @todo The way to be smarter here is to keep going until 1
1862                  * thread has a release preceded by an acquire and you've seen
1863                  *       both. */
1864
1865                 /* acq_rel RMW is a sufficient stopping condition */
1866                 if (rf->is_acquire() && rf->is_release())
1867                         return true; /* complete */
1868
1869                 rf = rf->get_reads_from();
1870         };
1871         if (!rf) {
1872                 /* read from future: need to settle this later */
1873                 pending->rf = NULL;
1874                 return false; /* incomplete */
1875         }
1876
1877         if (rf->is_release())
1878                 return true; /* complete */
1879
1880         /* else relaxed write
1881          * - check for fence-release in the same thread (29.8, stmt. 3)
1882          * - check modification order for contiguous subsequence
1883          *   -> rf must be same thread as release */
1884
1885         const ModelAction *fence_release = rf->get_last_fence_release();
1886         /* Synchronize with a fence-release unconditionally; we don't need to
1887          * find any more "contiguous subsequence..." for it */
1888         if (fence_release)
1889                 release_heads->push_back(fence_release);
1890
1891         int tid = id_to_int(rf->get_tid());
1892         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1893         action_list_t *list = &(*thrd_lists)[tid];
1894         action_list_t::const_reverse_iterator rit;
1895
1896         /* Find rf in the thread list */
1897         rit = std::find(list->rbegin(), list->rend(), rf);
1898         ASSERT(rit != list->rend());
1899
1900         /* Find the last {write,fence}-release */
1901         for (; rit != list->rend(); rit++) {
1902                 if (fence_release && *(*rit) < *fence_release)
1903                         break;
1904                 if ((*rit)->is_release())
1905                         break;
1906         }
1907         if (rit == list->rend()) {
1908                 /* No write-release in this thread */
1909                 return true; /* complete */
1910         } else if (fence_release && *(*rit) < *fence_release) {
1911                 /* The fence-release is more recent (and so, "stronger") than
1912                  * the most recent write-release */
1913                 return true; /* complete */
1914         } /* else, need to establish contiguous release sequence */
1915         ModelAction *release = *rit;
1916
1917         ASSERT(rf->same_thread(release));
1918
1919         pending->writes.clear();
1920
1921         bool certain = true;
1922         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1923                 if (id_to_int(rf->get_tid()) == (int)i)
1924                         continue;
1925                 list = &(*thrd_lists)[i];
1926
1927                 /* Can we ensure no future writes from this thread may break
1928                  * the release seq? */
1929                 bool future_ordered = false;
1930
1931                 ModelAction *last = get_last_action(int_to_id(i));
1932                 Thread *th = get_thread(int_to_id(i));
1933                 if ((last && rf->happens_before(last)) ||
1934                                 !is_enabled(th) ||
1935                                 th->is_complete())
1936                         future_ordered = true;
1937
1938                 ASSERT(!th->is_model_thread() || future_ordered);
1939
1940                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1941                         const ModelAction *act = *rit;
1942                         /* Reach synchronization -> this thread is complete */
1943                         if (act->happens_before(release))
1944                                 break;
1945                         if (rf->happens_before(act)) {
1946                                 future_ordered = true;
1947                                 continue;
1948                         }
1949
1950                         /* Only non-RMW writes can break release sequences */
1951                         if (!act->is_write() || act->is_rmw())
1952                                 continue;
1953
1954                         /* Check modification order */
1955                         if (mo_graph->checkReachable(rf, act)) {
1956                                 /* rf --mo--> act */
1957                                 future_ordered = true;
1958                                 continue;
1959                         }
1960                         if (mo_graph->checkReachable(act, release))
1961                                 /* act --mo--> release */
1962                                 break;
1963                         if (mo_graph->checkReachable(release, act) &&
1964                                       mo_graph->checkReachable(act, rf)) {
1965                                 /* release --mo-> act --mo--> rf */
1966                                 return true; /* complete */
1967                         }
1968                         /* act may break release sequence */
1969                         pending->writes.push_back(act);
1970                         certain = false;
1971                 }
1972                 if (!future_ordered)
1973                         certain = false; /* This thread is uncertain */
1974         }
1975
1976         if (certain) {
1977                 release_heads->push_back(release);
1978                 pending->writes.clear();
1979         } else {
1980                 pending->release = release;
1981                 pending->rf = rf;
1982         }
1983         return certain;
1984 }
1985
1986 /**
1987  * An interface for getting the release sequence head(s) with which a
1988  * given ModelAction must synchronize. This function only returns a non-empty
1989  * result when it can locate a release sequence head with certainty. Otherwise,
1990  * it may mark the internal state of the ModelChecker so that it will handle
1991  * the release sequence at a later time, causing @a acquire to update its
1992  * synchronization at some later point in execution.
1993  *
1994  * @param acquire The 'acquire' action that may synchronize with a release
1995  * sequence
1996  * @param read The read action that may read from a release sequence; this may
1997  * be the same as acquire, or else an earlier action in the same thread (i.e.,
1998  * when 'acquire' is a fence-acquire)
1999  * @param release_heads A pass-by-reference return parameter. Will be filled
2000  * with the head(s) of the release sequence(s), if they exist with certainty.
2001  * @see ModelChecker::release_seq_heads
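 *
 * Caller-side sketch (illustrative only; mirrors how acquire operations
 * typically consume the result and how a failed synchronization is recorded):
 *
 *     rel_heads_list_t release_heads;
 *     get_release_seq_heads(acquire, read, &release_heads);
 *     for (unsigned int i = 0; i < release_heads.size(); i++)
 *             if (!acquire->synchronize_with(release_heads[i]))
 *                     set_bad_synchronization();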
2002  */
2003 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2004                 ModelAction *read, rel_heads_list_t *release_heads)
2005 {
2006         const ModelAction *rf = read->get_reads_from();
2007         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2008         sequence->acquire = acquire;
2009         sequence->read = read;
2010
2011         if (!release_seq_heads(rf, release_heads, sequence)) {
2012                 /* add act to 'lazy checking' list */
2013                 pending_rel_seqs->push_back(sequence);
2014         } else {
2015                 snapshot_free(sequence);
2016         }
2017 }
2018
2019 /**
2020  * Attempt to resolve all stashed operations that might synchronize with a
2021  * release sequence for a given location. This implements the "lazy" portion of
2022  * determining whether or not a release sequence was contiguous, since not all
2023  * modification order information is present at the time an action occurs.
2024  *
2025  * @param location The location/object that should be checked for release
2026  * sequence resolutions. A NULL value means to check all locations.
2027  * @param work_queue The work queue to which to add work items as they are
2028  * generated
2029  * @return True if any updates occurred (new synchronization, new mo_graph
2030  * edges)
2031  */
2032 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2033 {
2034         bool updated = false;
2035         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2036         while (it != pending_rel_seqs->end()) {
2037                 struct release_seq *pending = *it;
2038                 ModelAction *acquire = pending->acquire;
2039                 const ModelAction *read = pending->read;
2040
2041                 /* Only resolve sequences on the given location, if provided */
2042                 if (location && read->get_location() != location) {
2043                         it++;
2044                         continue;
2045                 }
2046
2047                 const ModelAction *rf = read->get_reads_from();
2048                 rel_heads_list_t release_heads;
2049                 bool complete;
2050                 complete = release_seq_heads(rf, &release_heads, pending);
2051                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2052                         if (!acquire->has_synchronized_with(release_heads[i])) {
2053                                 if (acquire->synchronize_with(release_heads[i]))
2054                                         updated = true;
2055                                 else
2056                                         set_bad_synchronization();
2057                         }
2058                 }
2059
2060                 if (updated) {
2061                         /* Re-check all pending release sequences */
2062                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2063                         /* Re-check read-acquire for mo_graph edges */
2064                         if (acquire->is_read())
2065                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2066
2067                         /* propagate synchronization to later actions */
2068                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2069                         for (; (*rit) != acquire; rit++) {
2070                                 ModelAction *propagate = *rit;
2071                                 if (acquire->happens_before(propagate)) {
2072                                         propagate->synchronize_with(acquire);
2073                                         /* Re-check 'propagate' for mo_graph edges */
2074                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2075                                 }
2076                         }
2077                 }
2078                 if (complete) {
2079                         it = pending_rel_seqs->erase(it);
2080                         snapshot_free(pending);
2081                 } else {
2082                         it++;
2083                 }
2084         }
2085
2086         // If we resolved promises or added synchronization, see if we have realized a data race.
2087         checkDataRaces();
2088
2089         return updated;
2090 }
2091
2092 /**
2093  * Performs various bookkeeping operations for the current ModelAction. For
2094  * instance, adds the action to the per-object, per-thread action vector and to the
2095  * action trace list of all thread actions.
2096  *
2097  * @param act is the ModelAction to add.
2098  */
2099 void ModelChecker::add_action_to_lists(ModelAction *act)
2100 {
2101         int tid = id_to_int(act->get_tid());
2102         ModelAction *uninit = NULL;
2103         int uninit_id = -1;
2104         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
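        /* The first time an atomic object shows up, stash an ATOMIC_UNINIT
         * placeholder at the front of its lists so that later reads always have
         * at least one candidate store to read from. */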
2105         if (list->empty() && act->is_atomic_var()) {
2106                 uninit = new_uninitialized_action(act->get_location());
2107                 uninit_id = id_to_int(uninit->get_tid());
2108                 list->push_back(uninit);
2109         }
2110         list->push_back(act);
2111
2112         action_trace->push_back(act);
2113         if (uninit)
2114                 action_trace->push_front(uninit);
2115
2116         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2117         if (tid >= (int)vec->size())
2118                 vec->resize(priv->next_thread_id);
2119         (*vec)[tid].push_back(act);
2120         if (uninit)
2121                 (*vec)[uninit_id].push_front(uninit);
2122
2123         if ((int)thrd_last_action->size() <= tid)
2124                 thrd_last_action->resize(get_num_threads());
2125         (*thrd_last_action)[tid] = act;
2126         if (uninit)
2127                 (*thrd_last_action)[uninit_id] = uninit;
2128
2129         if (act->is_fence() && act->is_release()) {
2130                 if ((int)thrd_last_fence_release->size() <= tid)
2131                         thrd_last_fence_release->resize(get_num_threads());
2132                 (*thrd_last_fence_release)[tid] = act;
2133         }
2134
2135         if (act->is_wait()) {
2136                 void *mutex_loc = (void *) act->get_value();
2137                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2138
2139                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2140                 if (tid >= (int)vec->size())
2141                         vec->resize(priv->next_thread_id);
2142                 (*vec)[tid].push_back(act);
2143         }
2144 }
2145
2146 /**
2147  * @brief Get the last action performed by a particular Thread
2148  * @param tid The thread ID of the Thread in question
2149  * @return The last action in the thread
2150  */
2151 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2152 {
2153         int threadid = id_to_int(tid);
2154         if (threadid < (int)thrd_last_action->size())
2155                 return (*thrd_last_action)[id_to_int(tid)];
2156         else
2157                 return NULL;
2158 }
2159
2160 /**
2161  * @brief Get the last fence release performed by a particular Thread
2162  * @param tid The thread ID of the Thread in question
2163  * @return The last fence release in the thread, if one exists; NULL otherwise
2164  */
2165 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2166 {
2167         int threadid = id_to_int(tid);
2168         if (threadid < (int)thrd_last_fence_release->size())
2169                 return (*thrd_last_fence_release)[id_to_int(tid)];
2170         else
2171                 return NULL;
2172 }
2173
2174 /**
2175  * Gets the last memory_order_seq_cst write (in the total global sequence)
2176  * performed on a particular object (i.e., memory location), not including the
2177  * current action.
2178  * @param curr The current ModelAction; also denotes the object location to
2179  * check
2180  * @return The last seq_cst write
2181  */
2182 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2183 {
2184         void *location = curr->get_location();
2185         action_list_t *list = get_safe_ptr_action(obj_map, location);
2186         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2187         action_list_t::reverse_iterator rit;
2188         for (rit = list->rbegin(); rit != list->rend(); rit++)
2189                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2190                         return *rit;
2191         return NULL;
2192 }
2193
2194 /**
2195  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2196  * performed in a particular thread, prior to a particular fence.
2197  * @param tid The ID of the thread to check
2198  * @param before_fence The fence from which to begin the search; if NULL, then
2199  * search for the most recent fence in the thread.
2200  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2201  */
2202 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2203 {
2204         /* All fences should have NULL location */
2205         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2206         action_list_t::reverse_iterator rit = list->rbegin();
2207
2208         if (before_fence) {
2209                 for (; rit != list->rend(); rit++)
2210                         if (*rit == before_fence)
2211                                 break;
2212
2213                 ASSERT(*rit == before_fence);
2214                 rit++;
2215         }
2216
2217         for (; rit != list->rend(); rit++)
2218                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2219                         return *rit;
2220         return NULL;
2221 }
2222
2223 /**
2224  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2225  * location). This function identifies the mutex according to the current
2226  * action, which is presumed to operate on the same mutex.
2227  * @param curr The current ModelAction; also denotes the object location to
2228  * check
2229  * @return The last unlock operation
2230  */
2231 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2232 {
2233         void *location = curr->get_location();
2234         action_list_t *list = get_safe_ptr_action(obj_map, location);
2235         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2236         action_list_t::reverse_iterator rit;
2237         for (rit = list->rbegin(); rit != list->rend(); rit++)
2238                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2239                         return *rit;
2240         return NULL;
2241 }
2242
2243 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2244 {
2245         ModelAction *parent = get_last_action(tid);
2246         if (!parent)
2247                 parent = get_thread(tid)->get_creation();
2248         return parent;
2249 }
2250
2251 /**
2252  * Returns the clock vector for a given thread.
2253  * @param tid The thread whose clock vector we want
2254  * @return Desired clock vector
2255  */
2256 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2257 {
2258         return get_parent_action(tid)->get_cv();
2259 }
2260
2261 /**
2262  * Resolve a set of Promises with a current write. The set is provided in the
2263  * Node corresponding to @a write.
2264  * @param write The ModelAction that is fulfilling Promises
2265  * @return True if promises were resolved; false otherwise
2266  */
2267 bool ModelChecker::resolve_promises(ModelAction *write)
2268 {
2269         bool haveResolved = false;
2270         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2271         promise_list_t mustResolve, resolved;
2272
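        /* Note: 'i' indexes the Node's promise bit-set while 'promise_index'
         * tracks the (shrinking) promises vector; the two diverge once a
         * promise has been erased below. */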
2273         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2274                 Promise *promise = (*promises)[promise_index];
2275                 if (write->get_node()->get_promise(i)) {
2276                         ModelAction *read = promise->get_action();
2277                         read_from(read, write);
2278                         //Make sure the promise's value matches the write's value
2279                         ASSERT(promise->is_compatible(write));
2280                         mo_graph->resolvePromise(read, write, &mustResolve);
2281
2282                         resolved.push_back(promise);
2283                         promises->erase(promises->begin() + promise_index);
2284                         actions_to_check.push_back(read);
2285
2286                         haveResolved = true;
2287                 } else
2288                         promise_index++;
2289         }
2290
2291         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2292                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2293                                 == resolved.end())
2294                         priv->failed_promise = true;
2295         }
2296         for (unsigned int i = 0; i < resolved.size(); i++)
2297                 delete resolved[i];
2298         //Check whether reading these writes has made threads unable to
2299         //resolve promises
2300
2301         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2302                 ModelAction *read = actions_to_check[i];
2303                 mo_check_promises(read, true);
2304         }
2305
2306         return haveResolved;
2307 }
2308
2309 /**
2310  * Compute the set of promises that could potentially be satisfied by this
2311  * action. Note that the set computation actually appears in the Node, not in
2312  * ModelChecker.
2313  * @param curr The ModelAction that may satisfy promises
2314  */
2315 void ModelChecker::compute_promises(ModelAction *curr)
2316 {
2317         for (unsigned int i = 0; i < promises->size(); i++) {
2318                 Promise *promise = (*promises)[i];
2319                 const ModelAction *act = promise->get_action();
2320                 if (!act->happens_before(curr) &&
2321                                 act->is_read() &&
2322                                 !act->could_synchronize_with(curr) &&
2323                                 !act->same_thread(curr) &&
2324                                 act->get_location() == curr->get_location() &&
2325                                 promise->get_value() == curr->get_value()) {
2326                         curr->get_node()->set_promise(i, act->is_rmw());
2327                 }
2328         }
2329 }
2330
2331 /** Checks promises in response to change in ClockVector Threads. */
2332 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2333 {
2334         for (unsigned int i = 0; i < promises->size(); i++) {
2335                 Promise *promise = (*promises)[i];
2336                 const ModelAction *act = promise->get_action();
2337                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2338                                 merge_cv->synchronized_since(act)) {
2339                         if (promise->eliminate_thread(tid)) {
2340                                 //Promise has failed
2341                                 priv->failed_promise = true;
2342                                 return;
2343                         }
2344                 }
2345         }
2346 }
2347
2348 void ModelChecker::check_promises_thread_disabled()
2349 {
2350         for (unsigned int i = 0; i < promises->size(); i++) {
2351                 Promise *promise = (*promises)[i];
2352                 if (promise->has_failed()) {
2353                         priv->failed_promise = true;
2354                         return;
2355                 }
2356         }
2357 }
2358
2359 /**
2360  * @brief Checks promises in response to addition to modification order for
2361  * threads.
2362  *
2363  * We test whether threads are still available for satisfying promises after an
2364  * addition to our modification order constraints. Those that are unavailable
2365  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2366  * that promise has failed.
2367  *
2368  * @param act The ModelAction which updated the modification order
2369  * @param is_read_check Should be true if act is a read and we must check for
2370  * updates to the store from which it read (there is a distinction here for
2371  * RMW's, which are both a load and a store)
2372  */
2373 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2374 {
2375         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2376
2377         for (unsigned int i = 0; i < promises->size(); i++) {
2378                 Promise *promise = (*promises)[i];
2379                 const ModelAction *pread = promise->get_action();
2380
2381                 // Is this promise on the same location?
2382                 if (!pread->same_var(write))
2383                         continue;
2384
2385                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2386                         priv->failed_promise = true;
2387                         return;
2388                 }
2389
2390                 // Don't do any lookups twice for the same thread
2391                 if (!promise->thread_is_available(act->get_tid()))
2392                         continue;
2393
2394                 if (mo_graph->checkReachable(promise, write)) {
2395                         if (mo_graph->checkPromise(write, promise)) {
2396                                 priv->failed_promise = true;
2397                                 return;
2398                         }
2399                 }
2400         }
2401 }
2402
2403 /**
2404  * Compute the set of writes that may break the current pending release
2405  * sequence. This information is extracted from previous release sequence
2406  * calculations.
2407  *
2408  * @param curr The current ModelAction. Must be a release sequence fixup
2409  * action.
2410  */
2411 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2412 {
2413         if (pending_rel_seqs->empty())
2414                 return;
2415
2416         struct release_seq *pending = pending_rel_seqs->back();
2417         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2418                 const ModelAction *write = pending->writes[i];
2419                 curr->get_node()->add_relseq_break(write);
2420         }
2421
2422         /* NULL means don't break the sequence; just synchronize */
2423         curr->get_node()->add_relseq_break(NULL);
2424 }
2425
2426 /**
2427  * Build up an initial set of all past writes that this 'read' action may read
2428  * from. This set is determined by the clock vector's "happens before"
2429  * relationship.
2430  * @param curr is the current ModelAction that we are exploring; it must be a
2431  * 'read' operation.
2432  */
2433 void ModelChecker::build_reads_from_past(ModelAction *curr)
2434 {
2435         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2436         unsigned int i;
2437         ASSERT(curr->is_read());
2438
2439         ModelAction *last_sc_write = NULL;
2440
2441         if (curr->is_seqcst())
2442                 last_sc_write = get_last_seq_cst_write(curr);
2443
2444         /* Iterate over all threads */
2445         for (i = 0; i < thrd_lists->size(); i++) {
2446                 /* Iterate over actions in thread, starting from most recent */
2447                 action_list_t *list = &(*thrd_lists)[i];
2448                 action_list_t::reverse_iterator rit;
2449                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2450                         ModelAction *act = *rit;
2451
2452                         /* Only consider 'write' actions */
2453                         if (!act->is_write() || act == curr)
2454                                 continue;
2455
2456                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2457                         bool allow_read = true;
2458
2459                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2460                                 allow_read = false;
2461                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2462                                 allow_read = false;
2463
2464                         if (allow_read) {
2465                                 /* Only add feasible reads */
2466                                 mo_graph->startChanges();
2467                                 r_modification_order(curr, act);
2468                                 if (!is_infeasible())
2469                                         curr->get_node()->add_read_from(act);
2470                                 mo_graph->rollbackChanges();
2471                         }
2472
2473                         /* Include at most one act per-thread that "happens before" curr */
2474                         if (act->happens_before(curr))
2475                                 break;
2476                 }
2477         }
2478         /* We may find no valid may-read-from only if the execution is doomed */
2479         if (!curr->get_node()->get_read_from_size()) {
2480                 priv->no_valid_reads = true;
2481                 set_assert();
2482         }
2483
2484         if (DBG_ENABLED()) {
2485                 model_print("Reached read action:\n");
2486                 curr->print();
2487                 model_print("Printing may_read_from\n");
2488                 curr->get_node()->print_may_read_from();
2489                 model_print("End printing may_read_from\n");
2490         }
2491 }
2492
2493 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2494 {
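        /* Walk backward along the RMW chain feeding 'write': a thread on the
         * sleep set may only read this value if some write in the chain was a
         * release while that thread was asleep, or the chain bottoms out in an
         * unresolved (future) or uninitialized store. */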
2495         while (true) {
2496                 /* UNINIT actions don't have a Node, and they never sleep */
2497                 if (write->is_uninitialized())
2498                         return true;
2499                 Node *prevnode = write->get_node()->get_parent();
2500
2501                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2502                 if (write->is_release() && thread_sleep)
2503                         return true;
2504                 if (!write->is_rmw()) {
2505                         return false;
2506                 }
2507                 if (write->get_reads_from() == NULL)
2508                         return true;
2509                 write = write->get_reads_from();
2510         }
2511 }
2512
2513 /**
2514  * @brief Create a new action representing an uninitialized atomic
2515  * @param location The memory location of the atomic object
2516  * @return A pointer to a new ModelAction
2517  */
2518 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2519 {
2520         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2521         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2522         act->create_cv(NULL);
2523         return act;
2524 }
2525
2526 static void print_list(action_list_t *list)
2527 {
2528         action_list_t::iterator it;
2529
2530         model_print("---------------------------------------------------------------------\n");
2531
2532         unsigned int hash = 0;
2533
2534         for (it = list->begin(); it != list->end(); it++) {
2535                 (*it)->print();
2536                 hash = hash^(hash<<3)^((*it)->hash());
2537         }
2538         model_print("HASH %u\n", hash);
2539         model_print("---------------------------------------------------------------------\n");
2540 }
2541
2542 #if SUPPORT_MOD_ORDER_DUMP
2543 void ModelChecker::dumpGraph(char *filename) const
2544 {
2545         char buffer[200];
2546         sprintf(buffer, "%s.dot", filename);
2547         FILE *file = fopen(buffer, "w");
2548         fprintf(file, "digraph %s {\n", filename);
2549         mo_graph->dumpNodes(file);
2550         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2551
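        /* Remember the most recent action of each thread so that consecutive
         * actions in the same thread can be linked with sb edges */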
2552         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2553                 ModelAction *action = *it;
2554                 if (action->is_read()) {
2555                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2556                         if (action->get_reads_from() != NULL)
2557                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2558                 }
2559                 if (thread_array[action->get_tid()] != NULL) {
2560                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2561                 }
2562
2563                 thread_array[action->get_tid()] = action;
2564         }
2565         fprintf(file, "}\n");
2566         model_free(thread_array);
2567         fclose(file);
2568 }
2569 #endif
2570
2571 /** @brief Prints an execution trace summary. */
2572 void ModelChecker::print_summary() const
2573 {
2574 #if SUPPORT_MOD_ORDER_DUMP
2575         char buffername[100];
2576         sprintf(buffername, "exec%04u", stats.num_total);
2577         mo_graph->dumpGraphToFile(buffername);
2578         sprintf(buffername, "graph%04u", stats.num_total);
2579         dumpGraph(buffername);
2580 #endif
2581
2582         model_print("Execution %d:", stats.num_total);
2583         if (isfeasibleprefix())
2584                 model_print("\n");
2585         else
2586                 print_infeasibility(" INFEASIBLE");
2587         print_list(action_trace);
2588         model_print("\n");
2589 }
2590
2591 /**
2592  * Add a Thread to the system for the first time. Should only be called once
2593  * per thread.
2594  * @param t The Thread to add
2595  */
2596 void ModelChecker::add_thread(Thread *t)
2597 {
2598         thread_map->put(id_to_int(t->get_id()), t);
2599         scheduler->add_thread(t);
2600 }
2601
2602 /**
2603  * Removes a thread from the scheduler.
2604  * @param t The Thread to remove
2605  */
2606 void ModelChecker::remove_thread(Thread *t)
2607 {
2608         scheduler->remove_thread(t);
2609 }
2610
2611 /**
2612  * @brief Get a Thread reference by its ID
2613  * @param tid The Thread's ID
2614  * @return A Thread reference
2615  */
2616 Thread * ModelChecker::get_thread(thread_id_t tid) const
2617 {
2618         return thread_map->get(id_to_int(tid));
2619 }
2620
2621 /**
2622  * @brief Get a reference to the Thread in which a ModelAction was executed
2623  * @param act The ModelAction
2624  * @return A Thread reference
2625  */
2626 Thread * ModelChecker::get_thread(const ModelAction *act) const
2627 {
2628         return get_thread(act->get_tid());
2629 }
2630
2631 /**
2632  * @brief Check if a Thread is currently enabled
2633  * @param t The Thread to check
2634  * @return True if the Thread is currently enabled
2635  */
2636 bool ModelChecker::is_enabled(Thread *t) const
2637 {
2638         return scheduler->is_enabled(t);
2639 }
2640
2641 /**
2642  * @brief Check if a Thread is currently enabled
2643  * @param tid The ID of the Thread to check
2644  * @return True if the Thread is currently enabled
2645  */
2646 bool ModelChecker::is_enabled(thread_id_t tid) const
2647 {
2648         return scheduler->is_enabled(tid);
2649 }
2650
2651 /**
2652  * Switch from a user-context to the "master thread" context (a.k.a. system
2653  * context). This switch is made with the intention of exploring a particular
2654  * model-checking action (described by a ModelAction object). Must be called
2655  * from a user-thread context.
2656  *
2657  * @param act The current action that will be explored. May be NULL only if
2658  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2659  * ModelChecker::has_asserted).
2660  * @return The value returned by the current action
2661  */
2662 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2663 {
2664         DBG();
2665         Thread *old = thread_current();
2666         old->set_pending(act);
2667         if (Thread::swap(old, &system_context) < 0) {
2668                 perror("swap threads");
2669                 exit(EXIT_FAILURE);
2670         }
2671         return old->get_return_value();
2672 }
2673
2674 /**
2675  * Takes the next step in the execution, if possible.
2676  * @param curr The current step to take
2677  * @return The next Thread to run, if any; NULL if this execution
2678  * should terminate
2679  */
2680 Thread * ModelChecker::take_step(ModelAction *curr)
2681 {
2682         Thread *curr_thrd = get_thread(curr);
2683         ASSERT(curr_thrd->get_state() == THREAD_READY);
2684
2685         curr = check_current_action(curr);
2686
2687         /* Infeasible -> don't take any more steps */
2688         if (is_infeasible())
2689                 return NULL;
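        /* Feasible prefix with a bug report -> halt this execution and report it */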
2690         else if (isfeasibleprefix() && have_bug_reports()) {
2691                 set_assert();
2692                 return NULL;
2693         }
2694
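        /* Stop this execution once the user-specified sequence-number bound is exceeded */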
2695         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2696                 return NULL;
2697
2698         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2699                 scheduler->remove_thread(curr_thrd);
2700
2701         Thread *next_thrd = get_next_thread(curr);
2702         next_thrd = scheduler->next_thread(next_thrd);
2703
2704         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2705                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2706
2707         /*
2708          * Launch end-of-execution release sequence fixups only when there are:
2709          *
2710          * (1) no more user threads to run (or when execution replay chooses
2711          *     the 'model_thread')
2712          * (2) pending release sequences
2713          * (3) pending assertions (i.e., data races)
2714          * (4) no pending promises
2715          */
2716         if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2717                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2718                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2719                                 pending_rel_seqs->size());
2720                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2721                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2722                                 model_thread);
2723                 model_thread->set_pending(fixup);
2724                 return model_thread;
2725         }
2726
2727         /* next_thrd == NULL -> don't take any more steps */
2728         if (!next_thrd)
2729                 return NULL;
2730
2731         return next_thrd;
2732 }
2733
2734 /** Wrapper to run the user's main function, with appropriate arguments */
2735 void user_main_wrapper(void *)
2736 {
2737         user_main(model->params.argc, model->params.argv);
2738 }
2739
2740 /** @brief Run ModelChecker for the user program */
2741 void ModelChecker::run()
2742 {
2743         do {
2744                 thrd_t user_thread;
2745                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2746                 add_thread(t);
2747
2748                 do {
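                        /* Stash a pending action for every user thread that does not
                         * yet have one, by running each such thread up to its next
                         * ModelAction */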
2749                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2750                                 thread_id_t tid = int_to_id(i);
2751                                 Thread *thr = get_thread(tid);
2752                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2753                                         scheduler->next_thread(thr);
2754                                         Thread::swap(&system_context, thr);
2755                                 }
2756                         }
2757
2758                         /* Catch assertions from prior take_step or from
2759                          * between-ModelAction bugs (e.g., data races) */
2760                         if (has_asserted())
2761                                 break;
2762
2763                         /* Consume the next action for a Thread */
2764                         ModelAction *curr = t->get_pending();
2765                         t->set_pending(NULL);
2766                         t = take_step(curr);
2767                 } while (t && !t->is_model_thread());
2768                 /** @TODO Re-write release sequence fixups here */
2769         } while (next_execution());
2770
2771         print_stats();
2772 }