[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
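/** @brief Get the action list for a memory location, allocating and
 *  registering an empty list if the location has no entry yet */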
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
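/** @brief Get the per-thread vector of action lists for a memory location,
 *  allocating and registering an empty vector if the location has no entry yet */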
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         snapshot_backtrack_before(0);
163 }
164
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
167 {
168         return priv->next_thread_id++;
169 }
170
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
173 {
174         return priv->next_thread_id;
175 }
176
177 /**
178  * Must be called from user-thread context (e.g., through the global
179  * thread_current() interface)
180  *
181  * @return The currently executing Thread.
182  */
183 Thread * ModelChecker::get_current_thread() const
184 {
185         return scheduler->get_current_thread();
186 }
187
188 /** @return a sequence number for a new ModelAction */
189 modelclock_t ModelChecker::get_next_seq_num()
190 {
191         return ++priv->used_sequence_numbers;
192 }
193
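/** @return the Node at the head of the NodeStack */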
194 Node * ModelChecker::get_curr_node() const
195 {
196         return node_stack->get_head();
197 }
198
199 /**
200  * @brief Choose the next thread to execute.
201  *
202  * This function chooses the next thread that should execute. It can force the
203  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
204  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
205  * The model-checker may have no preference regarding the next thread (i.e.,
206  * when exploring a new execution ordering), in which case this will return
207  * NULL.
208  * @param curr The current ModelAction. This action might guide the choice of
209  * next thread.
210  * @return The next thread to run. If the model-checker has no preference, NULL.
211  */
212 Thread * ModelChecker::get_next_thread(ModelAction *curr)
213 {
214         thread_id_t tid;
215
216         if (curr != NULL) {
217                 /* Do not split atomic actions. */
218                 if (curr->is_rmwr())
219                         return thread_current();
220                 else if (curr->get_type() == THREAD_CREATE)
221                         return curr->get_thread_operand();
222         }
223
224         /* Have we completed exploring the preselected path? */
225         if (diverge == NULL)
226                 return NULL;
227
228         /* Else, we are trying to replay an execution */
229         ModelAction *next = node_stack->get_next()->get_action();
230
231         if (next == diverge) {
232                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
233                         earliest_diverge = diverge;
234
235                 Node *nextnode = next->get_node();
236                 Node *prevnode = nextnode->get_parent();
237                 scheduler->update_sleep_set(prevnode);
238
239                 /* Reached divergence point */
240                 if (nextnode->increment_misc()) {
241                         /* The next node will try to satisfy a different misc_index value. */
242                         tid = next->get_tid();
243                         node_stack->pop_restofstack(2);
244                 } else if (nextnode->increment_promise()) {
245                         /* The next node will try to satisfy a different set of promises. */
246                         tid = next->get_tid();
247                         node_stack->pop_restofstack(2);
248                 } else if (nextnode->increment_read_from()) {
249                         /* The next node will read from a different value. */
250                         tid = next->get_tid();
251                         node_stack->pop_restofstack(2);
252                 } else if (nextnode->increment_future_value()) {
253                         /* The next node will try to read from a different future value. */
254                         tid = next->get_tid();
255                         node_stack->pop_restofstack(2);
256                 } else if (nextnode->increment_relseq_break()) {
257                         /* The next node will try to resolve a release sequence differently */
258                         tid = next->get_tid();
259                         node_stack->pop_restofstack(2);
260                 } else {
261                         ASSERT(prevnode);
262                         /* Make a different thread execute for next step */
263                         scheduler->add_sleep(get_thread(next->get_tid()));
264                         tid = prevnode->get_next_backtrack();
265                         /* Make sure the backtracked thread isn't sleeping. */
266                         node_stack->pop_restofstack(1);
267                         if (diverge == earliest_diverge) {
268                                 earliest_diverge = prevnode->get_action();
269                         }
270                 }
271                 /* The correct sleep set is in the parent node. */
272                 execute_sleep_set();
273
274                 DEBUG("*** Divergence point ***\n");
275
276                 diverge = NULL;
277         } else {
278                 tid = next->get_tid();
279         }
280         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
281         ASSERT(tid != THREAD_ID_T_NONE);
282         return thread_map->get(id_to_int(tid));
283 }
284
285 /**
286  * We need to know what the next action of each thread in the sleep
287  * set will be.  This method computes those actions and stores each one
288  * as the corresponding Thread object's pending action.
289  */
290
291 void ModelChecker::execute_sleep_set()
292 {
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *thr = get_thread(tid);
296                 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
297                         scheduler->next_thread(thr);
298                         Thread::swap(&system_context, thr);
299                         thr->get_pending()->set_sleep_flag();
300                 }
301         }
302 }
303
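/**
 * @brief Remove threads from the sleep set that could now make progress
 *
 * Wakes any sleeping thread whose pending action could synchronize with the
 * current action (skipped when curr is the read half of an RMW).
 * @param curr The current action
 */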
304 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
305 {
306         for (unsigned int i = 0; i < get_num_threads(); i++) {
307                 Thread *thr = get_thread(int_to_id(i));
308                 if (scheduler->is_sleep_set(thr)) {
309                         ModelAction *pending_act = thr->get_pending();
310                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
311                                 //Remove this thread from sleep set
312                                 scheduler->remove_sleep(thr);
313                 }
314         }
315 }
316
317 /** @brief Alert the model-checker that an incorrectly-ordered
318  * synchronization was made */
319 void ModelChecker::set_bad_synchronization()
320 {
321         priv->bad_synchronization = true;
322 }
323
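/** @return True if the model checker has flagged the current execution to
 *  terminate early (e.g., after a feasible bug was asserted) */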
324 bool ModelChecker::has_asserted() const
325 {
326         return priv->asserted;
327 }
328
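/** @brief Flag the current execution so that it terminates as soon as possible */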
329 void ModelChecker::set_assert()
330 {
331         priv->asserted = true;
332 }
333
334 /**
335  * Check if we are in a deadlock. Should only be called at the end of an
336  * execution; it should not give false positives in the middle of an
337  * execution, since some thread should still be ENABLED at that point.
338  *
339  * @return True if program is in a deadlock; false otherwise
340  */
341 bool ModelChecker::is_deadlocked() const
342 {
343         bool blocking_threads = false;
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 thread_id_t tid = int_to_id(i);
346                 if (is_enabled(tid))
347                         return false;
348                 Thread *t = get_thread(tid);
349                 if (!t->is_model_thread() && t->get_pending())
350                         blocking_threads = true;
351         }
352         return blocking_threads;
353 }
354
355 /**
356  * Check if this is a complete execution. That is, have all threads completed
357  * execution (rather than exiting because sleep sets have forced a redundant
358  * execution).
359  *
360  * @return True if the execution is complete.
361  */
362 bool ModelChecker::is_complete_execution() const
363 {
364         for (unsigned int i = 0; i < get_num_threads(); i++)
365                 if (is_enabled(int_to_id(i)))
366                         return false;
367         return true;
368 }
369
370 /**
371  * @brief Assert a bug in the executing program.
372  *
373  * Use this function to assert any sort of bug in the user program. If the
374  * current trace is feasible (actually, a prefix of some feasible execution),
375  * then this execution will be aborted, printing the appropriate message. If
376  * the current trace is not yet feasible, the error message will be stashed and
377  * printed if the execution ever becomes feasible.
378  *
379  * @param msg Descriptive message for the bug (do not include newline char)
380  * @return True if bug is immediately-feasible
381  */
382 bool ModelChecker::assert_bug(const char *msg)
383 {
384         priv->bugs.push_back(new bug_message(msg));
385
386         if (isfeasibleprefix()) {
387                 set_assert();
388                 return true;
389         }
390         return false;
391 }
392
393 /**
394  * @brief Assert a bug in the executing program, asserted by a user thread
395  * @see ModelChecker::assert_bug
396  * @param msg Descriptive message for the bug (do not include newline char)
397  */
398 void ModelChecker::assert_user_bug(const char *msg)
399 {
400         /* If feasible bug, bail out now */
401         if (assert_bug(msg))
402                 switch_to_master(NULL);
403 }
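/*
 * Illustrative use (not part of this file): user-facing assertion code could
 * report a violated invariant via, e.g.,
 *
 *   model->assert_user_bug("invariant violated");
 *
 * which switches back to the model-checker context immediately when the
 * current trace is already a feasible prefix.
 */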
404
405 /** @return True, if any bugs have been reported for this execution */
406 bool ModelChecker::have_bug_reports() const
407 {
408         return priv->bugs.size() != 0;
409 }
410
411 /** @brief Print bug report listing for this execution (if any bugs exist) */
412 void ModelChecker::print_bugs() const
413 {
414         if (have_bug_reports()) {
415                 model_print("Bug report: %zu bug%s detected\n",
416                                 priv->bugs.size(),
417                                 priv->bugs.size() > 1 ? "s" : "");
418                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
419                         priv->bugs[i]->print();
420         }
421 }
422
423 /**
424  * @brief Record end-of-execution stats
425  *
426  * Must be run when exiting an execution. Records various stats.
427  * @see struct execution_stats
428  */
429 void ModelChecker::record_stats()
430 {
431         stats.num_total++;
432         if (!isfeasibleprefix())
433                 stats.num_infeasible++;
434         else if (have_bug_reports())
435                 stats.num_buggy_executions++;
436         else if (is_complete_execution())
437                 stats.num_complete++;
438         else
439                 stats.num_redundant++;
440 }
441
442 /** @brief Print execution stats */
443 void ModelChecker::print_stats() const
444 {
445         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
446         model_print("Number of redundant executions: %d\n", stats.num_redundant);
447         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
448         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
449         model_print("Total executions: %d\n", stats.num_total);
450         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
451 }
452
453 /**
454  * @brief End-of-execution print
455  * @param printbugs Should any existing bugs be printed?
456  */
457 void ModelChecker::print_execution(bool printbugs) const
458 {
459         print_program_output();
460
461         if (DBG_ENABLED() || params.verbose) {
462                 model_print("Earliest divergence point since last feasible execution:\n");
463                 if (earliest_diverge)
464                         earliest_diverge->print();
465                 else
466                         model_print("(Not set)\n");
467
468                 model_print("\n");
469                 print_stats();
470         }
471
472         /* Don't print invalid bugs */
473         if (printbugs)
474                 print_bugs();
475
476         model_print("\n");
477         print_summary();
478 }
479
480 /**
481  * Queries the model-checker for more executions to explore and, if one
482  * exists, resets the model-checker state to execute a new execution.
483  *
484  * @return If there are more executions to explore, return true. Otherwise,
485  * return false.
486  */
487 bool ModelChecker::next_execution()
488 {
489         DBG();
490         /* Is this execution a feasible execution that's worth bug-checking? */
491         bool complete = isfeasibleprefix() && (is_complete_execution() ||
492                         have_bug_reports());
493
494         /* End-of-execution bug checks */
495         if (complete) {
496                 if (is_deadlocked())
497                         assert_bug("Deadlock detected");
498
499                 checkDataRaces();
500         }
501
502         record_stats();
503
504         /* Output */
505         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
506                 print_execution(complete);
507         else
508                 clear_program_output();
509
510         if (complete)
511                 earliest_diverge = NULL;
512
513         if ((diverge = get_next_backtrack()) == NULL)
514                 return false;
515
516         if (DBG_ENABLED()) {
517                 model_print("Next execution will diverge at:\n");
518                 diverge->print();
519         }
520
521         reset_to_initial_state();
522         return true;
523 }
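/*
 * Usage sketch (illustrative only; the real driver lives outside this file):
 * a harness typically runs the user program once per execution and loops
 * until no unexplored backtracking points remain, e.g.,
 *
 *   do {
 *           run_user_program();   // hypothetical helper: one scheduler-driven run
 *   } while (model->next_execution());
 */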
524
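/**
 * @brief Find the most recent prior action that conflicts with the given one
 *
 * Scans the per-location action list from most recent to oldest for an action
 * that could synchronize or conflict with act; used to compute backtracking points.
 * @param act The action to check for conflicts against
 * @return The most recent conflicting action, or NULL if none exists
 */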
525 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
526 {
527         switch (act->get_type()) {
528         case ATOMIC_FENCE:
529         case ATOMIC_READ:
530         case ATOMIC_WRITE:
531         case ATOMIC_RMW: {
532                 /* Optimization: relaxed operations don't need backtracking */
533                 if (act->is_relaxed())
534                         return NULL;
535                 /* linear search: from most recent to oldest */
536                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
537                 action_list_t::reverse_iterator rit;
538                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
539                         ModelAction *prev = *rit;
540                         if (prev->could_synchronize_with(act))
541                                 return prev;
542                 }
543                 break;
544         }
545         case ATOMIC_LOCK:
546         case ATOMIC_TRYLOCK: {
547                 /* linear search: from most recent to oldest */
548                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
549                 action_list_t::reverse_iterator rit;
550                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
551                         ModelAction *prev = *rit;
552                         if (act->is_conflicting_lock(prev))
553                                 return prev;
554                 }
555                 break;
556         }
557         case ATOMIC_UNLOCK: {
558                 /* linear search: from most recent to oldest */
559                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
560                 action_list_t::reverse_iterator rit;
561                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
562                         ModelAction *prev = *rit;
563                         if (!act->same_thread(prev) && prev->is_failed_trylock())
564                                 return prev;
565                 }
566                 break;
567         }
568         case ATOMIC_WAIT: {
569                 /* linear search: from most recent to oldest */
570                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
571                 action_list_t::reverse_iterator rit;
572                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
573                         ModelAction *prev = *rit;
574                         if (!act->same_thread(prev) && prev->is_failed_trylock())
575                                 return prev;
576                         if (!act->same_thread(prev) && prev->is_notify())
577                                 return prev;
578                 }
579                 break;
580         }
581
582         case ATOMIC_NOTIFY_ALL:
583         case ATOMIC_NOTIFY_ONE: {
584                 /* linear search: from most recent to oldest */
585                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
586                 action_list_t::reverse_iterator rit;
587                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
588                         ModelAction *prev = *rit;
589                         if (!act->same_thread(prev) && prev->is_wait())
590                                 return prev;
591                 }
592                 break;
593         }
594         default:
595                 break;
596         }
597         return NULL;
598 }
599
600 /** This method finds backtracking points at which we should try to
601  * reorder other conflicting actions against the parameter ModelAction.
602  *
603  * @param act The ModelAction to find backtracking points for.
604  */
605 void ModelChecker::set_backtracking(ModelAction *act)
606 {
607         Thread *t = get_thread(act);
608         ModelAction *prev = get_last_conflict(act);
609         if (prev == NULL)
610                 return;
611
612         Node *node = prev->get_node()->get_parent();
613
614         int low_tid, high_tid;
615         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
616                 low_tid = id_to_int(act->get_tid());
617                 high_tid = low_tid + 1;
618         } else {
619                 low_tid = 0;
620                 high_tid = get_num_threads();
621         }
622
623         for (int i = low_tid; i < high_tid; i++) {
624                 thread_id_t tid = int_to_id(i);
625
626                 /* Make sure this thread can be enabled here. */
627                 if (i >= node->get_num_threads())
628                         break;
629
630                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
631                 if (node->enabled_status(tid) != THREAD_ENABLED)
632                         continue;
633
634                 /* Check if this has been explored already */
635                 if (node->has_been_explored(tid))
636                         continue;
637
638                 /* See if fairness allows */
639                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
640                         bool unfair = false;
641                         for (int t = 0; t < node->get_num_threads(); t++) {
642                                 thread_id_t tother = int_to_id(t);
643                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
644                                         unfair = true;
645                                         break;
646                                 }
647                         }
648                         if (unfair)
649                                 continue;
650                 }
651                 /* Cache the latest backtracking point */
652                 set_latest_backtrack(prev);
653
654                 /* If this is a new backtracking point, mark the tree */
655                 if (!node->set_backtrack(tid))
656                         continue;
657                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
658                                         id_to_int(prev->get_tid()),
659                                         id_to_int(t->get_id()));
660                 if (DBG_ENABLED()) {
661                         prev->print();
662                         act->print();
663                 }
664         }
665 }
666
667 /**
668  * @brief Cache a backtracking point as the "most recent", if eligible
669  *
670  * Note that this does not prepare the NodeStack for this backtracking
671  * operation; it only caches the action on a per-execution basis.
672  *
673  * @param act The operation at which we should explore a different next action
674  * (i.e., backtracking point)
675  * @return True, if this action is now the most recent backtracking point;
676  * false otherwise
677  */
678 bool ModelChecker::set_latest_backtrack(ModelAction *act)
679 {
680         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
681                 priv->next_backtrack = act;
682                 return true;
683         }
684         return false;
685 }
686
687 /**
688  * Returns the last backtracking point. The model checker will explore a different
689  * path for this point in the next execution.
690  * @return The ModelAction at which the next execution should diverge.
691  */
692 ModelAction * ModelChecker::get_next_backtrack()
693 {
694         ModelAction *next = priv->next_backtrack;
695         priv->next_backtrack = NULL;
696         return next;
697 }
698
699 /**
700  * Processes a read or rmw model action.
701  * @param curr is the read model action to process.
702  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
703  * @return True if processing this read updates the mo_graph.
704  */
705 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
706 {
707         uint64_t value = VALUE_NONE;
708         bool updated = false;
709         while (true) {
710                 const ModelAction *reads_from = curr->get_node()->get_read_from();
711                 if (reads_from != NULL) {
712                         mo_graph->startChanges();
713
714                         value = reads_from->get_value();
715                         bool r_status = false;
716
717                         if (!second_part_of_rmw) {
718                                 check_recency(curr, reads_from);
719                                 r_status = r_modification_order(curr, reads_from);
720                         }
721
722                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
723                                 mo_graph->rollbackChanges();
724                                 priv->too_many_reads = false;
725                                 continue;
726                         }
727
728                         read_from(curr, reads_from);
729                         mo_graph->commitChanges();
730                         mo_check_promises(curr, true);
731
732                         updated |= r_status;
733                 } else if (!second_part_of_rmw) {
734                         /* Read from future value */
735                         struct future_value fv = curr->get_node()->get_future_value();
736                         Promise *promise = new Promise(curr, fv);
737                         value = fv.value;
738                         curr->set_read_from_promise(promise);
739                         promises->push_back(promise);
740                         mo_graph->startChanges();
741                         updated = r_modification_order(curr, promise);
742                         mo_graph->commitChanges();
743                 }
744                 get_thread(curr)->set_return_value(value);
745                 return updated;
746         }
747 }
748
749 /**
750  * Processes a lock, trylock, unlock, wait, or notify model action.
751  * @param curr is the mutex operation to process.
752  *
753  * The trylock operation checks whether the lock is taken.  If not,
754  * it falls through to the normal lock operation case.  If so, it
755  * fails.
756  *
757  * The lock operation has already been checked that it is enabled, so
758  * it just grabs the lock and synchronizes with the previous unlock.
759  *
760  * The unlock operation has to re-enable all of the threads that are
761  * waiting on the lock.
762  *
763  * @return True if synchronization was updated; false otherwise
764  */
765 bool ModelChecker::process_mutex(ModelAction *curr)
766 {
767         std::mutex *mutex = NULL;
768         struct std::mutex_state *state = NULL;
769
770         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
771                 mutex = (std::mutex *)curr->get_location();
772                 state = mutex->get_state();
773         } else if (curr->is_wait()) {
774                 mutex = (std::mutex *)curr->get_value();
775                 state = mutex->get_state();
776         }
777
778         switch (curr->get_type()) {
779         case ATOMIC_TRYLOCK: {
780                 bool success = !state->islocked;
781                 curr->set_try_lock(success);
782                 if (!success) {
783                         get_thread(curr)->set_return_value(0);
784                         break;
785                 }
786                 get_thread(curr)->set_return_value(1);
787         }
788                 //otherwise fall through to the lock case
789         case ATOMIC_LOCK: {
790                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
791                         assert_bug("Lock access before initialization");
792                 state->islocked = true;
793                 ModelAction *unlock = get_last_unlock(curr);
794                 //synchronize with the previous unlock statement
795                 if (unlock != NULL) {
796                         curr->synchronize_with(unlock);
797                         return true;
798                 }
799                 break;
800         }
801         case ATOMIC_UNLOCK: {
802                 //unlock the lock
803                 state->islocked = false;
804                 //wake up the other threads
805                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
806                 //activate all the waiting threads
807                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
808                         scheduler->wake(get_thread(*rit));
809                 }
810                 waiters->clear();
811                 break;
812         }
813         case ATOMIC_WAIT: {
814                 //unlock the lock
815                 state->islocked = false;
816                 //wake up the other threads
817                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
818                 //activate all the waiting threads
819                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
820                         scheduler->wake(get_thread(*rit));
821                 }
822                 waiters->clear();
823                 //check whether we should go to sleep or not...simulate spurious failures
824                 if (curr->get_node()->get_misc() == 0) {
825                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
826                         //disable us
827                         scheduler->sleep(get_thread(curr));
828                 }
829                 break;
830         }
831         case ATOMIC_NOTIFY_ALL: {
832                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
833                 //activate all the waiting threads
834                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
835                         scheduler->wake(get_thread(*rit));
836                 }
837                 waiters->clear();
838                 break;
839         }
840         case ATOMIC_NOTIFY_ONE: {
841                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
842                 int wakeupthread = curr->get_node()->get_misc();
843                 action_list_t::iterator it = waiters->begin();
844                 advance(it, wakeupthread);
845                 scheduler->wake(get_thread(*it));
846                 waiters->erase(it);
847                 break;
848         }
849
850         default:
851                 ASSERT(0);
852         }
853         return false;
854 }
855
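/**
 * @brief Offer a writer's value as a "future value" for a pending reader
 *
 * If the (now more complete) modification order may allow it, record the
 * writer's value in the reader's Node with an expiration based on
 * params.maxfuturedelay; if the value is new, set a backtracking point at the
 * reader.
 * @param writer The write action supplying the value
 * @param reader The read action that may observe the value in a later execution
 */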
856 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
857 {
858         /* Do more ambitious checks now that mo is more complete */
859         if (mo_may_allow(writer, reader)) {
860                 Node *node = reader->get_node();
861
862                 /* Find an ancestor thread which exists at the time of the reader */
863                 Thread *write_thread = get_thread(writer);
864                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
865                         write_thread = write_thread->get_parent();
866
867                 struct future_value fv = {
868                         writer->get_value(),
869                         writer->get_seq_number() + params.maxfuturedelay,
870                         write_thread->get_id(),
871                 };
872                 if (node->add_future_value(fv))
873                         set_latest_backtrack(reader);
874         }
875 }
876
877 /**
878  * Process a write ModelAction
879  * @param curr The ModelAction to process
880  * @return True if the mo_graph was updated or promises were resolved
881  */
882 bool ModelChecker::process_write(ModelAction *curr)
883 {
884         bool updated_mod_order = w_modification_order(curr);
885         bool updated_promises = resolve_promises(curr);
886
887         if (promises->size() == 0) {
888                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
889                         struct PendingFutureValue pfv = (*futurevalues)[i];
890                         add_future_value(pfv.writer, pfv.act);
891                 }
892                 futurevalues->clear();
893         }
894
895         mo_graph->commitChanges();
896         mo_check_promises(curr, false);
897
898         get_thread(curr)->set_return_value(VALUE_NONE);
899         return updated_mod_order || updated_promises;
900 }
901
902 /**
903  * Process a fence ModelAction
904  * @param curr The ModelAction to process
905  * @return True if synchronization was updated
906  */
907 bool ModelChecker::process_fence(ModelAction *curr)
908 {
909         /*
910          * fence-relaxed: no-op
911          * fence-release: only log the occurrence (not in this function), for
912          *   use in later synchronization
913          * fence-acquire (this function): search for hypothetical release
914          *   sequences
915          */
916         bool updated = false;
917         if (curr->is_acquire()) {
918                 action_list_t *list = action_trace;
919                 action_list_t::reverse_iterator rit;
920                 /* Find X : is_read(X) && X --sb-> curr */
921                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
922                         ModelAction *act = *rit;
923                         if (act == curr)
924                                 continue;
925                         if (act->get_tid() != curr->get_tid())
926                                 continue;
927                         /* Stop at the beginning of the thread */
928                         if (act->is_thread_start())
929                                 break;
930                         /* Stop once we reach a prior fence-acquire */
931                         if (act->is_fence() && act->is_acquire())
932                                 break;
933                         if (!act->is_read())
934                                 continue;
935                         /* read-acquire will find its own release sequences */
936                         if (act->is_acquire())
937                                 continue;
938
939                         /* Establish hypothetical release sequences */
940                         rel_heads_list_t release_heads;
941                         get_release_seq_heads(curr, act, &release_heads);
942                         for (unsigned int i = 0; i < release_heads.size(); i++)
943                                 if (!curr->synchronize_with(release_heads[i]))
944                                         set_bad_synchronization();
945                         if (release_heads.size() != 0)
946                                 updated = true;
947                 }
948         }
949         return updated;
950 }
951
952 /**
953  * @brief Process the current action for thread-related activity
954  *
955  * Performs current-action processing for a THREAD_* ModelAction. Processing
956  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
957  * synchronization, etc.  This function is a no-op for non-THREAD actions
958  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
959  *
960  * @param curr The current action
961  * @return True if synchronization was updated or a thread completed
962  */
963 bool ModelChecker::process_thread_action(ModelAction *curr)
964 {
965         bool updated = false;
966
967         switch (curr->get_type()) {
968         case THREAD_CREATE: {
969                 thrd_t *thrd = (thrd_t *)curr->get_location();
970                 struct thread_params *params = (struct thread_params *)curr->get_value();
971                 Thread *th = new Thread(thrd, params->func, params->arg);
972                 add_thread(th);
973                 th->set_creation(curr);
974                 /* Promises can be satisfied by children */
975                 for (unsigned int i = 0; i < promises->size(); i++) {
976                         Promise *promise = (*promises)[i];
977                         if (promise->thread_is_available(curr->get_tid()))
978                                 promise->add_thread(th->get_id());
979                 }
980                 break;
981         }
982         case THREAD_JOIN: {
983                 Thread *blocking = curr->get_thread_operand();
984                 ModelAction *act = get_last_action(blocking->get_id());
985                 curr->synchronize_with(act);
986                 updated = true; /* trigger rel-seq checks */
987                 break;
988         }
989         case THREAD_FINISH: {
990                 Thread *th = get_thread(curr);
991                 while (!th->wait_list_empty()) {
992                         ModelAction *act = th->pop_wait_list();
993                         scheduler->wake(get_thread(act));
994                 }
995                 th->complete();
996                 /* Completed thread can't satisfy promises */
997                 for (unsigned int i = 0; i < promises->size(); i++) {
998                         Promise *promise = (*promises)[i];
999                         if (promise->thread_is_available(th->get_id()))
1000                                 if (promise->eliminate_thread(th->get_id()))
1001                                         priv->failed_promise = true;
1002                 }
1003                 updated = true; /* trigger rel-seq checks */
1004                 break;
1005         }
1006         case THREAD_START: {
1007                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1008                 break;
1009         }
1010         default:
1011                 break;
1012         }
1013
1014         return updated;
1015 }
1016
1017 /**
1018  * @brief Process the current action for release sequence fixup activity
1019  *
1020  * Performs model-checker release sequence fixups for the current action,
1021  * forcing a single pending release sequence to break (with a given, potential
1022  * "loose" write) or to complete (i.e., synchronize). If a pending release
1023  * sequence forms a complete release sequence, then we must perform the fixup
1024  * synchronization, mo_graph additions, etc.
1025  *
1026  * @param curr The current action; must be a release sequence fixup action
1027  * @param work_queue The work queue to which to add work items as they are
1028  * generated
1029  */
1030 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1031 {
1032         const ModelAction *write = curr->get_node()->get_relseq_break();
1033         struct release_seq *sequence = pending_rel_seqs->back();
1034         pending_rel_seqs->pop_back();
1035         ASSERT(sequence);
1036         ModelAction *acquire = sequence->acquire;
1037         const ModelAction *rf = sequence->rf;
1038         const ModelAction *release = sequence->release;
1039         ASSERT(acquire);
1040         ASSERT(release);
1041         ASSERT(rf);
1042         ASSERT(release->same_thread(rf));
1043
1044         if (write == NULL) {
1045                 /**
1046                  * @todo Forcing a synchronization requires that we set
1047                  * modification order constraints. For instance, we can't allow
1048                  * a fixup sequence in which two separate read-acquire
1049                  * operations read from the same sequence, where the first one
1050                  * synchronizes and the other doesn't. Essentially, we can't
1051                  * allow any writes to insert themselves between 'release' and
1052                  * 'rf'
1053                  */
1054
1055                 /* Must synchronize */
1056                 if (!acquire->synchronize_with(release)) {
1057                         set_bad_synchronization();
1058                         return;
1059                 }
1060                 /* Re-check all pending release sequences */
1061                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1062                 /* Re-check act for mo_graph edges */
1063                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1064
1065                 /* propagate synchronization to later actions */
1066                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1067                 for (; (*rit) != acquire; rit++) {
1068                         ModelAction *propagate = *rit;
1069                         if (acquire->happens_before(propagate)) {
1070                                 propagate->synchronize_with(acquire);
1071                                 /* Re-check 'propagate' for mo_graph edges */
1072                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1073                         }
1074                 }
1075         } else {
1076                 /* Break release sequence with new edges:
1077                  *   release --mo--> write --mo--> rf */
1078                 mo_graph->addEdge(release, write);
1079                 mo_graph->addEdge(write, rf);
1080         }
1081
1082         /* See if we have realized a data race */
1083         checkDataRaces();
1084 }
1085
1086 /**
1087  * Initialize the current action by performing one or more of the following
1088  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1089  * in the NodeStack, manipulating backtracking sets, allocating and
1090  * initializing clock vectors, and computing the promises to fulfill.
1091  *
1092  * @param curr The current action, as passed from the user context; may be
1093  * freed/invalidated after the execution of this function, with a different
1094  * action "returned" its place (pass-by-reference)
1095  * @return True if curr is a newly-explored action; false otherwise
1096  */
1097 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1098 {
1099         ModelAction *newcurr;
1100
1101         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1102                 newcurr = process_rmw(*curr);
1103                 delete *curr;
1104
1105                 if (newcurr->is_rmw())
1106                         compute_promises(newcurr);
1107
1108                 *curr = newcurr;
1109                 return false;
1110         }
1111
1112         (*curr)->set_seq_number(get_next_seq_num());
1113
1114         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1115         if (newcurr) {
1116                 /* First restore type and order in case of RMW operation */
1117                 if ((*curr)->is_rmwr())
1118                         newcurr->copy_typeandorder(*curr);
1119
1120                 ASSERT((*curr)->get_location() == newcurr->get_location());
1121                 newcurr->copy_from_new(*curr);
1122
1123                 /* Discard duplicate ModelAction; use action from NodeStack */
1124                 delete *curr;
1125
1126                 /* Always compute new clock vector */
1127                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1128
1129                 *curr = newcurr;
1130                 return false; /* Action was explored previously */
1131         } else {
1132                 newcurr = *curr;
1133
1134                 /* Always compute new clock vector */
1135                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1136
1137                 /* Assign most recent release fence */
1138                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1139
1140                 /*
1141                  * Perform one-time actions when pushing new ModelAction onto
1142                  * NodeStack
1143                  */
1144                 if (newcurr->is_write())
1145                         compute_promises(newcurr);
1146                 else if (newcurr->is_relseq_fixup())
1147                         compute_relseq_breakwrites(newcurr);
1148                 else if (newcurr->is_wait())
1149                         newcurr->get_node()->set_misc_max(2);
1150                 else if (newcurr->is_notify_one()) {
1151                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1152                 }
1153                 return true; /* This was a new ModelAction */
1154         }
1155 }
1156
1157 /**
1158  * @brief Establish reads-from relation between two actions
1159  *
1160  * Perform basic operations involved with establishing a concrete rf relation,
1161  * including setting the ModelAction data and checking for release sequences.
1162  *
1163  * @param act The action that is reading (must be a read)
1164  * @param rf The action from which we are reading (must be a write)
1165  *
1166  * @return True if this read established synchronization
1167  */
1168 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1169 {
1170         act->set_read_from(rf);
1171         if (rf != NULL && act->is_acquire()) {
1172                 rel_heads_list_t release_heads;
1173                 get_release_seq_heads(act, act, &release_heads);
1174                 int num_heads = release_heads.size();
1175                 for (unsigned int i = 0; i < release_heads.size(); i++)
1176                         if (!act->synchronize_with(release_heads[i])) {
1177                                 set_bad_synchronization();
1178                                 num_heads--;
1179                         }
1180                 return num_heads > 0;
1181         }
1182         return false;
1183 }
1184
1185 /**
1186  * @brief Check whether a model action is enabled.
1187  *
1188  * Checks whether a lock or join operation would be successful (i.e., the
1189  * lock is not already held, or the joined thread has already completed). If
1190  * not, put the action in a waiter list.
1191  *
1192  * @param curr is the ModelAction to check whether it is enabled.
1193  * @return a bool that indicates whether the action is enabled.
1194  */
1195 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1196         if (curr->is_lock()) {
1197                 std::mutex *lock = (std::mutex *)curr->get_location();
1198                 struct std::mutex_state *state = lock->get_state();
1199                 if (state->islocked) {
1200                         //Stick the action in the appropriate waiting queue
1201                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1202                         return false;
1203                 }
1204         } else if (curr->get_type() == THREAD_JOIN) {
1205                 Thread *blocking = (Thread *)curr->get_location();
1206                 if (!blocking->is_complete()) {
1207                         blocking->push_wait_list(curr);
1208                         return false;
1209                 }
1210         }
1211
1212         return true;
1213 }
1214
1215 /**
1216  * This is the heart of the model checker routine. It performs model-checking
1217  * actions corresponding to a given "current action." Among other processes, it
1218  * calculates reads-from relationships, updates synchronization clock vectors,
1219  * forms a memory_order constraints graph, and handles replay/backtrack
1220  * execution when running permutations of previously-observed executions.
1221  *
1222  * @param curr The current action to process
1223  * @return The ModelAction that is actually executed; may be different than
1224  * curr; may be NULL, if the current action is not enabled to run
1225  */
1226 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1227 {
1228         ASSERT(curr);
1229         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1230
1231         if (!check_action_enabled(curr)) {
1232                 /* Make the execution look like we chose to run this action
1233                  * much later, when a lock/join can succeed */
1234                 get_thread(curr)->set_pending(curr);
1235                 scheduler->sleep(get_thread(curr));
1236                 return NULL;
1237         }
1238
1239         bool newly_explored = initialize_curr_action(&curr);
1240
1241         DBG();
1242         if (DBG_ENABLED())
1243                 curr->print();
1244
1245         wake_up_sleeping_actions(curr);
1246
1247         /* Add the action to lists before any other model-checking tasks */
1248         if (!second_part_of_rmw)
1249                 add_action_to_lists(curr);
1250
1251         /* Build may_read_from set for newly-created actions */
1252         if (newly_explored && curr->is_read())
1253                 build_reads_from_past(curr);
1254
1255         /* Initialize work_queue with the "current action" work */
1256         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1257         while (!work_queue.empty() && !has_asserted()) {
1258                 WorkQueueEntry work = work_queue.front();
1259                 work_queue.pop_front();
1260
1261                 switch (work.type) {
1262                 case WORK_CHECK_CURR_ACTION: {
1263                         ModelAction *act = work.action;
1264                         bool update = false; /* update this location's release seq's */
1265                         bool update_all = false; /* update all release seq's */
1266
1267                         if (process_thread_action(curr))
1268                                 update_all = true;
1269
1270                         if (act->is_read() && process_read(act, second_part_of_rmw))
1271                                 update = true;
1272
1273                         if (act->is_write() && process_write(act))
1274                                 update = true;
1275
1276                         if (act->is_fence() && process_fence(act))
1277                                 update_all = true;
1278
1279                         if (act->is_mutex_op() && process_mutex(act))
1280                                 update_all = true;
1281
1282                         if (act->is_relseq_fixup())
1283                                 process_relseq_fixup(curr, &work_queue);
1284
1285                         if (update_all)
1286                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1287                         else if (update)
1288                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1289                         break;
1290                 }
1291                 case WORK_CHECK_RELEASE_SEQ:
1292                         resolve_release_sequences(work.location, &work_queue);
1293                         break;
1294                 case WORK_CHECK_MO_EDGES: {
1295                         /** @todo Complete verification of work_queue */
1296                         ModelAction *act = work.action;
1297                         bool updated = false;
1298
1299                         if (act->is_read()) {
1300                                 const ModelAction *rf = act->get_reads_from();
1301                                 const Promise *promise = act->get_reads_from_promise();
1302                                 if (rf) {
1303                                         if (r_modification_order(act, rf))
1304                                                 updated = true;
1305                                 } else if (promise) {
1306                                         if (r_modification_order(act, promise))
1307                                                 updated = true;
1308                                 }
1309                         }
1310                         if (act->is_write()) {
1311                                 if (w_modification_order(act))
1312                                         updated = true;
1313                         }
1314                         mo_graph->commitChanges();
1315
1316                         if (updated)
1317                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1318                         break;
1319                 }
1320                 default:
1321                         ASSERT(false);
1322                         break;
1323                 }
1324         }
1325
1326         check_curr_backtracking(curr);
1327         set_backtracking(curr);
1328         return curr;
1329 }
1330
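/** @brief Set curr as the latest backtracking point if its Node (or the
 *  parent Node) still has unexplored alternatives */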
1331 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1332 {
1333         Node *currnode = curr->get_node();
1334         Node *parnode = currnode->get_parent();
1335
1336         if ((parnode && !parnode->backtrack_empty()) ||
1337                          !currnode->misc_empty() ||
1338                          !currnode->read_from_empty() ||
1339                          !currnode->future_value_empty() ||
1340                          !currnode->promise_empty() ||
1341                          !currnode->relseq_break_empty()) {
1342                 set_latest_backtrack(curr);
1343         }
1344 }
1345
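/** @return True if any outstanding promise has outlived its expiration
 *  sequence number */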
1346 bool ModelChecker::promises_expired() const
1347 {
1348         for (unsigned int i = 0; i < promises->size(); i++) {
1349                 Promise *promise = (*promises)[i];
1350                 if (promise->get_expiration() < priv->used_sequence_numbers)
1351                         return true;
1352         }
1353         return false;
1354 }
1355
1356 /**
1357  * This is the strongest feasibility check available.
1358  * @return whether the current trace (partial or complete) must be a prefix of
1359  * a feasible trace.
1360  */
1361 bool ModelChecker::isfeasibleprefix() const
1362 {
1363         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1364 }
1365
1366 /**
1367  * Print diagnostic information about an infeasible execution
1368  * @param prefix A string to prefix the output with; if NULL, then a default
1369  * message prefix will be provided
1370  */
1371 void ModelChecker::print_infeasibility(const char *prefix) const
1372 {
1373         char buf[100];
1374         char *ptr = buf;
1375         if (mo_graph->checkForCycles())
1376                 ptr += sprintf(ptr, "[mo cycle]");
1377         if (priv->failed_promise)
1378                 ptr += sprintf(ptr, "[failed promise]");
1379         if (priv->too_many_reads)
1380                 ptr += sprintf(ptr, "[too many reads]");
1381         if (priv->no_valid_reads)
1382                 ptr += sprintf(ptr, "[no valid reads-from]");
1383         if (priv->bad_synchronization)
1384                 ptr += sprintf(ptr, "[bad sw ordering]");
1385         if (promises_expired())
1386                 ptr += sprintf(ptr, "[promise expired]");
1387         if (promises->size() != 0)
1388                 ptr += sprintf(ptr, "[unresolved promise]");
1389         if (ptr != buf)
1390                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1391 }
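
/*
 * For example, an execution containing a modification-order cycle and an
 * unresolved promise would be reported (with the default prefix) as:
 *
 *   Infeasible: [mo cycle][unresolved promise]
 */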
1392
1393 /**
1394  * Returns whether the current completed trace is feasible, except for pending
1395  * release sequences.
1396  */
1397 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1398 {
1399         return !is_infeasible() && promises->size() == 0;
1400 }
1401
1402 /**
1403  * Check if the current partial trace is infeasible. Does not check any
1404  * end-of-execution flags, which might rule out the execution; thus, this is
1405  * useful only for ruling an execution infeasible, not for declaring it feasible.
1406  * @return whether the current partial trace is infeasible.
1407  */
1408 bool ModelChecker::is_infeasible() const
1409 {
1410         return mo_graph->checkForCycles() ||
1411                 priv->no_valid_reads ||
1412                 priv->failed_promise ||
1413                 priv->too_many_reads ||
1414                 priv->bad_synchronization ||
1415                 promises_expired();
1416 }
1417
1418 /** Close out an RMWR by converting the previous RMWR into a RMW or READ. */
1419 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1420         ModelAction *lastread = get_last_action(act->get_tid());
1421         lastread->process_rmw(act);
1422         if (act->is_rmw()) {
1423                 if (lastread->get_reads_from())
1424                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1425                 else
1426                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1427                 mo_graph->commitChanges();
1428         }
1429         return lastread;
1430 }
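
/*
 * A sketch of the client-level operations that give rise to such RMWR/RMW
 * pairs, assuming the usual C++11 <atomic> front end; the mapping onto
 * RMWR/RMW/READ actions is presumably done by the atomic-operation front end
 * rather than in this function, and the names below are illustrative only:
 *
 *   std::atomic<int> x(0);
 *
 *   void rmw_example() {
 *       // Always writes, so its read part (RMWR) is closed out as an RMW.
 *       x.fetch_add(1, std::memory_order_acq_rel);
 *
 *       // A compare_exchange that fails writes nothing, so its read part
 *       // would be closed out as a plain READ instead.
 *       int expected = 42;
 *       x.compare_exchange_strong(expected, 7, std::memory_order_acq_rel);
 *   }
 */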
1431
1432 /**
1433  * Checks whether a thread has read from the same write too many times
1434  * without seeing the effects of a later write.
1435  *
1436  * Basic idea:
1437  * 1) there must be a different write that we could read from that would satisfy the modification order,
1438  * 2) we must have read from the same write in excess of maxreads times, and
1439  * 3) that other write must have been in the may-read-from set for each of those reads.
1440  *
1441  * If so, we decide that the execution is no longer feasible.
1442  */
1443 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1444 {
1445         if (params.maxreads != 0) {
1446                 if (curr->get_node()->get_read_from_size() <= 1)
1447                         return;
1448                 //Must make sure that the execution is currently feasible, since we could
1449                 //accidentally clear infeasibility by rolling back
1450                 if (is_infeasible())
1451                         return;
1452                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1453                 int tid = id_to_int(curr->get_tid());
1454
1455                 /* Skip checks */
1456                 if ((int)thrd_lists->size() <= tid)
1457                         return;
1458                 action_list_t *list = &(*thrd_lists)[tid];
1459
1460                 action_list_t::reverse_iterator rit = list->rbegin();
1461                 /* Skip past curr */
1462                 for (; (*rit) != curr; rit++)
1463                         ;
1464                 /* go past curr now */
1465                 rit++;
1466
1467                 action_list_t::reverse_iterator ritcopy = rit;
1468                 //See if we have enough reads from the same value
1469                 int count = 0;
1470                 for (; count < params.maxreads; rit++, count++) {
1471                         if (rit == list->rend())
1472                                 return;
1473                         ModelAction *act = *rit;
1474                         if (!act->is_read())
1475                                 return;
1476
1477                         if (act->get_reads_from() != rf)
1478                                 return;
1479                         if (act->get_node()->get_read_from_size() <= 1)
1480                                 return;
1481                 }
1482                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1483                         /* Get write */
1484                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1485
1486                         /* Need a different write */
1487                         if (write == rf)
1488                                 continue;
1489
1490                         /* Test to see whether this is a feasible write to read from */
1491                         /* NOTE: all members of the may-read-from set should be
1492                          *  feasible, so we no longer check that here */
1493
1494                         rit = ritcopy;
1495
1496                         bool feasiblewrite = true;
1497                         //now we need to see if this write works for everyone
1498
1499                         for (int loop = count; loop > 0; loop--, rit++) {
1500                                 ModelAction *act = *rit;
1501                                 bool foundvalue = false;
1502                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1503                                         if (act->get_node()->get_read_from_at(j) == write) {
1504                                                 foundvalue = true;
1505                                                 break;
1506                                         }
1507                                 }
1508                                 if (!foundvalue) {
1509                                         feasiblewrite = false;
1510                                         break;
1511                                 }
1512                         }
1513                         if (feasiblewrite) {
1514                                 priv->too_many_reads = true;
1515                                 return;
1516                         }
1517                 }
1518         }
1519 }
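
/*
 * A sketch of the pattern this check targets, assuming C++11 atomics on the
 * client side (names are illustrative only): a reader spinning on a stale
 * value could otherwise be re-explored indefinitely, reading from the same
 * store even though a newer store is available in its may-read-from set.
 *
 *   std::atomic<bool> flag(false);
 *
 *   void reader() {
 *       // Each iteration reads from the initializing store; after
 *       // params.maxreads such reads with the writer's store available,
 *       // the execution is declared infeasible instead of explored further.
 *       while (!flag.load(std::memory_order_acquire))
 *           ;
 *   }
 *
 *   void writer() {
 *       flag.store(true, std::memory_order_release);
 *   }
 */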
1520
1521 /**
1522  * Updates the mo_graph with the constraints imposed from the current
1523  * read.
1524  *
1525  * Basic idea is the following: Go through each other thread and find
1526  * the last action that happened before our read.  Two cases:
1527  *
1528  * (1) The action is a write => that write must either occur before
1529  * the write we read from or be the write we read from.
1530  *
1531  * (2) The action is a read => the write that that action read from
1532  * must occur before the write we read from or be the same write.
1533  *
1534  * @param curr The current action. Must be a read.
1535  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1536  * @return True if modification order edges were added; false otherwise
1537  */
1538 template <typename rf_type>
1539 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1540 {
1541         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1542         unsigned int i;
1543         bool added = false;
1544         ASSERT(curr->is_read());
1545
1546         /* Last SC fence in the current thread */
1547         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1548
1549         /* Iterate over all threads */
1550         for (i = 0; i < thrd_lists->size(); i++) {
1551                 /* Last SC fence in thread i */
1552                 ModelAction *last_sc_fence_thread_local = NULL;
1553                 if (int_to_id((int)i) != curr->get_tid())
1554                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1555
1556                 /* Last SC fence in thread i, before last SC fence in current thread */
1557                 ModelAction *last_sc_fence_thread_before = NULL;
1558                 if (last_sc_fence_local)
1559                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1560
1561                 /* Iterate over actions in thread, starting from most recent */
1562                 action_list_t *list = &(*thrd_lists)[i];
1563                 action_list_t::reverse_iterator rit;
1564                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1565                         ModelAction *act = *rit;
1566
1567                         if (act->is_write() && !act->equals(rf) && act != curr) {
1568                                 /* C++, Section 29.3 statement 5 */
1569                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1570                                                 *act < *last_sc_fence_thread_local) {
1571                                         added = mo_graph->addEdge(act, rf) || added;
1572                                         break;
1573                                 }
1574                                 /* C++, Section 29.3 statement 4 */
1575                                 else if (act->is_seqcst() && last_sc_fence_local &&
1576                                                 *act < *last_sc_fence_local) {
1577                                         added = mo_graph->addEdge(act, rf) || added;
1578                                         break;
1579                                 }
1580                                 /* C++, Section 29.3 statement 6 */
1581                                 else if (last_sc_fence_thread_before &&
1582                                                 *act < *last_sc_fence_thread_before) {
1583                                         added = mo_graph->addEdge(act, rf) || added;
1584                                         break;
1585                                 }
1586                         }
1587
1588                         /*
1589                          * Include at most one act per-thread that "happens
1590                          * before" curr; don't consider curr itself.
1591                          */
1592                         if (act->happens_before(curr) && act != curr) {
1593                                 if (act->is_write()) {
1594                                         if (!act->equals(rf)) {
1595                                                 added = mo_graph->addEdge(act, rf) || added;
1596                                         }
1597                                 } else {
1598                                         const ModelAction *prevreadfrom = act->get_reads_from();
1599                                         //if the previous read is unresolved, keep going...
1600                                         if (prevreadfrom == NULL)
1601                                                 continue;
1602
1603                                         if (!prevreadfrom->equals(rf)) {
1604                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1605                                         }
1606                                 }
1607                                 break;
1608                         }
1609                 }
1610         }
1611
1612         /*
1613          * All compatible, thread-exclusive promises must be ordered after any
1614          * concrete loads from the same thread
1615          */
1616         for (unsigned int i = 0; i < promises->size(); i++)
1617                 if ((*promises)[i]->is_compatible_exclusive(curr))
1618                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1619
1620         return added;
1621 }
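
/*
 * A sketch of case (1) above, assuming C++11 atomics on the client side
 * (W1, W2 and R are illustrative names only):
 *
 *   std::atomic<int> x(0);
 *
 *   void thread_a() {
 *       x.store(1, std::memory_order_release);        // W1
 *   }
 *
 *   void thread_b() {
 *       x.store(2, std::memory_order_release);        // W2
 *       int r = x.load(std::memory_order_acquire);    // R
 *       (void)r;
 *   }
 *
 * If R reads from W1, then W2 is the last action in thread_b that happens
 * before R, and it is a write, so the edge W2 --mo--> W1 is added: R could
 * not have read from W1 if W1 were modification-ordered before W2.
 */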
1622
1623 /**
1624  * Updates the mo_graph with the constraints imposed from the current write.
1625  *
1626  * Basic idea is the following: Go through each other thread and find
1627  * the latest action that happened before our write.  Two cases:
1628  *
1629  * (1) The action is a write => that write must occur before
1630  * the current write
1631  *
1632  * (2) The action is a read => the write that that action read from
1633  * must occur before the current write.
1634  *
1635  * This method also handles two other issues:
1636  *
1637  * (I) Sequential Consistency: Making sure that if the current write is
1638  * seq_cst, that it occurs after the previous seq_cst write.
1639  *
1640  * (II) Sending the write back to non-synchronizing reads.
1641  *
1642  * @param curr The current action. Must be a write.
1643  * @return True if modification order edges were added; false otherwise
1644  */
1645 bool ModelChecker::w_modification_order(ModelAction *curr)
1646 {
1647         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1648         unsigned int i;
1649         bool added = false;
1650         ASSERT(curr->is_write());
1651
1652         if (curr->is_seqcst()) {
1653                 /* We have to at least see the last sequentially consistent write,
1654                  * so we are initialized. */
1655                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1656                 if (last_seq_cst != NULL) {
1657                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1658                 }
1659         }
1660
1661         /* Last SC fence in the current thread */
1662         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1663
1664         /* Iterate over all threads */
1665         for (i = 0; i < thrd_lists->size(); i++) {
1666                 /* Last SC fence in thread i, before last SC fence in current thread */
1667                 ModelAction *last_sc_fence_thread_before = NULL;
1668                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1669                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1670
1671                 /* Iterate over actions in thread, starting from most recent */
1672                 action_list_t *list = &(*thrd_lists)[i];
1673                 action_list_t::reverse_iterator rit;
1674                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1675                         ModelAction *act = *rit;
1676                         if (act == curr) {
1677                                 /*
1678                                  * 1) If RMW and it actually read from something, then we
1679                                  * already have all relevant edges, so just skip to next
1680                                  * thread.
1681                                  *
1682                                  * 2) If RMW and it didn't read from anything, we should add
1683                                  * whatever edge we can get to speed up convergence.
1684                                  *
1685                                  * 3) If normal write, we need to look at earlier actions, so
1686                                  * continue processing list.
1687                                  */
1688                                 if (curr->is_rmw()) {
1689                                         if (curr->get_reads_from() != NULL)
1690                                                 break;
1691                                         else
1692                                                 continue;
1693                                 } else
1694                                         continue;
1695                         }
1696
1697                         /* C++, Section 29.3 statement 7 */
1698                         if (last_sc_fence_thread_before && act->is_write() &&
1699                                         *act < *last_sc_fence_thread_before) {
1700                                 added = mo_graph->addEdge(act, curr) || added;
1701                                 break;
1702                         }
1703
1704                         /*
1705                          * Include at most one act per-thread that "happens
1706                          * before" curr
1707                          */
1708                         if (act->happens_before(curr)) {
1709                                 /*
1710                                  * Note: if act is RMW, just add edge:
1711                                  *   act --mo--> curr
1712                                  * The following edge should be handled elsewhere:
1713                                  *   readfrom(act) --mo--> act
1714                                  */
1715                                 if (act->is_write())
1716                                         added = mo_graph->addEdge(act, curr) || added;
1717                                 else if (act->is_read()) {
1718                                         //if the previous read is unresolved, just keep going
1719                                         if (act->get_reads_from() == NULL)
1720                                                 continue;
1721                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1722                                 }
1723                                 break;
1724                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1725                                                      !act->same_thread(curr)) {
1726                                 /* We have an action that:
1727                                    (1) did not happen before us
1728                                    (2) is a read and we are a write
1729                                    (3) cannot synchronize with us
1730                                    (4) is in a different thread
1731                                    =>
1732                                    that read could potentially read from our write.  Note that
1733                                    these checks are overly conservative at this point; we'll
1734                                    do more checks before actually removing the
1735                                    PendingFutureValue.
1736
1737                                  */
1738                                 if (thin_air_constraint_may_allow(curr, act)) {
1739                                         if (!is_infeasible())
1740                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1741                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1742                                                 add_future_value(curr, act);
1743                                 }
1744                         }
1745                 }
1746         }
1747
1748         /*
1749          * All compatible, thread-exclusive promises must be ordered after any
1750          * concrete stores to the same thread, or else they can be merged with
1751          * this store later
1752          */
1753         for (unsigned int i = 0; i < promises->size(); i++)
1754                 if ((*promises)[i]->is_compatible_exclusive(curr))
1755                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1756
1757         return added;
1758 }
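
/*
 * A sketch of case (2) above, assuming C++11 atomics on the client side
 * (W1, R and W2 are illustrative names only):
 *
 *   std::atomic<int> x(0);
 *
 *   void thread_a() {
 *       x.store(1, std::memory_order_release);        // W1
 *   }
 *
 *   void thread_b() {
 *       int r = x.load(std::memory_order_acquire);    // R
 *       x.store(2, std::memory_order_release);        // W2 (curr)
 *       (void)r;
 *   }
 *
 * When W2 is processed and R has read from W1, R is the last action in
 * thread_b that happens before W2, so the edge W1 --mo--> W2 is added.
 */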
1759
1760 /** Arbitrary reads from the future are not allowed.  Section 29.3
1761  * part 9 places some constraints.  This method checks one consequence of that
1762  * constraint.  Others require compiler support. */
1763 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1764 {
1765         if (!writer->is_rmw())
1766                 return true;
1767
1768         if (!reader->is_rmw())
1769                 return true;
1770
1771         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1772                 if (search == reader)
1773                         return false;
1774                 if (search->get_tid() == reader->get_tid() &&
1775                                 search->happens_before(reader))
1776                         break;
1777         }
1778
1779         return true;
1780 }
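
/*
 * A sketch of the RMW cycle this test rules out, assuming C++11 atomics on
 * the client side (A and B are illustrative names only):
 *
 *   std::atomic<int> x(0);
 *
 *   void thread_a() {
 *       x.fetch_add(1, std::memory_order_relaxed);    // A (writer)
 *   }
 *
 *   void thread_b() {
 *       x.fetch_add(1, std::memory_order_relaxed);    // B (reader)
 *   }
 *
 * If A's reads-from chain already reaches B, then letting B read from A as
 * a future value would make B read from a write whose value depends on B
 * itself, so the combination is disallowed here.
 */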
1781
1782 /**
1783  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1784  * some constraints. This method checks the following constraint (others
1785  * require compiler support):
1786  *
1787  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1788  */
1789 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1790 {
1791         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1792         unsigned int i;
1793         /* Iterate over all threads */
1794         for (i = 0; i < thrd_lists->size(); i++) {
1795                 const ModelAction *write_after_read = NULL;
1796
1797                 /* Iterate over actions in thread, starting from most recent */
1798                 action_list_t *list = &(*thrd_lists)[i];
1799                 action_list_t::reverse_iterator rit;
1800                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1801                         ModelAction *act = *rit;
1802
1803                         /* Don't disallow due to act == reader */
1804                         if (!reader->happens_before(act) || reader == act)
1805                                 break;
1806                         else if (act->is_write())
1807                                 write_after_read = act;
1808                         else if (act->is_read() && act->get_reads_from() != NULL)
1809                                 write_after_read = act->get_reads_from();
1810                 }
1811
1812                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1813                         return false;
1814         }
1815         return true;
1816 }
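
/*
 * A sketch of the constraint above, assuming C++11 atomics on the client
 * side (X, Y and Z are illustrative names only):
 *
 *   std::atomic<int> v(0);
 *
 *   void thread_a() {
 *       int r = v.load(std::memory_order_relaxed);    // X (reader)
 *       v.store(1, std::memory_order_relaxed);        // Y
 *       (void)r;
 *   }
 *
 *   void thread_b() {
 *       v.store(2, std::memory_order_relaxed);        // Z (writer)
 *   }
 *
 * If the edge Y --mo--> Z is already established, then X must not read from
 * Z, because X happens before Y; this function reports that combination as
 * disallowed.
 */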
1817
1818 /**
1819  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1820  * The ModelAction under consideration is expected to be taking part in
1821  * release/acquire synchronization as an object of the "reads from" relation.
1822  * Note that this can only provide release sequence support for RMW chains
1823  * which do not read from the future, as those actions cannot be traced until
1824  * their "promise" is fulfilled. Similarly, we may not even establish the
1825  * presence of a release sequence with certainty, as some modification order
1826  * constraints may be decided further in the future. Thus, this function
1827  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1828  * and a boolean representing certainty.
1829  *
1830  * @param rf The action that might be part of a release sequence. Must be a
1831  * write.
1832  * @param release_heads A pass-by-reference style return parameter. After
1833  * execution of this function, release_heads will contain the heads of all the
1834  * relevant release sequences, if any exist with certainty.
1835  * @param pending A pass-by-reference style return parameter which is only used
1836  * when returning false (i.e., uncertain). Used to return information about
1837  * an uncertain release sequence, including any write operations that might
1838  * break the sequence.
1839  * @return true, if the ModelChecker is certain that release_heads is complete;
1840  * false otherwise
1841  */
1842 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1843                 rel_heads_list_t *release_heads,
1844                 struct release_seq *pending) const
1845 {
1846         /* Only check for release sequences if there are no cycles */
1847         if (mo_graph->checkForCycles())
1848                 return false;
1849
1850         while (rf) {
1851                 ASSERT(rf->is_write());
1852
1853                 if (rf->is_release())
1854                         release_heads->push_back(rf);
1855                 else if (rf->get_last_fence_release())
1856                         release_heads->push_back(rf->get_last_fence_release());
1857                 if (!rf->is_rmw())
1858                         break; /* End of RMW chain */
1859
1860                 /** @todo Need to be smarter here...  In the Linux lock
1861                  * example, this will run to the beginning of the program for
1862                  * every acquire. */
1863                 /** @todo The way to be smarter here is to keep going until one
1864                  * thread has a release preceded by an acquire and you've seen
1865                  * both. */
1866
1867                 /* acq_rel RMW is a sufficient stopping condition */
1868                 if (rf->is_acquire() && rf->is_release())
1869                         return true; /* complete */
1870
1871                 rf = rf->get_reads_from();
1872         }
1873         if (!rf) {
1874                 /* read from future: need to settle this later */
1875                 pending->rf = NULL;
1876                 return false; /* incomplete */
1877         }
1878
1879         if (rf->is_release())
1880                 return true; /* complete */
1881
1882         /* else relaxed write
1883          * - check for fence-release in the same thread (29.8, stmt. 3)
1884          * - check modification order for contiguous subsequence
1885          *   -> rf must be same thread as release */
1886
1887         const ModelAction *fence_release = rf->get_last_fence_release();
1888         /* Synchronize with a fence-release unconditionally; we don't need to
1889          * find any more "contiguous subsequence..." for it */
1890         if (fence_release)
1891                 release_heads->push_back(fence_release);
1892
1893         int tid = id_to_int(rf->get_tid());
1894         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1895         action_list_t *list = &(*thrd_lists)[tid];
1896         action_list_t::const_reverse_iterator rit;
1897
1898         /* Find rf in the thread list */
1899         rit = std::find(list->rbegin(), list->rend(), rf);
1900         ASSERT(rit != list->rend());
1901
1902         /* Find the last {write,fence}-release */
1903         for (; rit != list->rend(); rit++) {
1904                 if (fence_release && *(*rit) < *fence_release)
1905                         break;
1906                 if ((*rit)->is_release())
1907                         break;
1908         }
1909         if (rit == list->rend()) {
1910                 /* No write-release in this thread */
1911                 return true; /* complete */
1912         } else if (fence_release && *(*rit) < *fence_release) {
1913                 /* The fence-release is more recent (and so, "stronger") than
1914                  * the most recent write-release */
1915                 return true; /* complete */
1916         } /* else, need to establish contiguous release sequence */
1917         ModelAction *release = *rit;
1918
1919         ASSERT(rf->same_thread(release));
1920
1921         pending->writes.clear();
1922
1923         bool certain = true;
1924         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1925                 if (id_to_int(rf->get_tid()) == (int)i)
1926                         continue;
1927                 list = &(*thrd_lists)[i];
1928
1929                 /* Can we ensure no future writes from this thread may break
1930                  * the release seq? */
1931                 bool future_ordered = false;
1932
1933                 ModelAction *last = get_last_action(int_to_id(i));
1934                 Thread *th = get_thread(int_to_id(i));
1935                 if ((last && rf->happens_before(last)) ||
1936                                 !is_enabled(th) ||
1937                                 th->is_complete())
1938                         future_ordered = true;
1939
1940                 ASSERT(!th->is_model_thread() || future_ordered);
1941
1942                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1943                         const ModelAction *act = *rit;
1944                         /* Reach synchronization -> this thread is complete */
1945                         if (act->happens_before(release))
1946                                 break;
1947                         if (rf->happens_before(act)) {
1948                                 future_ordered = true;
1949                                 continue;
1950                         }
1951
1952                         /* Only non-RMW writes can break release sequences */
1953                         if (!act->is_write() || act->is_rmw())
1954                                 continue;
1955
1956                         /* Check modification order */
1957                         if (mo_graph->checkReachable(rf, act)) {
1958                                 /* rf --mo--> act */
1959                                 future_ordered = true;
1960                                 continue;
1961                         }
1962                         if (mo_graph->checkReachable(act, release))
1963                                 /* act --mo--> release */
1964                                 break;
1965                         if (mo_graph->checkReachable(release, act) &&
1966                                       mo_graph->checkReachable(act, rf)) {
1967                                 /* release --mo-> act --mo--> rf */
1968                                 return true; /* complete */
1969                         }
1970                         /* act may break release sequence */
1971                         pending->writes.push_back(act);
1972                         certain = false;
1973                 }
1974                 if (!future_ordered)
1975                         certain = false; /* This thread is uncertain */
1976         }
1977
1978         if (certain) {
1979                 release_heads->push_back(release);
1980                 pending->writes.clear();
1981         } else {
1982                 pending->release = release;
1983                 pending->rf = rf;
1984         }
1985         return certain;
1986 }
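
/*
 * A sketch of a release sequence handled above, assuming the C++11 rules
 * this checker implements (names are illustrative only):
 *
 *   std::atomic<int> x(0);
 *
 *   void producer() {
 *       x.store(1, std::memory_order_release);        // release head
 *   }
 *
 *   void helper() {
 *       x.fetch_add(1, std::memory_order_relaxed);    // RMW continues the sequence
 *   }
 *
 *   void consumer() {
 *       // An acquire load that reads from the relaxed RMW still synchronizes
 *       // with the release store, because the RMW is part of that store's
 *       // release sequence.
 *       int r = x.load(std::memory_order_acquire);
 *       (void)r;
 *   }
 */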
1987
1988 /**
1989  * An interface for getting the release sequence head(s) with which a
1990  * given ModelAction must synchronize. This function only returns a non-empty
1991  * result when it can locate a release sequence head with certainty. Otherwise,
1992  * it may mark the internal state of the ModelChecker so that it will handle
1993  * the release sequence at a later time, causing @a acquire to update its
1994  * synchronization at some later point in execution.
1995  *
1996  * @param acquire The 'acquire' action that may synchronize with a release
1997  * sequence
1998  * @param read The read action that may read from a release sequence; this may
1999  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2000  * when 'acquire' is a fence-acquire)
2001  * @param release_heads A pass-by-reference return parameter. Will be filled
2002  * with the head(s) of the release sequence(s), if they exists with certainty.
2003  * @see ModelChecker::release_seq_heads
2004  */
2005 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2006                 ModelAction *read, rel_heads_list_t *release_heads)
2007 {
2008         const ModelAction *rf = read->get_reads_from();
2009         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2010         sequence->acquire = acquire;
2011         sequence->read = read;
2012
2013         if (!release_seq_heads(rf, release_heads, sequence)) {
2014                 /* add act to 'lazy checking' list */
2015                 pending_rel_seqs->push_back(sequence);
2016         } else {
2017                 snapshot_free(sequence);
2018         }
2019 }
2020
2021 /**
2022  * Attempt to resolve all stashed operations that might synchronize with a
2023  * release sequence for a given location. This implements the "lazy" portion of
2024  * determining whether or not a release sequence was contiguous, since not all
2025  * modification order information is present at the time an action occurs.
2026  *
2027  * @param location The location/object that should be checked for release
2028  * sequence resolutions. A NULL value means to check all locations.
2029  * @param work_queue The work queue to which to add work items as they are
2030  * generated
2031  * @return True if any updates occurred (new synchronization, new mo_graph
2032  * edges)
2033  */
2034 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2035 {
2036         bool updated = false;
2037         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2038         while (it != pending_rel_seqs->end()) {
2039                 struct release_seq *pending = *it;
2040                 ModelAction *acquire = pending->acquire;
2041                 const ModelAction *read = pending->read;
2042
2043                 /* Only resolve sequences on the given location, if provided */
2044                 if (location && read->get_location() != location) {
2045                         it++;
2046                         continue;
2047                 }
2048
2049                 const ModelAction *rf = read->get_reads_from();
2050                 rel_heads_list_t release_heads;
2051                 bool complete;
2052                 complete = release_seq_heads(rf, &release_heads, pending);
2053                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2054                         if (!acquire->has_synchronized_with(release_heads[i])) {
2055                                 if (acquire->synchronize_with(release_heads[i]))
2056                                         updated = true;
2057                                 else
2058                                         set_bad_synchronization();
2059                         }
2060                 }
2061
2062                 if (updated) {
2063                         /* Re-check all pending release sequences */
2064                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2065                         /* Re-check read-acquire for mo_graph edges */
2066                         if (acquire->is_read())
2067                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2068
2069                         /* propagate synchronization to later actions */
2070                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2071                         for (; (*rit) != acquire; rit++) {
2072                                 ModelAction *propagate = *rit;
2073                                 if (acquire->happens_before(propagate)) {
2074                                         propagate->synchronize_with(acquire);
2075                                         /* Re-check 'propagate' for mo_graph edges */
2076                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2077                                 }
2078                         }
2079                 }
2080                 if (complete) {
2081                         it = pending_rel_seqs->erase(it);
2082                         snapshot_free(pending);
2083                 } else {
2084                         it++;
2085                 }
2086         }
2087
2088         // If we resolved any release sequences (adding new synchronization), see if we have realized a data race.
2089         checkDataRaces();
2090
2091         return updated;
2092 }
2093
2094 /**
2095  * Performs various bookkeeping operations for the current ModelAction. For
2096  * instance, adds the action to the per-object, per-thread action vector and to the
2097  * action trace list of all thread actions.
2098  *
2099  * @param act is the ModelAction to add.
2100  */
2101 void ModelChecker::add_action_to_lists(ModelAction *act)
2102 {
2103         int tid = id_to_int(act->get_tid());
2104         ModelAction *uninit = NULL;
2105         int uninit_id = -1;
2106         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2107         if (list->empty() && act->is_atomic_var()) {
2108                 uninit = new_uninitialized_action(act->get_location());
2109                 uninit_id = id_to_int(uninit->get_tid());
2110                 list->push_back(uninit);
2111         }
2112         list->push_back(act);
2113
2114         action_trace->push_back(act);
2115         if (uninit)
2116                 action_trace->push_front(uninit);
2117
2118         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2119         if (tid >= (int)vec->size())
2120                 vec->resize(priv->next_thread_id);
2121         (*vec)[tid].push_back(act);
2122         if (uninit)
2123                 (*vec)[uninit_id].push_front(uninit);
2124
2125         if ((int)thrd_last_action->size() <= tid)
2126                 thrd_last_action->resize(get_num_threads());
2127         (*thrd_last_action)[tid] = act;
2128         if (uninit)
2129                 (*thrd_last_action)[uninit_id] = uninit;
2130
2131         if (act->is_fence() && act->is_release()) {
2132                 if ((int)thrd_last_fence_release->size() <= tid)
2133                         thrd_last_fence_release->resize(get_num_threads());
2134                 (*thrd_last_fence_release)[tid] = act;
2135         }
2136
2137         if (act->is_wait()) {
2138                 void *mutex_loc = (void *) act->get_value();
2139                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2140
2141                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2142                 if (tid >= (int)vec->size())
2143                         vec->resize(priv->next_thread_id);
2144                 (*vec)[tid].push_back(act);
2145         }
2146 }
2147
2148 /**
2149  * @brief Get the last action performed by a particular Thread
2150  * @param tid The thread ID of the Thread in question
2151  * @return The last action in the thread
2152  */
2153 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2154 {
2155         int threadid = id_to_int(tid);
2156         if (threadid < (int)thrd_last_action->size())
2157                 return (*thrd_last_action)[id_to_int(tid)];
2158         else
2159                 return NULL;
2160 }
2161
2162 /**
2163  * @brief Get the last fence release performed by a particular Thread
2164  * @param tid The thread ID of the Thread in question
2165  * @return The last fence release in the thread, if one exists; NULL otherwise
2166  */
2167 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2168 {
2169         int threadid = id_to_int(tid);
2170         if (threadid < (int)thrd_last_fence_release->size())
2171                 return (*thrd_last_fence_release)[id_to_int(tid)];
2172         else
2173                 return NULL;
2174 }
2175
2176 /**
2177  * Gets the last memory_order_seq_cst write (in the total global sequence)
2178  * performed on a particular object (i.e., memory location), not including the
2179  * current action.
2180  * @param curr The current ModelAction; also denotes the object location to
2181  * check
2182  * @return The last seq_cst write
2183  */
2184 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2185 {
2186         void *location = curr->get_location();
2187         action_list_t *list = get_safe_ptr_action(obj_map, location);
2188         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2189         action_list_t::reverse_iterator rit;
2190         for (rit = list->rbegin(); rit != list->rend(); rit++)
2191                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2192                         return *rit;
2193         return NULL;
2194 }
2195
2196 /**
2197  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2198  * performed in a particular thread, prior to a particular fence.
2199  * @param tid The ID of the thread to check
2200  * @param before_fence The fence from which to begin the search; if NULL, then
2201  * search for the most recent fence in the thread.
2202  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2203  */
2204 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2205 {
2206         /* All fences should have NULL location */
2207         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2208         action_list_t::reverse_iterator rit = list->rbegin();
2209
2210         if (before_fence) {
2211                 for (; rit != list->rend(); rit++)
2212                         if (*rit == before_fence)
2213                                 break;
2214
2215                 ASSERT(*rit == before_fence);
2216                 rit++;
2217         }
2218
2219         for (; rit != list->rend(); rit++)
2220                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2221                         return *rit;
2222         return NULL;
2223 }
2224
2225 /**
2226  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2227  * location). This function identifies the mutex according to the current
2228  * action, which is presumed to perform on the same mutex.
2229  * @param curr The current ModelAction; also denotes the object location to
2230  * check
2231  * @return The last unlock operation
2232  */
2233 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2234 {
2235         void *location = curr->get_location();
2236         action_list_t *list = get_safe_ptr_action(obj_map, location);
2237         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2238         action_list_t::reverse_iterator rit;
2239         for (rit = list->rbegin(); rit != list->rend(); rit++)
2240                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2241                         return *rit;
2242         return NULL;
2243 }
2244
2245 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2246 {
2247         ModelAction *parent = get_last_action(tid);
2248         if (!parent)
2249                 parent = get_thread(tid)->get_creation();
2250         return parent;
2251 }
2252
2253 /**
2254  * Returns the clock vector for a given thread.
2255  * @param tid The thread whose clock vector we want
2256  * @return Desired clock vector
2257  */
2258 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2259 {
2260         return get_parent_action(tid)->get_cv();
2261 }
2262
2263 /**
2264  * Resolve a set of Promises with a current write. The set is provided in the
2265  * Node corresponding to @a write.
2266  * @param write The ModelAction that is fulfilling Promises
2267  * @return True if promises were resolved; false otherwise
2268  */
2269 bool ModelChecker::resolve_promises(ModelAction *write)
2270 {
2271         bool haveResolved = false;
2272         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2273         promise_list_t mustResolve, resolved;
2274
2275         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2276                 Promise *promise = (*promises)[promise_index];
2277                 if (write->get_node()->get_promise(i)) {
2278                         ModelAction *read = promise->get_action();
2279                         read_from(read, write);
2280                         //Make sure the promise's value matches the write's value
2281                         ASSERT(promise->is_compatible(write));
2282                         mo_graph->resolvePromise(read, write, &mustResolve);
2283
2284                         resolved.push_back(promise);
2285                         promises->erase(promises->begin() + promise_index);
2286                         actions_to_check.push_back(read);
2287
2288                         haveResolved = true;
2289                 } else
2290                         promise_index++;
2291         }
2292
2293         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2294                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2295                                 == resolved.end())
2296                         priv->failed_promise = true;
2297         }
2298         for (unsigned int i = 0; i < resolved.size(); i++)
2299                 delete resolved[i];
2300         //Check whether reading these writes has made threads unable to
2301         //resolve promises
2302
2303         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2304                 ModelAction *read = actions_to_check[i];
2305                 mo_check_promises(read, true);
2306         }
2307
2308         return haveResolved;
2309 }
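
/*
 * A rough sketch of where promises come from, assuming C++11 atomics on the
 * client side: a load may speculatively be given the value of a store that
 * has not executed yet; that speculation is recorded as a Promise and
 * resolved here once a matching write appears.
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   void thread_a() {
 *       int r1 = y.load(std::memory_order_relaxed);   // may promise the value 1
 *       x.store(r1, std::memory_order_relaxed);
 *   }
 *
 *   void thread_b() {
 *       int r2 = x.load(std::memory_order_relaxed);
 *       y.store(1, std::memory_order_relaxed);        // later fulfills that promise
 *       (void)r2;
 *   }
 */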
2310
2311 /**
2312  * Compute the set of promises that could potentially be satisfied by this
2313  * action. Note that the set computation actually appears in the Node, not in
2314  * ModelChecker.
2315  * @param curr The ModelAction that may satisfy promises
2316  */
2317 void ModelChecker::compute_promises(ModelAction *curr)
2318 {
2319         for (unsigned int i = 0; i < promises->size(); i++) {
2320                 Promise *promise = (*promises)[i];
2321                 const ModelAction *act = promise->get_action();
2322                 if (!act->happens_before(curr) &&
2323                                 act->is_read() &&
2324                                 !act->could_synchronize_with(curr) &&
2325                                 !act->same_thread(curr) &&
2326                                 act->get_location() == curr->get_location() &&
2327                                 promise->get_value() == curr->get_value()) {
2328                         curr->get_node()->set_promise(i, act->is_rmw());
2329                 }
2330         }
2331 }
2332
2333 /** Checks promises in response to change in ClockVector Threads. */
2334 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2335 {
2336         for (unsigned int i = 0; i < promises->size(); i++) {
2337                 Promise *promise = (*promises)[i];
2338                 const ModelAction *act = promise->get_action();
2339                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2340                                 merge_cv->synchronized_since(act)) {
2341                         if (promise->eliminate_thread(tid)) {
2342                                 //Promise has failed
2343                                 priv->failed_promise = true;
2344                                 return;
2345                         }
2346                 }
2347         }
2348 }
2349
2350 void ModelChecker::check_promises_thread_disabled()
2351 {
2352         for (unsigned int i = 0; i < promises->size(); i++) {
2353                 Promise *promise = (*promises)[i];
2354                 if (promise->has_failed()) {
2355                         priv->failed_promise = true;
2356                         return;
2357                 }
2358         }
2359 }
2360
2361 /**
2362  * @brief Checks promises in response to addition to modification order for
2363  * threads.
2364  *
2365  * We test whether threads are still available for satisfying promises after an
2366  * addition to our modification order constraints. Those that are unavailable
2367  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2368  * that promise has failed.
2369  *
2370  * @param act The ModelAction which updated the modification order
2371  * @param is_read_check Should be true if act is a read and we must check for
2372  * updates to the store from which it read (there is a distinction here for
2373  * RMW's, which are both a load and a store)
2374  */
2375 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2376 {
2377         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2378
2379         for (unsigned int i = 0; i < promises->size(); i++) {
2380                 Promise *promise = (*promises)[i];
2381                 const ModelAction *pread = promise->get_action();
2382
2383                 // Is this promise on the same location?
2384                 if (!pread->same_var(write))
2385                         continue;
2386
2387                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2388                         priv->failed_promise = true;
2389                         return;
2390                 }
2391
2392                 // Don't do any lookups twice for the same thread
2393                 if (!promise->thread_is_available(act->get_tid()))
2394                         continue;
2395
2396                 if (mo_graph->checkReachable(promise, write)) {
2397                         if (mo_graph->checkPromise(write, promise)) {
2398                                 priv->failed_promise = true;
2399                                 return;
2400                         }
2401                 }
2402         }
2403 }
2404
2405 /**
2406  * Compute the set of writes that may break the current pending release
2407  * sequence. This information is extracted from previous release sequence
2408  * calculations.
2409  *
2410  * @param curr The current ModelAction. Must be a release sequence fixup
2411  * action.
2412  */
2413 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2414 {
2415         if (pending_rel_seqs->empty())
2416                 return;
2417
2418         struct release_seq *pending = pending_rel_seqs->back();
2419         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2420                 const ModelAction *write = pending->writes[i];
2421                 curr->get_node()->add_relseq_break(write);
2422         }
2423
2424         /* NULL means don't break the sequence; just synchronize */
2425         curr->get_node()->add_relseq_break(NULL);
2426 }
2427
2428 /**
2429  * Build up an initial set of all past writes that this 'read' action may read
2430  * from. This set is determined by the clock vector's "happens before"
2431  * relationship.
2432  * @param curr is the current ModelAction that we are exploring; it must be a
2433  * 'read' operation.
2434  */
2435 void ModelChecker::build_reads_from_past(ModelAction *curr)
2436 {
2437         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2438         unsigned int i;
2439         ASSERT(curr->is_read());
2440
2441         ModelAction *last_sc_write = NULL;
2442
2443         if (curr->is_seqcst())
2444                 last_sc_write = get_last_seq_cst_write(curr);
2445
2446         /* Iterate over all threads */
2447         for (i = 0; i < thrd_lists->size(); i++) {
2448                 /* Iterate over actions in thread, starting from most recent */
2449                 action_list_t *list = &(*thrd_lists)[i];
2450                 action_list_t::reverse_iterator rit;
2451                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2452                         ModelAction *act = *rit;
2453
2454                         /* Only consider 'write' actions */
2455                         if (!act->is_write() || act == curr)
2456                                 continue;
2457
2458                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2459                         bool allow_read = true;
2460
2461                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2462                                 allow_read = false;
2463                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2464                                 allow_read = false;
2465
2466                         if (allow_read) {
2467                                 /* Only add feasible reads */
2468                                 mo_graph->startChanges();
2469                                 r_modification_order(curr, act);
2470                                 if (!is_infeasible())
2471                                         curr->get_node()->add_read_from(act);
2472                                 mo_graph->rollbackChanges();
2473                         }
2474
2475                         /* Include at most one act per-thread that "happens before" curr */
2476                         if (act->happens_before(curr))
2477                                 break;
2478                 }
2479         }
2480         /* We may find no valid may-read-from only if the execution is doomed */
2481         if (!curr->get_node()->get_read_from_size()) {
2482                 priv->no_valid_reads = true;
2483                 set_assert();
2484         }
2485
2486         if (DBG_ENABLED()) {
2487                 model_print("Reached read action:\n");
2488                 curr->print();
2489                 model_print("Printing may_read_from\n");
2490                 curr->get_node()->print_may_read_from();
2491                 model_print("End printing may_read_from\n");
2492         }
2493 }
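
/*
 * A sketch of how happens-before prunes the may-read-from set, assuming
 * C++11 atomics on the client side (W1, W2 and R are illustrative names):
 *
 *   std::atomic<int> x(0);
 *   std::atomic<int> flag(0);
 *
 *   void writer() {
 *       x.store(1, std::memory_order_relaxed);        // W1
 *       x.store(2, std::memory_order_relaxed);        // W2
 *       flag.store(1, std::memory_order_release);
 *   }
 *
 *   void reader() {
 *       if (flag.load(std::memory_order_acquire) == 1) {
 *           int r = x.load(std::memory_order_relaxed); // R
 *           (void)r;
 *       }
 *   }
 *
 * When the acquire load reads from the release store, both W1 and W2 happen
 * before R; the per-thread walk above stops at the most recent such write,
 * so only W2 remains a candidate and R cannot read the stale value from W1.
 */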
2494
2495 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2496 {
2497         while (true) {
2498                 /* UNINIT actions don't have a Node, and they never sleep */
2499                 if (write->is_uninitialized())
2500                         return true;
2501                 Node *prevnode = write->get_node()->get_parent();
2502
2503                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2504                 if (write->is_release() && thread_sleep)
2505                         return true;
2506                 if (!write->is_rmw()) {
2507                         return false;
2508                 }
2509                 if (write->get_reads_from() == NULL)
2510                         return true;
2511                 write = write->get_reads_from();
2512         }
2513 }
2514
2515 /**
2516  * @brief Create a new action representing an uninitialized atomic
2517  * @param location The memory location of the atomic object
2518  * @return A pointer to a new ModelAction
2519  */
2520 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2521 {
2522         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2523         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2524         act->create_cv(NULL);
2525         return act;
2526 }
2527
2528 static void print_list(action_list_t *list)
2529 {
2530         action_list_t::iterator it;
2531
2532         model_print("---------------------------------------------------------------------\n");
2533
2534         unsigned int hash = 0;
2535
2536         for (it = list->begin(); it != list->end(); it++) {
2537                 (*it)->print();
2538                 hash = hash^(hash<<3)^((*it)->hash());
2539         }
2540         model_print("HASH %u\n", hash);
2541         model_print("---------------------------------------------------------------------\n");
2542 }
2543
2544 #if SUPPORT_MOD_ORDER_DUMP
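     /**
      * @brief Dump the current execution as a Graphviz "dot" file
      *
      * Emits the modification-order graph nodes plus reads-from ("rf") edges
      * for each read and per-thread sequenced-before ("sb") edges along the
      * action trace.
      *
      * @param filename Base name for the output file (".dot" is appended)
      */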
2545 void ModelChecker::dumpGraph(char *filename) const
2546 {
2547         char buffer[200];
2548         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2549         FILE *file = fopen(buffer, "w");
2550         fprintf(file, "digraph %s {\n", filename);
2551         mo_graph->dumpNodes(file);
2552         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2553
2554         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2555                 ModelAction *action = *it;
2556                 if (action->is_read()) {
2557                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2558                         if (action->get_reads_from() != NULL)
2559                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2560                 }
2561                 if (thread_array[action->get_tid()] != NULL) {
2562                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2563                 }
2564
2565                 thread_array[action->get_tid()] = action;
2566         }
2567         fprintf(file, "}\n");
2568         model_free(thread_array);
2569         fclose(file);
2570 }
2571 #endif
2572
2573 /** @brief Prints an execution trace summary. */
2574 void ModelChecker::print_summary() const
2575 {
2576 #if SUPPORT_MOD_ORDER_DUMP
2577         char buffername[100];
2578         snprintf(buffername, sizeof(buffername), "exec%04u", stats.num_total);
2579         mo_graph->dumpGraphToFile(buffername);
2580         snprintf(buffername, sizeof(buffername), "graph%04u", stats.num_total);
2581         dumpGraph(buffername);
2582 #endif
2583
2584         model_print("Execution %d:", stats.num_total);
2585         if (isfeasibleprefix())
2586                 model_print("\n");
2587         else
2588                 print_infeasibility(" INFEASIBLE");
2589         print_list(action_trace);
2590         model_print("\n");
2591 }
2592
2593 /**
2594  * Add a Thread to the system for the first time. Should only be called once
2595  * per thread.
2596  * @param t The Thread to add
2597  */
2598 void ModelChecker::add_thread(Thread *t)
2599 {
2600         thread_map->put(id_to_int(t->get_id()), t);
2601         scheduler->add_thread(t);
2602 }
2603
2604 /**
2605  * Removes a thread from the scheduler.
2606  * @param t The Thread to remove
2607  */
2608 void ModelChecker::remove_thread(Thread *t)
2609 {
2610         scheduler->remove_thread(t);
2611 }
2612
2613 /**
2614  * @brief Get a Thread reference by its ID
2615  * @param tid The Thread's ID
2616  * @return A Thread reference
2617  */
2618 Thread * ModelChecker::get_thread(thread_id_t tid) const
2619 {
2620         return thread_map->get(id_to_int(tid));
2621 }
2622
2623 /**
2624  * @brief Get a reference to the Thread in which a ModelAction was executed
2625  * @param act The ModelAction
2626  * @return A Thread reference
2627  */
2628 Thread * ModelChecker::get_thread(const ModelAction *act) const
2629 {
2630         return get_thread(act->get_tid());
2631 }
2632
2633 /**
2634  * @brief Check if a Thread is currently enabled
2635  * @param t The Thread to check
2636  * @return True if the Thread is currently enabled
2637  */
2638 bool ModelChecker::is_enabled(Thread *t) const
2639 {
2640         return scheduler->is_enabled(t);
2641 }
2642
2643 /**
2644  * @brief Check if a Thread is currently enabled
2645  * @param tid The ID of the Thread to check
2646  * @return True if the Thread is currently enabled
2647  */
2648 bool ModelChecker::is_enabled(thread_id_t tid) const
2649 {
2650         return scheduler->is_enabled(tid);
2651 }
2652
2653 /**
2654  * Switch from a user-context to the "master thread" context (a.k.a. system
2655  * context). This switch is made with the intention of exploring a particular
2656  * model-checking action (described by a ModelAction object). Must be called
2657  * from a user-thread context.
2658  *
2659  * @param act The current action that will be explored. May be NULL only if
2660  * trace is exiting via an assertion (see ModelChecker::set_assert and
2661  * ModelChecker::has_asserted).
2662  * @return Return the value returned by the current action
2663  */
2664 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2665 {
2666         DBG();
2667         Thread *old = thread_current();
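             /* Hand the pending action to the master context, then yield to it */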
2668         old->set_pending(act);
2669         if (Thread::swap(old, &system_context) < 0) {
2670                 perror("swap threads");
2671                 exit(EXIT_FAILURE);
2672         }
2673         return old->get_return_value();
2674 }
2675
2676 /**
2677  * Takes the next step in the execution, if possible.
2678  * @param curr The current step to take
2679  * @return Returns the next Thread to run, if any; NULL if this execution
2680  * should terminate
2681  */
2682 Thread * ModelChecker::take_step(ModelAction *curr)
2683 {
2684         if (has_asserted())
2685                 return NULL;
2686
2687         Thread *curr_thrd = get_thread(curr);
2688         ASSERT(curr_thrd->get_state() == THREAD_READY);
2689
2690         curr = check_current_action(curr);
2691
2692         /* Infeasible -> don't take any more steps */
2693         if (is_infeasible())
2694                 return NULL;
2695         else if (isfeasibleprefix() && have_bug_reports()) {
2696                 set_assert();
2697                 return NULL;
2698         }
2699
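             /* Respect the execution-length bound (params.bound), if one was set */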
2700         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2701                 return NULL;
2702
2703         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2704                 scheduler->remove_thread(curr_thrd);
2705
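             /* Consult the exploration logic for the next thread, then let the scheduler finalize the choice */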
2706         Thread *next_thrd = get_next_thread(curr);
2707         next_thrd = scheduler->next_thread(next_thrd);
2708
2709         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2710                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2711
2712         /*
2713          * Launch end-of-execution release sequence fixups only when there are:
2714          *
2715          * (1) no more user threads to run (or when execution replay chooses
2716          *     the 'model_thread')
2717          * (2) pending release sequences
2718          * (3) pending assertions (i.e., data races)
2719          * (4) no pending promises
2720          */
2721         if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2722                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2723                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2724                                 pending_rel_seqs->size());
2725                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2726                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2727                                 model_thread);
2728                 model_thread->set_pending(fixup);
2729                 return model_thread;
2730         }
2731
2732         /* next_thrd == NULL -> don't take any more steps */
2733         if (!next_thrd)
2734                 return NULL;
2735
2736         return next_thrd;
2737 }
2738
2739 /** Wrapper to run the user's main function, with appropriate arguments */
2740 void user_main_wrapper(void *)
2741 {
2742         user_main(model->params.argc, model->params.argv);
2743 }
2744
2745 /** @brief Run ModelChecker for the user program */
2746 void ModelChecker::run()
2747 {
2748         do {
2749                 thrd_t user_thread;
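                     /* Create the initial user thread; it runs user_main() via the wrapper */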
2750                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2751                 add_thread(t);
2752
2753                 do {
2754                         scheduler->next_thread(t);
2755                         Thread::swap(&system_context, t);
2756
2757                         /* Consume the next action for a Thread */
2758                         ModelAction *curr = t->get_pending();
2759                         t->set_pending(NULL);
2760                         t = take_step(curr);
2761                 } while (t && !t->is_model_thread());
2762                 /** @TODO Re-write release sequence fixups here */
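                     /* next_execution() advances to the next execution to explore, if any remain */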
2763         } while (next_execution());
2764
2765         print_stats();
2766 }