snapshot: turn C++ interface into C interface
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 current_action(NULL),
43                 /* First thread created will have id INITIAL_THREAD_ID */
44                 next_thread_id(INITIAL_THREAD_ID),
45                 used_sequence_numbers(0),
46                 next_backtrack(NULL),
47                 bugs(),
48                 stats(),
49                 failed_promise(false),
50                 too_many_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         ModelAction *current_action;
62         unsigned int next_thread_id;
63         modelclock_t used_sequence_numbers;
64         ModelAction *next_backtrack;
65         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66         struct execution_stats stats;
67         bool failed_promise;
68         bool too_many_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
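/**
 * @brief Look up the action list for a memory location, lazily creating and
 * inserting an empty list if the location has not been seen yet.
 */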
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
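/**
 * @brief Per-thread variant of get_safe_ptr_action(): lazily creates the
 * per-location vector of per-thread action lists.
 */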
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         snapshot_backtrack_before(0);
163 }
164
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
167 {
168         return priv->next_thread_id++;
169 }
170
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
173 {
174         return priv->next_thread_id;
175 }
176
177 /**
178  * Must be called from user-thread context (e.g., through the global
179  * thread_current() interface)
180  *
181  * @return The currently executing Thread.
182  */
183 Thread * ModelChecker::get_current_thread() const
184 {
185         return scheduler->get_current_thread();
186 }
187
188 /** @return a sequence number for a new ModelAction */
189 modelclock_t ModelChecker::get_next_seq_num()
190 {
191         return ++priv->used_sequence_numbers;
192 }
193
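/** @return the Node at the head of the NodeStack */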
194 Node * ModelChecker::get_curr_node() const
195 {
196         return node_stack->get_head();
197 }
198
199 /**
200  * @brief Choose the next thread to execute.
201  *
202  * This function chooses the next thread that should execute. It can force the
203  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
204  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
205  * The model-checker may have no preference regarding the next thread (i.e.,
206  * when exploring a new execution ordering), in which case this will return
207  * NULL.
208  * @param curr The current ModelAction. This action might guide the choice of
209  * next thread.
210  * @return The next thread to run. If the model-checker has no preference, NULL.
211  */
212 Thread * ModelChecker::get_next_thread(ModelAction *curr)
213 {
214         thread_id_t tid;
215
216         if (curr != NULL) {
217                 /* Do not split atomic actions. */
218                 if (curr->is_rmwr())
219                         return thread_current();
220                 else if (curr->get_type() == THREAD_CREATE)
221                         return curr->get_thread_operand();
222         }
223
224         /* Have we completed exploring the preselected path? */
225         if (diverge == NULL)
226                 return NULL;
227
228         /* Else, we are trying to replay an execution */
229         ModelAction *next = node_stack->get_next()->get_action();
230
231         if (next == diverge) {
232                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
233                         earliest_diverge = diverge;
234
235                 Node *nextnode = next->get_node();
236                 Node *prevnode = nextnode->get_parent();
237                 scheduler->update_sleep_set(prevnode);
238
239                 /* Reached divergence point */
240                 if (nextnode->increment_misc()) {
241                         /* The next node will try to satisfy a different misc_index value. */
242                         tid = next->get_tid();
243                         node_stack->pop_restofstack(2);
244                 } else if (nextnode->increment_promise()) {
245                         /* The next node will try to satisfy a different set of promises. */
246                         tid = next->get_tid();
247                         node_stack->pop_restofstack(2);
248                 } else if (nextnode->increment_read_from()) {
249                         /* The next node will read from a different value. */
250                         tid = next->get_tid();
251                         node_stack->pop_restofstack(2);
252                 } else if (nextnode->increment_future_value()) {
253                         /* The next node will try to read from a different future value. */
254                         tid = next->get_tid();
255                         node_stack->pop_restofstack(2);
256                 } else if (nextnode->increment_relseq_break()) {
257                         /* The next node will try to resolve a release sequence differently */
258                         tid = next->get_tid();
259                         node_stack->pop_restofstack(2);
260                 } else {
261                         ASSERT(prevnode);
262                         /* Make a different thread execute for next step */
263                         scheduler->add_sleep(get_thread(next->get_tid()));
264                         tid = prevnode->get_next_backtrack();
265                         /* Make sure the backtracked thread isn't sleeping. */
266                         node_stack->pop_restofstack(1);
267                         if (diverge == earliest_diverge) {
268                                 earliest_diverge = prevnode->get_action();
269                         }
270                 }
271                 /* The correct sleep set is in the parent node. */
272                 execute_sleep_set();
273
274                 DEBUG("*** Divergence point ***\n");
275
276                 diverge = NULL;
277         } else {
278                 tid = next->get_tid();
279         }
280         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
281         ASSERT(tid != THREAD_ID_T_NONE);
282         return thread_map->get(id_to_int(tid));
283 }
284
285 /**
286  * We need to know what the next actions of all threads in the sleep
287  * set will be.  This method computes them and stores each action as
288  * the corresponding thread's pending action.
289  */
290
291 void ModelChecker::execute_sleep_set()
292 {
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *thr = get_thread(tid);
296                 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
297                         thr->set_state(THREAD_RUNNING);
298                         scheduler->next_thread(thr);
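                        /* Run the thread for one step: it builds its next
                         * ModelAction and passes it back via set_current_action()
                         * before switching back to the system context */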
299                         Thread::swap(&system_context, thr);
300                         priv->current_action->set_sleep_flag();
301                         thr->set_pending(priv->current_action);
302                 }
303         }
304 }
305
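/**
 * @brief Remove from the sleep set any thread whose pending action could
 * synchronize with the current action, since that thread may now be able to
 * make progress.
 */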
306 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
307 {
308         for (unsigned int i = 0; i < get_num_threads(); i++) {
309                 Thread *thr = get_thread(int_to_id(i));
310                 if (scheduler->is_sleep_set(thr)) {
311                         ModelAction *pending_act = thr->get_pending();
312                         if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
313                                 //Remove this thread from sleep set
314                                 scheduler->remove_sleep(thr);
315                 }
316         }
317 }
318
319 /** @brief Alert the model-checker that an incorrectly-ordered
320  * synchronization was made */
321 void ModelChecker::set_bad_synchronization()
322 {
323         priv->bad_synchronization = true;
324 }
325
326 bool ModelChecker::has_asserted() const
327 {
328         return priv->asserted;
329 }
330
331 void ModelChecker::set_assert()
332 {
333         priv->asserted = true;
334 }
335
336 /**
337  * Check if we are in a deadlock. Should only be called at the end of an
338  * execution, although it should not give false positives in the middle of an
339  * execution (there should be some ENABLED thread).
340  *
341  * @return True if program is in a deadlock; false otherwise
342  */
343 bool ModelChecker::is_deadlocked() const
344 {
345         bool blocking_threads = false;
346         for (unsigned int i = 0; i < get_num_threads(); i++) {
347                 thread_id_t tid = int_to_id(i);
348                 if (is_enabled(tid))
349                         return false;
350                 Thread *t = get_thread(tid);
351                 if (!t->is_model_thread() && t->get_pending())
352                         blocking_threads = true;
353         }
354         return blocking_threads;
355 }
356
357 /**
358  * Check if this is a complete execution. That is, have all threads completed
359  * execution (rather than exiting because sleep sets have forced a redundant
360  * execution).
361  *
362  * @return True if the execution is complete.
363  */
364 bool ModelChecker::is_complete_execution() const
365 {
366         for (unsigned int i = 0; i < get_num_threads(); i++)
367                 if (is_enabled(int_to_id(i)))
368                         return false;
369         return true;
370 }
371
372 /**
373  * @brief Assert a bug in the executing program.
374  *
375  * Use this function to assert any sort of bug in the user program. If the
376  * current trace is feasible (actually, a prefix of some feasible execution),
377  * then this execution will be aborted, printing the appropriate message. If
378  * the current trace is not yet feasible, the error message will be stashed and
379  * printed if the execution ever becomes feasible.
380  *
381  * @param msg Descriptive message for the bug (do not include newline char)
382  * @return True if bug is immediately-feasible
383  */
384 bool ModelChecker::assert_bug(const char *msg)
385 {
386         priv->bugs.push_back(new bug_message(msg));
387
388         if (isfeasibleprefix()) {
389                 set_assert();
390                 return true;
391         }
392         return false;
393 }
394
395 /**
396  * @brief Assert a bug in the executing program, asserted by a user thread
397  * @see ModelChecker::assert_bug
398  * @param msg Descriptive message for the bug (do not include newline char)
399  */
400 void ModelChecker::assert_user_bug(const char *msg)
401 {
402         /* If feasible bug, bail out now */
403         if (assert_bug(msg))
404                 switch_to_master(NULL);
405 }
406
407 /** @return True, if any bugs have been reported for this execution */
408 bool ModelChecker::have_bug_reports() const
409 {
410         return priv->bugs.size() != 0;
411 }
412
413 /** @brief Print bug report listing for this execution (if any bugs exist) */
414 void ModelChecker::print_bugs() const
415 {
416         if (have_bug_reports()) {
417                 model_print("Bug report: %zu bug%s detected\n",
418                                 priv->bugs.size(),
419                                 priv->bugs.size() > 1 ? "s" : "");
420                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
421                         priv->bugs[i]->print();
422         }
423 }
424
425 /**
426  * @brief Record end-of-execution stats
427  *
428  * Must be run when exiting an execution. Records various stats.
429  * @see struct execution_stats
430  */
431 void ModelChecker::record_stats()
432 {
433         stats.num_total++;
434         if (!isfeasibleprefix())
435                 stats.num_infeasible++;
436         else if (have_bug_reports())
437                 stats.num_buggy_executions++;
438         else if (is_complete_execution())
439                 stats.num_complete++;
440         else
441                 stats.num_redundant++;
442 }
443
444 /** @brief Print execution stats */
445 void ModelChecker::print_stats() const
446 {
447         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
448         model_print("Number of redundant executions: %d\n", stats.num_redundant);
449         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
450         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
451         model_print("Total executions: %d\n", stats.num_total);
452         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
453 }
454
455 /**
456  * @brief End-of-execution print
457  * @param printbugs Should any existing bugs be printed?
458  */
459 void ModelChecker::print_execution(bool printbugs) const
460 {
461         print_program_output();
462
463         if (DBG_ENABLED() || params.verbose) {
464                 model_print("Earliest divergence point since last feasible execution:\n");
465                 if (earliest_diverge)
466                         earliest_diverge->print();
467                 else
468                         model_print("(Not set)\n");
469
470                 model_print("\n");
471                 print_stats();
472         }
473
474         /* Don't print invalid bugs */
475         if (printbugs)
476                 print_bugs();
477
478         model_print("\n");
479         print_summary();
480 }
481
482 /**
483  * Queries the model-checker for more executions to explore and, if one
484  * exists, resets the model-checker state to execute a new execution.
485  *
486  * @return If there are more executions to explore, return true. Otherwise,
487  * return false.
488  */
489 bool ModelChecker::next_execution()
490 {
491         DBG();
492         /* Is this execution a feasible execution that's worth bug-checking? */
493         bool complete = isfeasibleprefix() && (is_complete_execution() ||
494                         have_bug_reports());
495
496         /* End-of-execution bug checks */
497         if (complete) {
498                 if (is_deadlocked())
499                         assert_bug("Deadlock detected");
500
501                 checkDataRaces();
502         }
503
504         record_stats();
505
506         /* Output */
507         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
508                 print_execution(complete);
509         else
510                 clear_program_output();
511
512         if (complete)
513                 earliest_diverge = NULL;
514
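        /* Pick the next backtracking point; if there is none, every execution has been explored */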
515         if ((diverge = get_next_backtrack()) == NULL)
516                 return false;
517
518         if (DBG_ENABLED()) {
519                 model_print("Next execution will diverge at:\n");
520                 diverge->print();
521         }
522
523         reset_to_initial_state();
524         return true;
525 }
526
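/**
 * @brief Find the most recent prior action that conflicts with (i.e., could be
 * reordered against) the given action, for use as a backtracking point.
 * @param act The action to check for conflicts
 * @return The most recent conflicting action, or NULL if none is found
 */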
527 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
528 {
529         switch (act->get_type()) {
530         case ATOMIC_FENCE:
531         case ATOMIC_READ:
532         case ATOMIC_WRITE:
533         case ATOMIC_RMW: {
534                 /* Optimization: relaxed operations don't need backtracking */
535                 if (act->is_relaxed())
536                         return NULL;
537                 /* linear search: from most recent to oldest */
538                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
539                 action_list_t::reverse_iterator rit;
540                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
541                         ModelAction *prev = *rit;
542                         if (prev->could_synchronize_with(act))
543                                 return prev;
544                 }
545                 break;
546         }
547         case ATOMIC_LOCK:
548         case ATOMIC_TRYLOCK: {
549                 /* linear search: from most recent to oldest */
550                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
551                 action_list_t::reverse_iterator rit;
552                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
553                         ModelAction *prev = *rit;
554                         if (act->is_conflicting_lock(prev))
555                                 return prev;
556                 }
557                 break;
558         }
559         case ATOMIC_UNLOCK: {
560                 /* linear search: from most recent to oldest */
561                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
562                 action_list_t::reverse_iterator rit;
563                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
564                         ModelAction *prev = *rit;
565                         if (!act->same_thread(prev) && prev->is_failed_trylock())
566                                 return prev;
567                 }
568                 break;
569         }
570         case ATOMIC_WAIT: {
571                 /* linear search: from most recent to oldest */
572                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
573                 action_list_t::reverse_iterator rit;
574                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
575                         ModelAction *prev = *rit;
576                         if (!act->same_thread(prev) && prev->is_failed_trylock())
577                                 return prev;
578                         if (!act->same_thread(prev) && prev->is_notify())
579                                 return prev;
580                 }
581                 break;
582         }
583
584         case ATOMIC_NOTIFY_ALL:
585         case ATOMIC_NOTIFY_ONE: {
586                 /* linear search: from most recent to oldest */
587                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
588                 action_list_t::reverse_iterator rit;
589                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
590                         ModelAction *prev = *rit;
591                         if (!act->same_thread(prev) && prev->is_wait())
592                                 return prev;
593                 }
594                 break;
595         }
596         default:
597                 break;
598         }
599         return NULL;
600 }
601
602 /** This method finds backtracking points: prior conflicting actions
603  * against which the parameter ModelAction should be reordered.
604  *
605  * @param act The ModelAction for which to find backtracking points.
606  */
607 void ModelChecker::set_backtracking(ModelAction *act)
608 {
609         Thread *t = get_thread(act);
610         ModelAction *prev = get_last_conflict(act);
611         if (prev == NULL)
612                 return;
613
614         Node *node = prev->get_node()->get_parent();
615
616         int low_tid, high_tid;
617         if (node->is_enabled(t)) {
618                 low_tid = id_to_int(act->get_tid());
619                 high_tid = low_tid + 1;
620         } else {
621                 low_tid = 0;
622                 high_tid = get_num_threads();
623         }
624
625         for (int i = low_tid; i < high_tid; i++) {
626                 thread_id_t tid = int_to_id(i);
627
628                 /* Make sure this thread can be enabled here. */
629                 if (i >= node->get_num_threads())
630                         break;
631
632                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
633                 if (node->enabled_status(tid) != THREAD_ENABLED)
634                         continue;
635
636                 /* Check if this has been explored already */
637                 if (node->has_been_explored(tid))
638                         continue;
639
640                 /* See if fairness allows */
641                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
642                         bool unfair = false;
643                         for (int t = 0; t < node->get_num_threads(); t++) {
644                                 thread_id_t tother = int_to_id(t);
645                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
646                                         unfair = true;
647                                         break;
648                                 }
649                         }
650                         if (unfair)
651                                 continue;
652                 }
653                 /* Cache the latest backtracking point */
654                 set_latest_backtrack(prev);
655
656                 /* If this is a new backtracking point, mark the tree */
657                 if (!node->set_backtrack(tid))
658                         continue;
659                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
660                                         id_to_int(prev->get_tid()),
661                                         id_to_int(t->get_id()));
662                 if (DBG_ENABLED()) {
663                         prev->print();
664                         act->print();
665                 }
666         }
667 }
668
669 /**
670  * @brief Cache a backtracking point as the "most recent", if eligible
671  *
672  * Note that this does not prepare the NodeStack for this backtracking
673  * operation; it only caches the action on a per-execution basis.
674  *
675  * @param act The operation at which we should explore a different next action
676  * (i.e., backtracking point)
677  * @return True, if this action is now the most recent backtracking point;
678  * false otherwise
679  */
680 bool ModelChecker::set_latest_backtrack(ModelAction *act)
681 {
682         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
683                 priv->next_backtrack = act;
684                 return true;
685         }
686         return false;
687 }
688
689 /**
690  * Returns the last backtracking point. The model checker will explore a different
691  * path for this point in the next execution.
692  * @return The ModelAction at which the next execution should diverge.
693  */
694 ModelAction * ModelChecker::get_next_backtrack()
695 {
696         ModelAction *next = priv->next_backtrack;
697         priv->next_backtrack = NULL;
698         return next;
699 }
700
701 /**
702  * Processes a read or rmw model action.
703  * @param curr is the read model action to process.
704  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
705  * @return True if processing this read updates the mo_graph.
706  */
707 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
708 {
709         uint64_t value = VALUE_NONE;
710         bool updated = false;
711         while (true) {
712                 const ModelAction *reads_from = curr->get_node()->get_read_from();
713                 if (reads_from != NULL) {
714                         mo_graph->startChanges();
715
716                         value = reads_from->get_value();
717                         bool r_status = false;
718
719                         if (!second_part_of_rmw) {
720                                 check_recency(curr, reads_from);
721                                 r_status = r_modification_order(curr, reads_from);
722                         }
723
724
725                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
726                                 mo_graph->rollbackChanges();
727                                 priv->too_many_reads = false;
728                                 continue;
729                         }
730
731                         read_from(curr, reads_from);
732                         mo_graph->commitChanges();
733                         mo_check_promises(curr->get_tid(), reads_from);
734
735                         updated |= r_status;
736                 } else if (!second_part_of_rmw) {
737                         /* Read from future value */
738                         value = curr->get_node()->get_future_value();
739                         modelclock_t expiration = curr->get_node()->get_future_value_expiration();
740                         curr->set_read_from(NULL);
741                         Promise *valuepromise = new Promise(curr, value, expiration);
742                         promises->push_back(valuepromise);
743                 }
744                 get_thread(curr)->set_return_value(value);
745                 return updated;
746         }
747 }
748
749 /**
750  * Processes a lock, trylock, or unlock model action.  @param curr is
751  * Processes a lock, trylock, or unlock model action.  @param curr is
752  * the mutex model action to process.
753  *
754  * The trylock operation checks whether the lock is taken.  If not,
755  * it falls through to the normal lock case.  If so, the operation
756  * fails.
757  * The lock operation has already been checked that it is enabled, so
758  * it just grabs the lock and synchronizes with the previous unlock.
759  *
760  * The unlock operation has to re-enable all of the threads that are
761  * waiting on the lock.
762  *
763  * @return True if synchronization was updated; false otherwise
764  */
765 bool ModelChecker::process_mutex(ModelAction *curr)
766 {
767         std::mutex *mutex = NULL;
768         struct std::mutex_state *state = NULL;
769
770         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
771                 mutex = (std::mutex *)curr->get_location();
772                 state = mutex->get_state();
773         } else if (curr->is_wait()) {
774                 mutex = (std::mutex *)curr->get_value();
775                 state = mutex->get_state();
776         }
777
778         switch (curr->get_type()) {
779         case ATOMIC_TRYLOCK: {
780                 bool success = !state->islocked;
781                 curr->set_try_lock(success);
782                 if (!success) {
783                         get_thread(curr)->set_return_value(0);
784                         break;
785                 }
786                 get_thread(curr)->set_return_value(1);
787         }
788                 //otherwise fall through to the lock case
789         case ATOMIC_LOCK: {
790                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
791                         assert_bug("Lock access before initialization");
792                 state->islocked = true;
793                 ModelAction *unlock = get_last_unlock(curr);
794                 //synchronize with the previous unlock statement
795                 if (unlock != NULL) {
796                         curr->synchronize_with(unlock);
797                         return true;
798                 }
799                 break;
800         }
801         case ATOMIC_UNLOCK: {
802                 //unlock the lock
803                 state->islocked = false;
804                 //wake up the other threads
805                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
806                 //activate all the waiting threads
807                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
808                         scheduler->wake(get_thread(*rit));
809                 }
810                 waiters->clear();
811                 break;
812         }
813         case ATOMIC_WAIT: {
814                 //unlock the lock
815                 state->islocked = false;
816                 //wake up the other threads
817                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
818                 //activate all the waiting threads
819                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
820                         scheduler->wake(get_thread(*rit));
821                 }
822                 waiters->clear();
823                 //check whether we should go to sleep or not...simulate spurious failures
824                 if (curr->get_node()->get_misc() == 0) {
825                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
826                         //disable us
827                         scheduler->sleep(get_thread(curr));
828                 }
829                 break;
830         }
831         case ATOMIC_NOTIFY_ALL: {
832                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
833                 //activate all the waiting threads
834                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
835                         scheduler->wake(get_thread(*rit));
836                 }
837                 waiters->clear();
838                 break;
839         }
840         case ATOMIC_NOTIFY_ONE: {
841                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
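                //the node's misc counter selects which waiting thread to wake,
                //so different executions can explore different wakeup choices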
842                 int wakeupthread = curr->get_node()->get_misc();
843                 action_list_t::iterator it = waiters->begin();
844                 advance(it, wakeupthread);
845                 scheduler->wake(get_thread(*it));
846                 waiters->erase(it);
847                 break;
848         }
849
850         default:
851                 ASSERT(0);
852         }
853         return false;
854 }
855
856 /**
857  * Process a write ModelAction
858  * @param curr The ModelAction to process
859  * @return True if the mo_graph was updated or promises were resolved
860  */
861 bool ModelChecker::process_write(ModelAction *curr)
862 {
863         bool updated_mod_order = w_modification_order(curr);
864         bool updated_promises = resolve_promises(curr);
865
866         if (promises->size() == 0) {
867                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
868                         struct PendingFutureValue pfv = (*futurevalues)[i];
869                         //Do more ambitious checks now that mo is more complete
870                         if (mo_may_allow(pfv.writer, pfv.act) &&
871                                         pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number() + params.maxfuturedelay))
872                                 set_latest_backtrack(pfv.act);
873                 }
874                 futurevalues->resize(0);
875         }
876
877         mo_graph->commitChanges();
878         mo_check_promises(curr->get_tid(), curr);
879
880         get_thread(curr)->set_return_value(VALUE_NONE);
881         return updated_mod_order || updated_promises;
882 }
883
884 /**
885  * Process a fence ModelAction
886  * @param curr The ModelAction to process
887  * @return True if synchronization was updated
888  */
889 bool ModelChecker::process_fence(ModelAction *curr)
890 {
891         /*
892          * fence-relaxed: no-op
893          * fence-release: only log the occurrence (not in this function), for
894          *   use in later synchronization
895          * fence-acquire (this function): search for hypothetical release
896          *   sequences
897          */
898         bool updated = false;
899         if (curr->is_acquire()) {
900                 action_list_t *list = action_trace;
901                 action_list_t::reverse_iterator rit;
902                 /* Find X : is_read(X) && X --sb-> curr */
903                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
904                         ModelAction *act = *rit;
905                         if (act == curr)
906                                 continue;
907                         if (act->get_tid() != curr->get_tid())
908                                 continue;
909                         /* Stop at the beginning of the thread */
910                         if (act->is_thread_start())
911                                 break;
912                         /* Stop once we reach a prior fence-acquire */
913                         if (act->is_fence() && act->is_acquire())
914                                 break;
915                         if (!act->is_read())
916                                 continue;
917                         /* read-acquire will find its own release sequences */
918                         if (act->is_acquire())
919                                 continue;
920
921                         /* Establish hypothetical release sequences */
922                         rel_heads_list_t release_heads;
923                         get_release_seq_heads(curr, act, &release_heads);
924                         for (unsigned int i = 0; i < release_heads.size(); i++)
925                                 if (!curr->synchronize_with(release_heads[i]))
926                                         set_bad_synchronization();
927                         if (release_heads.size() != 0)
928                                 updated = true;
929                 }
930         }
931         return updated;
932 }
933
934 /**
935  * @brief Process the current action for thread-related activity
936  *
937  * Performs current-action processing for a THREAD_* ModelAction. Processing
938  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
939  * synchronization, etc.  This function is a no-op for non-THREAD actions
940  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
941  *
942  * @param curr The current action
943  * @return True if synchronization was updated or a thread completed
944  */
945 bool ModelChecker::process_thread_action(ModelAction *curr)
946 {
947         bool updated = false;
948
949         switch (curr->get_type()) {
950         case THREAD_CREATE: {
951                 Thread *th = curr->get_thread_operand();
952                 th->set_creation(curr);
953                 break;
954         }
955         case THREAD_JOIN: {
956                 Thread *blocking = curr->get_thread_operand();
957                 ModelAction *act = get_last_action(blocking->get_id());
958                 curr->synchronize_with(act);
959                 updated = true; /* trigger rel-seq checks */
960                 break;
961         }
962         case THREAD_FINISH: {
963                 Thread *th = get_thread(curr);
964                 while (!th->wait_list_empty()) {
965                         ModelAction *act = th->pop_wait_list();
966                         scheduler->wake(get_thread(act));
967                 }
968                 th->complete();
969                 updated = true; /* trigger rel-seq checks */
970                 break;
971         }
972         case THREAD_START: {
973                 check_promises(curr->get_tid(), NULL, curr->get_cv());
974                 break;
975         }
976         default:
977                 break;
978         }
979
980         return updated;
981 }
982
983 /**
984  * @brief Process the current action for release sequence fixup activity
985  *
986  * Performs model-checker release sequence fixups for the current action,
987  * forcing a single pending release sequence to break (with a given, potential
988  * "loose" write) or to complete (i.e., synchronize). If a pending release
989  * sequence forms a complete release sequence, then we must perform the fixup
990  * synchronization, mo_graph additions, etc.
991  *
992  * @param curr The current action; must be a release sequence fixup action
993  * @param work_queue The work queue to which to add work items as they are
994  * generated
995  */
996 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
997 {
998         const ModelAction *write = curr->get_node()->get_relseq_break();
999         struct release_seq *sequence = pending_rel_seqs->back();
1000         pending_rel_seqs->pop_back();
1001         ASSERT(sequence);
1002         ModelAction *acquire = sequence->acquire;
1003         const ModelAction *rf = sequence->rf;
1004         const ModelAction *release = sequence->release;
1005         ASSERT(acquire);
1006         ASSERT(release);
1007         ASSERT(rf);
1008         ASSERT(release->same_thread(rf));
1009
1010         if (write == NULL) {
1011                 /**
1012                  * @todo Forcing a synchronization requires that we set
1013                  * modification order constraints. For instance, we can't allow
1014                  * a fixup sequence in which two separate read-acquire
1015                  * operations read from the same sequence, where the first one
1016                  * synchronizes and the other doesn't. Essentially, we can't
1017                  * allow any writes to insert themselves between 'release' and
1018                  * 'rf'
1019                  */
1020
1021                 /* Must synchronize */
1022                 if (!acquire->synchronize_with(release)) {
1023                         set_bad_synchronization();
1024                         return;
1025                 }
1026                 /* Re-check all pending release sequences */
1027                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1028                 /* Re-check act for mo_graph edges */
1029                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1030
1031                 /* propagate synchronization to later actions */
1032                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1033                 for (; (*rit) != acquire; rit++) {
1034                         ModelAction *propagate = *rit;
1035                         if (acquire->happens_before(propagate)) {
1036                                 propagate->synchronize_with(acquire);
1037                                 /* Re-check 'propagate' for mo_graph edges */
1038                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1039                         }
1040                 }
1041         } else {
1042                 /* Break release sequence with new edges:
1043                  *   release --mo--> write --mo--> rf */
1044                 mo_graph->addEdge(release, write);
1045                 mo_graph->addEdge(write, rf);
1046         }
1047
1048         /* See if we have realized a data race */
1049         checkDataRaces();
1050 }
1051
1052 /**
1053  * Initialize the current action by performing one or more of the following
1054  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1055  * in the NodeStack, manipulating backtracking sets, allocating and
1056  * initializing clock vectors, and computing the promises to fulfill.
1057  *
1058  * @param curr The current action, as passed from the user context; may be
1059  * freed/invalidated after the execution of this function, with a different
1060  * action "returned" in its place (pass-by-reference)
1061  * @return True if curr is a newly-explored action; false otherwise
1062  */
1063 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1064 {
1065         ModelAction *newcurr;
1066
1067         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1068                 newcurr = process_rmw(*curr);
1069                 delete *curr;
1070
1071                 if (newcurr->is_rmw())
1072                         compute_promises(newcurr);
1073
1074                 *curr = newcurr;
1075                 return false;
1076         }
1077
1078         (*curr)->set_seq_number(get_next_seq_num());
1079
1080         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1081         if (newcurr) {
1082                 /* First restore type and order in case of RMW operation */
1083                 if ((*curr)->is_rmwr())
1084                         newcurr->copy_typeandorder(*curr);
1085
1086                 ASSERT((*curr)->get_location() == newcurr->get_location());
1087                 newcurr->copy_from_new(*curr);
1088
1089                 /* Discard duplicate ModelAction; use action from NodeStack */
1090                 delete *curr;
1091
1092                 /* Always compute new clock vector */
1093                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1094
1095                 *curr = newcurr;
1096                 return false; /* Action was explored previously */
1097         } else {
1098                 newcurr = *curr;
1099
1100                 /* Always compute new clock vector */
1101                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1102
1103                 /* Assign most recent release fence */
1104                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1105
1106                 /*
1107                  * Perform one-time actions when pushing new ModelAction onto
1108                  * NodeStack
1109                  */
1110                 if (newcurr->is_write())
1111                         compute_promises(newcurr);
1112                 else if (newcurr->is_relseq_fixup())
1113                         compute_relseq_breakwrites(newcurr);
1114                 else if (newcurr->is_wait())
1115                         newcurr->get_node()->set_misc_max(2);
1116                 else if (newcurr->is_notify_one()) {
1117                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1118                 }
1119                 return true; /* This was a new ModelAction */
1120         }
1121 }
1122
1123 /**
1124  * @brief Establish reads-from relation between two actions
1125  *
1126  * Perform basic operations involved with establishing a concrete rf relation,
1127  * including setting the ModelAction data and checking for release sequences.
1128  *
1129  * @param act The action that is reading (must be a read)
1130  * @param rf The action from which we are reading (must be a write)
1131  *
1132  * @return True if this read established synchronization
1133  */
1134 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1135 {
1136         act->set_read_from(rf);
1137         if (rf != NULL && act->is_acquire()) {
1138                 rel_heads_list_t release_heads;
1139                 get_release_seq_heads(act, act, &release_heads);
1140                 int num_heads = release_heads.size();
1141                 for (unsigned int i = 0; i < release_heads.size(); i++)
1142                         if (!act->synchronize_with(release_heads[i])) {
1143                                 set_bad_synchronization();
1144                                 num_heads--;
1145                         }
1146                 return num_heads > 0;
1147         }
1148         return false;
1149 }
1150
1151 /**
1152  * @brief Check whether a model action is enabled.
1153  *
1154  * Checks whether a lock or join operation would succeed (i.e., the lock is
1155  * not already held, or the joined thread has already completed). If not, the
1156  * action is put in a waiter list.
1157  *
1158  * @param curr is the ModelAction to check whether it is enabled.
1159  * @return a bool that indicates whether the action is enabled.
1160  */
1161 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1162         if (curr->is_lock()) {
1163                 std::mutex *lock = (std::mutex *)curr->get_location();
1164                 struct std::mutex_state *state = lock->get_state();
1165                 if (state->islocked) {
1166                         //Stick the action in the appropriate waiting queue
1167                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1168                         return false;
1169                 }
1170         } else if (curr->get_type() == THREAD_JOIN) {
1171                 Thread *blocking = (Thread *)curr->get_location();
1172                 if (!blocking->is_complete()) {
1173                         blocking->push_wait_list(curr);
1174                         return false;
1175                 }
1176         }
1177
1178         return true;
1179 }
1180
1181 /**
1182  * Stores the ModelAction for the current thread action.  Call this
1183  * immediately before switching from user- to system-context to pass
1184  * data between them.
1185  * @param act The ModelAction created by the user-thread action
1186  */
1187 void ModelChecker::set_current_action(ModelAction *act) {
1188         priv->current_action = act;
1189 }
1190
1191 /**
1192  * This is the heart of the model checker routine. It performs model-checking
1193  * actions corresponding to a given "current action." Among other processes, it
1194  * calculates reads-from relationships, updates synchronization clock vectors,
1195  * forms a memory_order constraints graph, and handles replay/backtrack
1196  * execution when running permutations of previously-observed executions.
1197  *
1198  * @param curr The current action to process
1199  * @return The ModelAction that is actually executed; may be different than
1200  * curr; may be NULL, if the current action is not enabled to run
1201  */
1202 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1203 {
1204         ASSERT(curr);
1205         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1206
1207         if (!check_action_enabled(curr)) {
1208                 /* Make the execution look like we chose to run this action
1209                  * much later, when a lock/join can succeed */
1210                 get_thread(curr)->set_pending(curr);
1211                 scheduler->sleep(get_thread(curr));
1212                 return NULL;
1213         }
1214
1215         bool newly_explored = initialize_curr_action(&curr);
1216
1217         DBG();
1218         if (DBG_ENABLED())
1219                 curr->print();
1220
1221         wake_up_sleeping_actions(curr);
1222
1223         /* Add the action to lists before any other model-checking tasks */
1224         if (!second_part_of_rmw)
1225                 add_action_to_lists(curr);
1226
1227         /* Build may_read_from set for newly-created actions */
1228         if (newly_explored && curr->is_read())
1229                 build_reads_from_past(curr);
1230
1231         /* Initialize work_queue with the "current action" work */
1232         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1233         while (!work_queue.empty() && !has_asserted()) {
1234                 WorkQueueEntry work = work_queue.front();
1235                 work_queue.pop_front();
1236
1237                 switch (work.type) {
1238                 case WORK_CHECK_CURR_ACTION: {
1239                         ModelAction *act = work.action;
1240                         bool update = false; /* update this location's release seq's */
1241                         bool update_all = false; /* update all release seq's */
1242
1243                         if (process_thread_action(curr))
1244                                 update_all = true;
1245
1246                         if (act->is_read() && process_read(act, second_part_of_rmw))
1247                                 update = true;
1248
1249                         if (act->is_write() && process_write(act))
1250                                 update = true;
1251
1252                         if (act->is_fence() && process_fence(act))
1253                                 update_all = true;
1254
1255                         if (act->is_mutex_op() && process_mutex(act))
1256                                 update_all = true;
1257
1258                         if (act->is_relseq_fixup())
1259                                 process_relseq_fixup(curr, &work_queue);
1260
1261                         if (update_all)
1262                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1263                         else if (update)
1264                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1265                         break;
1266                 }
1267                 case WORK_CHECK_RELEASE_SEQ:
1268                         resolve_release_sequences(work.location, &work_queue);
1269                         break;
1270                 case WORK_CHECK_MO_EDGES: {
1271                         /** @todo Complete verification of work_queue */
1272                         ModelAction *act = work.action;
1273                         bool updated = false;
1274
1275                         if (act->is_read()) {
1276                                 const ModelAction *rf = act->get_reads_from();
1277                                 if (rf != NULL && r_modification_order(act, rf))
1278                                         updated = true;
1279                         }
1280                         if (act->is_write()) {
1281                                 if (w_modification_order(act))
1282                                         updated = true;
1283                         }
1284                         mo_graph->commitChanges();
1285
1286                         if (updated)
1287                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1288                         break;
1289                 }
1290                 default:
1291                         ASSERT(false);
1292                         break;
1293                 }
1294         }
1295
1296         check_curr_backtracking(curr);
1297         set_backtracking(curr);
1298         return curr;
1299 }
1300
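/**
 * @brief Record the current action as the latest backtracking point if it (or
 * its parent node) still has unexplored alternatives (backtracks, misc values,
 * reads-from choices, future values, promises, or release-sequence breaks).
 */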
1301 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1302 {
1303         Node *currnode = curr->get_node();
1304         Node *parnode = currnode->get_parent();
1305
1306         if ((parnode && !parnode->backtrack_empty()) ||
1307                          !currnode->misc_empty() ||
1308                          !currnode->read_from_empty() ||
1309                          !currnode->future_value_empty() ||
1310                          !currnode->promise_empty() ||
1311                          !currnode->relseq_break_empty()) {
1312                 set_latest_backtrack(curr);
1313         }
1314 }
1315
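/** @return True if any unresolved promise has passed its expiration sequence number */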
1316 bool ModelChecker::promises_expired() const
1317 {
1318         for (unsigned int i = 0; i < promises->size(); i++) {
1319                 Promise *promise = (*promises)[i];
1320                 if (promise->get_expiration() < priv->used_sequence_numbers)
1321                         return true;
1322         }
1323         return false;
1324 }
1325
1326 /**
1327  * This is the strongest feasibility check available.
1328  * @return whether the current trace (partial or complete) must be a prefix of
1329  * a feasible trace.
1330  */
1331 bool ModelChecker::isfeasibleprefix() const
1332 {
1333         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1334 }
1335
1336 /**
1337  * Returns whether the current completed trace is feasible, except for pending
1338  * release sequences.
1339  */
1340 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1341 {
1342         if (DBG_ENABLED() && promises->size() != 0)
1343                 DEBUG("Infeasible: unresolved promises\n");
1344
1345         return !is_infeasible() && promises->size() == 0;
1346 }
1347
1348 /**
1349  * Check if the current partial trace is infeasible. Does not check any
1350  * end-of-execution flags, which might rule out the execution. Thus, this is
1351  * useful only for ruling an execution as infeasible.
1352  * @return whether the current partial trace is infeasible.
1353  */
1354 bool ModelChecker::is_infeasible() const
1355 {
1356         if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1357                 DEBUG("Infeasible: RMW violation\n");
1358
1359         return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1360 }
1361
1362 /**
1363  * Check if the current partial trace is infeasible, while ignoring
1364  * infeasibility related to two RMWs reading from the same store. It does not
1365  * check end-of-execution feasibility.
1366  * @see ModelChecker::is_infeasible
1367  * @return whether the current partial trace is infeasible, ignoring multiple
1368  * RMWs reading from the same store.
1369  * */
1370 bool ModelChecker::is_infeasible_ignoreRMW() const
1371 {
1372         if (DBG_ENABLED()) {
1373                 if (mo_graph->checkForCycles())
1374                         DEBUG("Infeasible: modification order cycles\n");
1375                 if (priv->failed_promise)
1376                         DEBUG("Infeasible: failed promise\n");
1377                 if (priv->too_many_reads)
1378                         DEBUG("Infeasible: too many reads\n");
1379                 if (priv->bad_synchronization)
1380                         DEBUG("Infeasible: bad synchronization ordering\n");
1381                 if (promises_expired())
1382                         DEBUG("Infeasible: promises expired\n");
1383         }
1384         return mo_graph->checkForCycles() || priv->failed_promise ||
1385                 priv->too_many_reads || priv->bad_synchronization ||
1386                 promises_expired();
1387 }
1388
1389 /** Close out an RMWR by converting the previous RMWR into a RMW or READ. */
1390 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
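             /* Merge curr into the previous RMWR for this thread; if curr completes an
              * RMW and the read's reads-from is resolved, add the corresponding RMW edge
              * to the mo_graph */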
1391         ModelAction *lastread = get_last_action(act->get_tid());
1392         lastread->process_rmw(act);
1393         if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1394                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1395                 mo_graph->commitChanges();
1396         }
1397         return lastread;
1398 }
1399
1400 /**
1401  * Checks whether a thread has read from the same write too many times
1402  * without seeing the effects of a later write.
1403  *
1404  * Basic idea:
1405  * 1) there must be a different write that we could read from that would satisfy the modification order,
1406  * 2) we must have read from the same value in excess of maxreads times, and
1407  * 3) that other write must have been in the reads_from set each of those maxreads times.
1408  *
1409  * If so, we decide that the execution is no longer feasible.
1410  */
1411 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1412 {
1413         if (params.maxreads != 0) {
1414                 if (curr->get_node()->get_read_from_size() <= 1)
1415                         return;
1416                 //Must make sure that the execution is currently feasible... We could
1417                 //accidentally clear infeasibility by rolling back
1418                 if (is_infeasible())
1419                         return;
1420                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1421                 int tid = id_to_int(curr->get_tid());
1422
1423                 /* Skip checks if this thread has no actions at this location yet */
1424                 if ((int)thrd_lists->size() <= tid)
1425                         return;
1426                 action_list_t *list = &(*thrd_lists)[tid];
1427
1428                 action_list_t::reverse_iterator rit = list->rbegin();
1429                 /* Find curr in this thread's list */
1430                 for (; (*rit) != curr; rit++)
1431                         ;
1432                 /* Step past curr itself */
1433                 rit++;
1434
1435                 action_list_t::reverse_iterator ritcopy = rit;
1436                 //See if we have enough reads from the same value
1437                 int count = 0;
1438                 for (; count < params.maxreads; rit++, count++) {
1439                         if (rit == list->rend())
1440                                 return;
1441                         ModelAction *act = *rit;
1442                         if (!act->is_read())
1443                                 return;
1444
1445                         if (act->get_reads_from() != rf)
1446                                 return;
1447                         if (act->get_node()->get_read_from_size() <= 1)
1448                                 return;
1449                 }
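                     /* For each alternative write we could have read from, check that
                      * reading it would keep the execution feasible and that every one of
                      * the last maxreads reads could also have read from it; if so, we
                      * have read the same value too many times */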
1450                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1451                         /* Get write */
1452                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1453
1454                         /* Need a different write */
1455                         if (write == rf)
1456                                 continue;
1457
1458                         /* Test to see whether this is a feasible write to read from */
1459                         mo_graph->startChanges();
1460                         r_modification_order(curr, write);
1461                         bool feasiblereadfrom = !is_infeasible();
1462                         mo_graph->rollbackChanges();
1463
1464                         if (!feasiblereadfrom)
1465                                 continue;
1466                         rit = ritcopy;
1467
1468                         bool feasiblewrite = true;
1469                         //now we need to see if this write works for everyone
1470
1471                         for (int loop = count; loop > 0; loop--, rit++) {
1472                                 ModelAction *act = *rit;
1473                                 bool foundvalue = false;
1474                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1475                                         if (act->get_node()->get_read_from_at(j) == write) {
1476                                                 foundvalue = true;
1477                                                 break;
1478                                         }
1479                                 }
1480                                 if (!foundvalue) {
1481                                         feasiblewrite = false;
1482                                         break;
1483                                 }
1484                         }
1485                         if (feasiblewrite) {
1486                                 priv->too_many_reads = true;
1487                                 return;
1488                         }
1489                 }
1490         }
1491 }
1492
1493 /**
1494  * Updates the mo_graph with the constraints imposed from the current
1495  * read.
1496  *
1497  * Basic idea is the following: Go through each other thread and find
1498  * the latest action that happened before our read.  Two cases:
1499  *
1500  * (1) The action is a write => that write must either occur before
1501  * the write we read from or be the write we read from.
1502  *
1503  * (2) The action is a read => the write that that action read from
1504  * must occur before the write we read from or be the same write.
1505  *
1506  * @param curr The current action. Must be a read.
1507  * @param rf The action that curr reads from. Must be a write.
1508  * @return True if modification order edges were added; false otherwise
1509  */
1510 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1511 {
1512         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1513         unsigned int i;
1514         bool added = false;
1515         ASSERT(curr->is_read());
1516
1517         /* Last SC fence in the current thread */
1518         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1519
1520         /* Iterate over all threads */
1521         for (i = 0; i < thrd_lists->size(); i++) {
1522                 /* Last SC fence in thread i */
1523                 ModelAction *last_sc_fence_thread_local = NULL;
1524                 if (int_to_id((int)i) != curr->get_tid())
1525                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1526
1527                 /* Last SC fence in thread i, before last SC fence in current thread */
1528                 ModelAction *last_sc_fence_thread_before = NULL;
1529                 if (last_sc_fence_local)
1530                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1531
1532                 /* Iterate over actions in thread, starting from most recent */
1533                 action_list_t *list = &(*thrd_lists)[i];
1534                 action_list_t::reverse_iterator rit;
1535                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1536                         ModelAction *act = *rit;
1537
1538                         if (act->is_write() && act != rf && act != curr) {
1539                                 /* C++, Section 29.3 statement 5 */
1540                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1541                                                 *act < *last_sc_fence_thread_local) {
1542                                         mo_graph->addEdge(act, rf);
1543                                         added = true;
1544                                         break;
1545                                 }
1546                                 /* C++, Section 29.3 statement 4 */
1547                                 else if (act->is_seqcst() && last_sc_fence_local &&
1548                                                 *act < *last_sc_fence_local) {
1549                                         mo_graph->addEdge(act, rf);
1550                                         added = true;
1551                                         break;
1552                                 }
1553                                 /* C++, Section 29.3 statement 6 */
1554                                 else if (last_sc_fence_thread_before &&
1555                                                 *act < *last_sc_fence_thread_before) {
1556                                         mo_graph->addEdge(act, rf);
1557                                         added = true;
1558                                         break;
1559                                 }
1560                         }
1561
1562                         /*
1563                          * Include at most one act per-thread that "happens
1564                          * before" curr. Don't consider reflexively.
1565                          */
1566                         if (act->happens_before(curr) && act != curr) {
1567                                 if (act->is_write()) {
1568                                         if (rf != act) {
1569                                                 mo_graph->addEdge(act, rf);
1570                                                 added = true;
1571                                         }
1572                                 } else {
1573                                         const ModelAction *prevreadfrom = act->get_reads_from();
1574                                         //if the previous read is unresolved, keep going...
1575                                         if (prevreadfrom == NULL)
1576                                                 continue;
1577
1578                                         if (rf != prevreadfrom) {
1579                                                 mo_graph->addEdge(prevreadfrom, rf);
1580                                                 added = true;
1581                                         }
1582                                 }
1583                                 break;
1584                         }
1585                 }
1586         }
1587
1588         return added;
1589 }
1590
1591 /** This method fixes up the modification order when we resolve a
1592  *  promise.  The basic problem is that actions that occur after the
1593  *  read curr could not properly add items to the modification order
1594  *  for our read.
1595  *
1596  *  So for each thread, we find the earliest item that happens after
1597  *  the read curr.  This is the item we have to fix up with additional
1598  *  constraints.  If that action is a write, we add an MO edge between
1599  *  the Action rf and that action.  If the action is a read, we add an
1600  *  MO edge between the Action rf and whatever the read accessed.
1601  *
1602  * @param curr is the read ModelAction that we are fixing up MO edges for.
1603  * @param rf is the write ModelAction that curr reads from.
1604  *
1605  */
1606 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1607 {
1608         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1609         unsigned int i;
1610         ASSERT(curr->is_read());
1611
1612         /* Iterate over all threads */
1613         for (i = 0; i < thrd_lists->size(); i++) {
1614                 /* Iterate over actions in thread, starting from most recent */
1615                 action_list_t *list = &(*thrd_lists)[i];
1616                 action_list_t::reverse_iterator rit;
1617                 ModelAction *lastact = NULL;
1618
1619                 /* Find the earliest action that happens after curr (counting curr itself only if it is an RMW) */
1620                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1621                         ModelAction *act = *rit;
1622                         if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1623                                 lastact = act;
1624                         } else
1625                                 break;
1626                 }
1627
1628                 /* Fix up the modification order for the earliest action that happens after curr */
1629                 if (lastact != NULL) {
1630                         if (lastact == curr) {
1631                                 //Case 1: The resolved read is an RMW, and we need to make sure
1632                                 //that the write portion of the RMW is mod ordered after rf
1633
1634                                 mo_graph->addEdge(rf, lastact);
1635                         } else if (lastact->is_read()) {
1636                                 //Case 2: The resolved read is a normal read and the next
1637                                 //operation is a read, and we need to make sure the value read
1638                                 //is mod ordered after rf
1639
1640                                 const ModelAction *postreadfrom = lastact->get_reads_from();
1641                                 if (postreadfrom != NULL && rf != postreadfrom)
1642                                         mo_graph->addEdge(rf, postreadfrom);
1643                         } else {
1644                                 //Case 3: The resolved read is a normal read and the next
1645                                 //operation is a write, and we need to make sure that the
1646                                 //write is mod ordered after rf
1647                                 if (lastact != rf)
1648                                         mo_graph->addEdge(rf, lastact);
1649                         }
1650                         break;
1651                 }
1652         }
1653 }
1654
1655 /**
1656  * Updates the mo_graph with the constraints imposed from the current write.
1657  *
1658  * Basic idea is the following: Go through each other thread and find
1659  * the latest action that happened before our write.  Two cases:
1660  *
1661  * (1) The action is a write => that write must occur before
1662  * the current write
1663  *
1664  * (2) The action is a read => the write that that action read from
1665  * must occur before the current write.
1666  *
1667  * This method also handles two other issues:
1668  *
1669  * (I) Sequential Consistency: Making sure that if the current write is
1670  * seq_cst, that it occurs after the previous seq_cst write.
1671  *
1672  * (II) Sending the write back to non-synchronizing reads.
1673  *
1674  * @param curr The current action. Must be a write.
1675  * @return True if modification order edges were added; false otherwise
1676  */
1677 bool ModelChecker::w_modification_order(ModelAction *curr)
1678 {
1679         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1680         unsigned int i;
1681         bool added = false;
1682         ASSERT(curr->is_write());
1683
1684         if (curr->is_seqcst()) {
1685                 /* We have to at least see the last sequentially consistent write,
1686                    so that we are initialized. */
1687                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1688                 if (last_seq_cst != NULL) {
1689                         mo_graph->addEdge(last_seq_cst, curr);
1690                         added = true;
1691                 }
1692         }
1693
1694         /* Last SC fence in the current thread */
1695         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1696
1697         /* Iterate over all threads */
1698         for (i = 0; i < thrd_lists->size(); i++) {
1699                 /* Last SC fence in thread i, before last SC fence in current thread */
1700                 ModelAction *last_sc_fence_thread_before = NULL;
1701                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1702                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1703
1704                 /* Iterate over actions in thread, starting from most recent */
1705                 action_list_t *list = &(*thrd_lists)[i];
1706                 action_list_t::reverse_iterator rit;
1707                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1708                         ModelAction *act = *rit;
1709                         if (act == curr) {
1710                                 /*
1711                                  * 1) If RMW and it actually read from something, then we
1712                                  * already have all relevant edges, so just skip to next
1713                                  * thread.
1714                                  *
1715                                  * 2) If RMW and it didn't read from anything, we should take
1716                                  * whatever edge we can get to speed up convergence.
1717                                  *
1718                                  * 3) If normal write, we need to look at earlier actions, so
1719                                  * continue processing list.
1720                                  */
1721                                 if (curr->is_rmw()) {
1722                                         if (curr->get_reads_from() != NULL)
1723                                                 break;
1724                                         else
1725                                                 continue;
1726                                 } else
1727                                         continue;
1728                         }
1729
1730                         /* C++, Section 29.3 statement 7 */
1731                         if (last_sc_fence_thread_before && act->is_write() &&
1732                                         *act < *last_sc_fence_thread_before) {
1733                                 mo_graph->addEdge(act, curr);
1734                                 added = true;
1735                                 break;
1736                         }
1737
1738                         /*
1739                          * Include at most one act per-thread that "happens
1740                          * before" curr
1741                          */
1742                         if (act->happens_before(curr)) {
1743                                 /*
1744                                  * Note: if act is RMW, just add edge:
1745                                  *   act --mo--> curr
1746                                  * The following edge should be handled elsewhere:
1747                                  *   readfrom(act) --mo--> act
1748                                  */
1749                                 if (act->is_write())
1750                                         mo_graph->addEdge(act, curr);
1751                                 else if (act->is_read()) {
1752                                         //if previous read accessed a null, just keep going
1753                                         if (act->get_reads_from() == NULL)
1754                                                 continue;
1755                                         mo_graph->addEdge(act->get_reads_from(), curr);
1756                                 }
1757                                 added = true;
1758                                 break;
1759                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1760                                                      !act->same_thread(curr)) {
1761                                 /* We have an action that:
1762                                    (1) did not happen before us
1763                                    (2) is a read and we are a write
1764                                    (3) cannot synchronize with us
1765                                    (4) is in a different thread
1766                                    =>
1767                                    that read could potentially read from our write.  Note that
1768                                    these checks are overly conservative at this point; we'll
1769                                    do more checks before actually removing the
1770                                    pendingfuturevalue.
1771
1772                                  */
1773                                 if (thin_air_constraint_may_allow(curr, act)) {
1774                                         if (!is_infeasible() ||
1775                                                         (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1776                                                 struct PendingFutureValue pfv = {curr, act};
1777                                                 futurevalues->push_back(pfv);
1778                                         }
1779                                 }
1780                         }
1781                 }
1782         }
1783
1784         return added;
1785 }
1786
1787 /** Arbitrary reads from the future are not allowed.  Section 29.3
1788  * part 9 places some constraints.  This method checks one result of that
1789  * constraint.  Others require compiler support. */
1790 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1791 {
1792         if (!writer->is_rmw())
1793                 return true;
1794
1795         if (!reader->is_rmw())
1796                 return true;
1797
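             /* Walk the reads-from chain backwards from the writer; if it reaches the
              * reader, letting the reader read from the writer would create a cycle in
              * the RMW chain, so disallow it. Stop once the chain passes an action in
              * the reader's thread that happens before the reader. */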
1798         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1799                 if (search == reader)
1800                         return false;
1801                 if (search->get_tid() == reader->get_tid() &&
1802                                 search->happens_before(reader))
1803                         break;
1804         }
1805
1806         return true;
1807 }
1808
1809 /**
1810  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1811  * some constraints. This method checks the following constraint (others
1812  * require compiler support):
1813  *
1814  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1815  */
1816 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1817 {
1818         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1819         unsigned int i;
1820         /* Iterate over all threads */
1821         for (i = 0; i < thrd_lists->size(); i++) {
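                     /* write_after_read: the write performed (or read) by the earliest
                      * action in this thread that happens after 'reader' */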
1822                 const ModelAction *write_after_read = NULL;
1823
1824                 /* Iterate over actions in thread, starting from most recent */
1825                 action_list_t *list = &(*thrd_lists)[i];
1826                 action_list_t::reverse_iterator rit;
1827                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1828                         ModelAction *act = *rit;
1829
1830                         /* Don't disallow due to act == reader */
1831                         if (!reader->happens_before(act) || reader == act)
1832                                 break;
1833                         else if (act->is_write())
1834                                 write_after_read = act;
1835                         else if (act->is_read() && act->get_reads_from() != NULL)
1836                                 write_after_read = act->get_reads_from();
1837                 }
1838
1839                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1840                         return false;
1841         }
1842         return true;
1843 }
1844
1845 /**
1846  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1847  * The ModelAction under consideration is expected to be taking part in
1848  * release/acquire synchronization as an object of the "reads from" relation.
1849  * Note that this can only provide release sequence support for RMW chains
1850  * which do not read from the future, as those actions cannot be traced until
1851  * their "promise" is fulfilled. Similarly, we may not even establish the
1852  * presence of a release sequence with certainty, as some modification order
1853  * constraints may be decided further in the future. Thus, this function
1854  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1855  * and a boolean representing certainty.
1856  *
1857  * @param rf The action that might be part of a release sequence. Must be a
1858  * write.
1859  * @param release_heads A pass-by-reference style return parameter. After
1860  * execution of this function, release_heads will contain the heads of all the
1861  * relevant release sequences, if any exist with certainty
1862  * @param pending A pass-by-reference style return parameter which is only used
1863  * when returning false (i.e., uncertain). Returns most information regarding
1864  * an uncertain release sequence, including any write operations that might
1865  * break the sequence.
1866  * @return true, if the ModelChecker is certain that release_heads is complete;
1867  * false otherwise
1868  */
1869 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1870                 rel_heads_list_t *release_heads,
1871                 struct release_seq *pending) const
1872 {
1873         /* Only check for release sequences if there are no cycles */
1874         if (mo_graph->checkForCycles())
1875                 return false;
1876
1877         while (rf) {
1878                 ASSERT(rf->is_write());
1879
1880                 if (rf->is_release())
1881                         release_heads->push_back(rf);
1882                 else if (rf->get_last_fence_release())
1883                         release_heads->push_back(rf->get_last_fence_release());
1884                 if (!rf->is_rmw())
1885                         break; /* End of RMW chain */
1886
1887                 /** @todo Need to be smarter here...  In the linux lock
1888                  * example, this will run to the beginning of the program for
1889                  * every acquire. */
1890                 /** @todo The way to be smarter here is to keep going until one
1891                  * thread has a release preceded by an acquire and you've seen
1892                  * both. */
1893
1894                 /* acq_rel RMW is a sufficient stopping condition */
1895                 if (rf->is_acquire() && rf->is_release())
1896                         return true; /* complete */
1897
1898                 rf = rf->get_reads_from();
1899         }
1900         if (!rf) {
1901                 /* read from future: need to settle this later */
1902                 pending->rf = NULL;
1903                 return false; /* incomplete */
1904         }
1905
1906         if (rf->is_release())
1907                 return true; /* complete */
1908
1909         /* else relaxed write
1910          * - check for fence-release in the same thread (29.8, stmt. 3)
1911          * - check modification order for contiguous subsequence
1912          *   -> rf must be same thread as release */
1913
1914         const ModelAction *fence_release = rf->get_last_fence_release();
1915         /* Synchronize with a fence-release unconditionally; we don't need to
1916          * find any more "contiguous subsequence..." for it */
1917         if (fence_release)
1918                 release_heads->push_back(fence_release);
1919
1920         int tid = id_to_int(rf->get_tid());
1921         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1922         action_list_t *list = &(*thrd_lists)[tid];
1923         action_list_t::const_reverse_iterator rit;
1924
1925         /* Find rf in the thread list */
1926         rit = std::find(list->rbegin(), list->rend(), rf);
1927         ASSERT(rit != list->rend());
1928
1929         /* Find the last {write,fence}-release */
1930         for (; rit != list->rend(); rit++) {
1931                 if (fence_release && *(*rit) < *fence_release)
1932                         break;
1933                 if ((*rit)->is_release())
1934                         break;
1935         }
1936         if (rit == list->rend()) {
1937                 /* No write-release in this thread */
1938                 return true; /* complete */
1939         } else if (fence_release && *(*rit) < *fence_release) {
1940                 /* The fence-release is more recent (and so, "stronger") than
1941                  * the most recent write-release */
1942                 return true; /* complete */
1943         } /* else, need to establish contiguous release sequence */
1944         ModelAction *release = *rit;
1945
1946         ASSERT(rf->same_thread(release));
1947
1948         pending->writes.clear();
1949
1950         bool certain = true;
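             /* For each other thread, check whether any of its (non-RMW) writes could
              * fall between 'release' and 'rf' in modification order and thereby break
              * the release sequence */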
1951         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1952                 if (id_to_int(rf->get_tid()) == (int)i)
1953                         continue;
1954                 list = &(*thrd_lists)[i];
1955
1956                 /* Can we ensure no future writes from this thread may break
1957                  * the release seq? */
1958                 bool future_ordered = false;
1959
1960                 ModelAction *last = get_last_action(int_to_id(i));
1961                 Thread *th = get_thread(int_to_id(i));
1962                 if ((last && rf->happens_before(last)) ||
1963                                 !is_enabled(th) ||
1964                                 th->is_complete())
1965                         future_ordered = true;
1966
1967                 ASSERT(!th->is_model_thread() || future_ordered);
1968
1969                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1970                         const ModelAction *act = *rit;
1971                         /* Reach synchronization -> this thread is complete */
1972                         if (act->happens_before(release))
1973                                 break;
1974                         if (rf->happens_before(act)) {
1975                                 future_ordered = true;
1976                                 continue;
1977                         }
1978
1979                         /* Only non-RMW writes can break release sequences */
1980                         if (!act->is_write() || act->is_rmw())
1981                                 continue;
1982
1983                         /* Check modification order */
1984                         if (mo_graph->checkReachable(rf, act)) {
1985                                 /* rf --mo--> act */
1986                                 future_ordered = true;
1987                                 continue;
1988                         }
1989                         if (mo_graph->checkReachable(act, release))
1990                                 /* act --mo--> release */
1991                                 break;
1992                         if (mo_graph->checkReachable(release, act) &&
1993                                       mo_graph->checkReachable(act, rf)) {
1994                                 /* release --mo-> act --mo--> rf */
1995                                 return true; /* complete */
1996                         }
1997                         /* act may break release sequence */
1998                         pending->writes.push_back(act);
1999                         certain = false;
2000                 }
2001                 if (!future_ordered)
2002                         certain = false; /* This thread is uncertain */
2003         }
2004
2005         if (certain) {
2006                 release_heads->push_back(release);
2007                 pending->writes.clear();
2008         } else {
2009                 pending->release = release;
2010                 pending->rf = rf;
2011         }
2012         return certain;
2013 }
2014
2015 /**
2016  * An interface for getting the release sequence head(s) with which a
2017  * given ModelAction must synchronize. This function only returns a non-empty
2018  * result when it can locate a release sequence head with certainty. Otherwise,
2019  * it may mark the internal state of the ModelChecker so that it will handle
2020  * the release sequence at a later time, causing @a acquire to update its
2021  * synchronization at some later point in execution.
2022  *
2023  * @param acquire The 'acquire' action that may synchronize with a release
2024  * sequence
2025  * @param read The read action that may read from a release sequence; this may
2026  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2027  * when 'acquire' is a fence-acquire)
2028  * @param release_heads A pass-by-reference return parameter. Will be filled
2029  * with the head(s) of the release sequence(s), if they exist with certainty.
2030  * @see ModelChecker::release_seq_heads
2031  */
2032 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2033                 ModelAction *read, rel_heads_list_t *release_heads)
2034 {
2035         const ModelAction *rf = read->get_reads_from();
2036         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2037         sequence->acquire = acquire;
2038         sequence->read = read;
2039
2040         if (!release_seq_heads(rf, release_heads, sequence)) {
2041                 /* add act to 'lazy checking' list */
2042                 pending_rel_seqs->push_back(sequence);
2043         } else {
2044                 snapshot_free(sequence);
2045         }
2046 }
2047
2048 /**
2049  * Attempt to resolve all stashed operations that might synchronize with a
2050  * release sequence for a given location. This implements the "lazy" portion of
2051  * determining whether or not a release sequence was contiguous, since not all
2052  * modification order information is present at the time an action occurs.
2053  *
2054  * @param location The location/object that should be checked for release
2055  * sequence resolutions. A NULL value means to check all locations.
2056  * @param work_queue The work queue to which to add work items as they are
2057  * generated
2058  * @return True if any updates occurred (new synchronization, new mo_graph
2059  * edges)
2060  */
2061 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2062 {
2063         bool updated = false;
2064         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2065         while (it != pending_rel_seqs->end()) {
2066                 struct release_seq *pending = *it;
2067                 ModelAction *acquire = pending->acquire;
2068                 const ModelAction *read = pending->read;
2069
2070                 /* Only resolve sequences on the given location, if provided */
2071                 if (location && read->get_location() != location) {
2072                         it++;
2073                         continue;
2074                 }
2075
2076                 const ModelAction *rf = read->get_reads_from();
2077                 rel_heads_list_t release_heads;
2078                 bool complete;
2079                 complete = release_seq_heads(rf, &release_heads, pending);
2080                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2081                         if (!acquire->has_synchronized_with(release_heads[i])) {
2082                                 if (acquire->synchronize_with(release_heads[i]))
2083                                         updated = true;
2084                                 else
2085                                         set_bad_synchronization();
2086                         }
2087                 }
2088
2089                 if (updated) {
2090                         /* Re-check all pending release sequences */
2091                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2092                         /* Re-check read-acquire for mo_graph edges */
2093                         if (acquire->is_read())
2094                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2095
2096                         /* propagate synchronization to later actions */
2097                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2098                         for (; (*rit) != acquire; rit++) {
2099                                 ModelAction *propagate = *rit;
2100                                 if (acquire->happens_before(propagate)) {
2101                                         propagate->synchronize_with(acquire);
2102                                         /* Re-check 'propagate' for mo_graph edges */
2103                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2104                                 }
2105                         }
2106                 }
2107                 if (complete) {
2108                         it = pending_rel_seqs->erase(it);
2109                         snapshot_free(pending);
2110                 } else {
2111                         it++;
2112                 }
2113         }
2114
2115         // If we resolved any release sequences, see if we have realized a data race.
2116         checkDataRaces();
2117
2118         return updated;
2119 }
2120
2121 /**
2122  * Performs various bookkeeping operations for the current ModelAction. For
2123  * instance, adds action to the per-object, per-thread action vector and to the
2124  * action trace list of all thread actions.
2125  *
2126  * @param act is the ModelAction to add.
2127  */
2128 void ModelChecker::add_action_to_lists(ModelAction *act)
2129 {
2130         int tid = id_to_int(act->get_tid());
2131         ModelAction *uninit = NULL;
2132         int uninit_id = -1;
2133         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
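             /* If this is the first action on an atomic object, also record a synthetic
              * 'uninitialized' write so that reads have something to read from */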
2134         if (list->empty() && act->is_atomic_var()) {
2135                 uninit = new_uninitialized_action(act->get_location());
2136                 uninit_id = id_to_int(uninit->get_tid());
2137                 list->push_back(uninit);
2138         }
2139         list->push_back(act);
2140
2141         action_trace->push_back(act);
2142         if (uninit)
2143                 action_trace->push_front(uninit);
2144
2145         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2146         if (tid >= (int)vec->size())
2147                 vec->resize(priv->next_thread_id);
2148         (*vec)[tid].push_back(act);
2149         if (uninit)
2150                 (*vec)[uninit_id].push_front(uninit);
2151
2152         if ((int)thrd_last_action->size() <= tid)
2153                 thrd_last_action->resize(get_num_threads());
2154         (*thrd_last_action)[tid] = act;
2155         if (uninit)
2156                 (*thrd_last_action)[uninit_id] = uninit;
2157
2158         if (act->is_fence() && act->is_release()) {
2159                 if ((int)thrd_last_fence_release->size() <= tid)
2160                         thrd_last_fence_release->resize(get_num_threads());
2161                 (*thrd_last_fence_release)[tid] = act;
2162         }
2163
2164         if (act->is_wait()) {
2165                 void *mutex_loc = (void *) act->get_value();
2166                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2167
2168                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2169                 if (tid >= (int)vec->size())
2170                         vec->resize(priv->next_thread_id);
2171                 (*vec)[tid].push_back(act);
2172         }
2173 }
2174
2175 /**
2176  * @brief Get the last action performed by a particular Thread
2177  * @param tid The thread ID of the Thread in question
2178  * @return The last action in the thread
2179  */
2180 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2181 {
2182         int threadid = id_to_int(tid);
2183         if (threadid < (int)thrd_last_action->size())
2184                 return (*thrd_last_action)[threadid];
2185         else
2186                 return NULL;
2187 }
2188
2189 /**
2190  * @brief Get the last fence release performed by a particular Thread
2191  * @param tid The thread ID of the Thread in question
2192  * @return The last fence release in the thread, if one exists; NULL otherwise
2193  */
2194 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2195 {
2196         int threadid = id_to_int(tid);
2197         if (threadid < (int)thrd_last_fence_release->size())
2198                 return (*thrd_last_fence_release)[threadid];
2199         else
2200                 return NULL;
2201 }
2202
2203 /**
2204  * Gets the last memory_order_seq_cst write (in the total global sequence)
2205  * performed on a particular object (i.e., memory location), not including the
2206  * current action.
2207  * @param curr The current ModelAction; also denotes the object location to
2208  * check
2209  * @return The last seq_cst write
2210  */
2211 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2212 {
2213         void *location = curr->get_location();
2214         action_list_t *list = get_safe_ptr_action(obj_map, location);
2215         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2216         action_list_t::reverse_iterator rit;
2217         for (rit = list->rbegin(); rit != list->rend(); rit++)
2218                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2219                         return *rit;
2220         return NULL;
2221 }
2222
2223 /**
2224  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2225  * performed in a particular thread, prior to a particular fence.
2226  * @param tid The ID of the thread to check
2227  * @param before_fence The fence from which to begin the search; if NULL, then
2228  * search for the most recent fence in the thread.
2229  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2230  */
2231 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2232 {
2233         /* All fences should have NULL location */
2234         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2235         action_list_t::reverse_iterator rit = list->rbegin();
2236
2237         if (before_fence) {
2238                 for (; rit != list->rend(); rit++)
2239                         if (*rit == before_fence)
2240                                 break;
2241
2242                 ASSERT(rit != list->rend() && *rit == before_fence);
2243                 rit++;
2244         }
2245
2246         for (; rit != list->rend(); rit++)
2247                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2248                         return *rit;
2249         return NULL;
2250 }
2251
2252 /**
2253  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2254  * location). This function identifies the mutex according to the current
2255  * action, which is presumed to operate on the same mutex.
2256  * @param curr The current ModelAction; also denotes the object location to
2257  * check
2258  * @return The last unlock operation
2259  */
2260 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2261 {
2262         void *location = curr->get_location();
2263         action_list_t *list = get_safe_ptr_action(obj_map, location);
2264         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2265         action_list_t::reverse_iterator rit;
2266         for (rit = list->rbegin(); rit != list->rend(); rit++)
2267                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2268                         return *rit;
2269         return NULL;
2270 }
2271
2272 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2273 {
2274         ModelAction *parent = get_last_action(tid);
2275         if (!parent)
2276                 parent = get_thread(tid)->get_creation();
2277         return parent;
2278 }
2279
2280 /**
2281  * Returns the clock vector for a given thread.
2282  * @param tid The thread whose clock vector we want
2283  * @return Desired clock vector
2284  */
2285 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2286 {
2287         return get_parent_action(tid)->get_cv();
2288 }
2289
2290 /**
2291  * Resolve a set of Promises with a current write. The set is provided in the
2292  * Node corresponding to @a write.
2293  * @param write The ModelAction that is fulfilling Promises
2294  * @return True if promises were resolved; false otherwise
2295  */
2296 bool ModelChecker::resolve_promises(ModelAction *write)
2297 {
2298         bool resolved = false;
2299         std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2300
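             /* i indexes the write's Node promise set; promise_index indexes the
              * promises vector, which shrinks as promises are resolved and erased */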
2301         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2302                 Promise *promise = (*promises)[promise_index];
2303                 if (write->get_node()->get_promise(i)) {
2304                         ModelAction *read = promise->get_action();
2305                         if (read->is_rmw()) {
2306                                 mo_graph->addRMWEdge(write, read);
2307                         }
2308                         read_from(read, write);
2309                         //First fix up the modification order for actions that happened
2310                         //before the read
2311                         r_modification_order(read, write);
2312                         //Next fix up the modification order for actions that happened
2313                         //after the read.
2314                         post_r_modification_order(read, write);
2315                         //Make sure the promise's value matches the write's value
2316                         ASSERT(promise->get_value() == write->get_value());
2317                         delete(promise);
2318
2319                         promises->erase(promises->begin() + promise_index);
2320                         threads_to_check.push_back(read->get_tid());
2321
2322                         resolved = true;
2323                 } else
2324                         promise_index++;
2325         }
2326
2327         //Check whether reading these writes has made threads unable to
2328         //resolve promises
2329
2330         for (unsigned int i = 0; i < threads_to_check.size(); i++)
2331                 mo_check_promises(threads_to_check[i], write);
2332
2333         return resolved;
2334 }
2335
2336 /**
2337  * Compute the set of promises that could potentially be satisfied by this
2338  * action. Note that the set computation actually appears in the Node, not in
2339  * ModelChecker.
2340  * @param curr The ModelAction that may satisfy promises
2341  */
2342 void ModelChecker::compute_promises(ModelAction *curr)
2343 {
2344         for (unsigned int i = 0; i < promises->size(); i++) {
2345                 Promise *promise = (*promises)[i];
2346                 const ModelAction *act = promise->get_action();
2347                 if (!act->happens_before(curr) &&
2348                                 act->is_read() &&
2349                                 !act->could_synchronize_with(curr) &&
2350                                 !act->same_thread(curr) &&
2351                                 act->get_location() == curr->get_location() &&
2352                                 promise->get_value() == curr->get_value()) {
2353                         curr->get_node()->set_promise(i, act->is_rmw());
2354                 }
2355         }
2356 }
2357
2358 /** Checks promises in response to a change in a thread's ClockVector. */
2359 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2360 {
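             /* Thread tid has newly synchronized with this promised read; record that,
              * and fail the promise if it can no longer be resolved */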
2361         for (unsigned int i = 0; i < promises->size(); i++) {
2362                 Promise *promise = (*promises)[i];
2363                 const ModelAction *act = promise->get_action();
2364                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2365                                 merge_cv->synchronized_since(act)) {
2366                         if (promise->increment_threads(tid)) {
2367                                 //Promise has failed
2368                                 priv->failed_promise = true;
2369                                 return;
2370                         }
2371                 }
2372         }
2373 }
2374
2375 void ModelChecker::check_promises_thread_disabled() {
2376         for (unsigned int i = 0; i < promises->size(); i++) {
2377                 Promise *promise = (*promises)[i];
2378                 if (promise->check_promise()) {
2379                         priv->failed_promise = true;
2380                         return;
2381                 }
2382         }
2383 }
2384
2385 /** Checks promises in response to addition to modification order for threads.
2386  * Definitions:
2387  * pthread is the thread that performed the read that created the promise
2388  *
2389  * pread is the read that created the promise
2390  *
2391  * pwrite is either the first write to the same location as pread by
2392  * pthread that is sequenced after pread, or the value read by the
2393  * first read to the same location as pread by pthread that is
2394  * sequenced after pread.
2395  *
2396  *      1. If tid=pthread, then we check what other threads are reachable
2397  * through the modification order starting with pwrite.  Those threads cannot
2398  * perform a write that will resolve the promise due to modification
2399  * order constraints.
2400  *
2401  * 2. If the tid is not pthread, we check whether pwrite can reach the
2402  * action write through the modification order.  If so, that thread
2403  * cannot perform a future write that will resolve the promise due to
2404  * modification order constraints.
2405  *
2406  *      @param tid The thread that either read from the model action
2407  *      write, or actually did the model action write.
2408  *
2409  *      @param write The ModelAction representing the relevant write.
2410  */
2411 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2412 {
2413         void *location = write->get_location();
2414         for (unsigned int i = 0; i < promises->size(); i++) {
2415                 Promise *promise = (*promises)[i];
2416                 const ModelAction *act = promise->get_action();
2417
2418                 //Is this promise on the same location?
2419                 if (act->get_location() != location)
2420                         continue;
2421
2422                 //same thread as the promise
2423                 if (act->get_tid() == tid) {
2424
2425                         //do we have a pwrite for the promise? If not, set it
2426                         if (promise->get_write() == NULL) {
2427                                 promise->set_write(write);
2428                                 //The pwrite cannot happen before the promise
2429                                 if (write->happens_before(act) && (write != act)) {
2430                                         priv->failed_promise = true;
2431                                         return;
2432                                 }
2433                         }
2434                         if (mo_graph->checkPromise(write, promise)) {
2435                                 priv->failed_promise = true;
2436                                 return;
2437                         }
2438                 }
2439
2440                 //Don't do any lookups twice for the same thread
2441                 if (promise->has_sync_thread(tid))
2442                         continue;
2443
2444                 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2445                         if (promise->increment_threads(tid)) {
2446                                 priv->failed_promise = true;
2447                                 return;
2448                         }
2449                 }
2450         }
2451 }
2452
2453 /**
2454  * Compute the set of writes that may break the current pending release
2455  * sequence. This information is extracted from previous release sequence
2456  * calculations.
2457  *
2458  * @param curr The current ModelAction. Must be a release sequence fixup
2459  * action.
2460  */
2461 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2462 {
2463         if (pending_rel_seqs->empty())
2464                 return;
2465
2466         struct release_seq *pending = pending_rel_seqs->back();
2467         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2468                 const ModelAction *write = pending->writes[i];
2469                 curr->get_node()->add_relseq_break(write);
2470         }
2471
2472         /* NULL means don't break the sequence; just synchronize */
2473         curr->get_node()->add_relseq_break(NULL);
2474 }
2475
2476 /**
2477  * Build up an initial set of all past writes that this 'read' action may read
2478  * from. This set is determined by the clock vector's "happens before"
2479  * relationship.
2480  * @param curr is the current ModelAction that we are exploring; it must be a
2481  * 'read' operation.
2482  */
2483 void ModelChecker::build_reads_from_past(ModelAction *curr)
2484 {
2485         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2486         unsigned int i;
2487         ASSERT(curr->is_read());
2488
2489         ModelAction *last_sc_write = NULL;
2490
2491         if (curr->is_seqcst())
2492                 last_sc_write = get_last_seq_cst_write(curr);
2493
2494         /* Iterate over all threads */
2495         for (i = 0; i < thrd_lists->size(); i++) {
2496                 /* Iterate over actions in thread, starting from most recent */
2497                 action_list_t *list = &(*thrd_lists)[i];
2498                 action_list_t::reverse_iterator rit;
2499                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2500                         ModelAction *act = *rit;
2501
2502                         /* Only consider 'write' actions */
2503                         if (!act->is_write() || act == curr)
2504                                 continue;
2505
2506                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2507                         bool allow_read = true;
2508
2509                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2510                                 allow_read = false;
2511                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2512                                 allow_read = false;
2513
2514                         if (allow_read)
2515                                 curr->get_node()->add_read_from(act);
2516
2517                         /* Include at most one act per-thread that "happens before" curr */
2518                         if (act->happens_before(curr))
2519                                 break;
2520                 }
2521         }
2522
2523         if (DBG_ENABLED()) {
2524                 model_print("Reached read action:\n");
2525                 curr->print();
2526                 model_print("Printing may_read_from\n");
2527                 curr->get_node()->print_may_read_from();
2528                 model_print("End printing may_read_from\n");
2529         }
2530 }
2531
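/**
 * Determine whether a read by a thread on the sleep set can be satisfied by
 * a given write. Walks backwards along the reads-from chain of RMW
 * operations: the read is allowed if some write in that chain is a release
 * operation executed while the reading thread was on the sleep set, or if
 * the chain reaches an uninitialized/unresolved value.
 * @param curr The read action of the (sleeping) thread
 * @param write The candidate write that the read might read from
 * @return True if the read may read from the write
 */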
2532 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2533 {
2534         while (true) {
2535                 /* UNINIT actions don't have a Node, and they never sleep */
2536                 if (write->is_uninitialized())
2537                         return true;
2538                 Node *prevnode = write->get_node()->get_parent();
2539
2540                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2541                 if (write->is_release() && thread_sleep)
2542                         return true;
2543                 if (!write->is_rmw()) {
2544                         return false;
2545                 }
2546                 if (write->get_reads_from() == NULL)
2547                         return true;
2548                 write = write->get_reads_from();
2549         }
2550 }
2551
2552 /**
2553  * @brief Create a new action representing an uninitialized atomic
2554  * @param location The memory location of the atomic object
2555  * @return A pointer to a new ModelAction
2556  */
2557 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2558 {
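	/* Allocate the action in snapshotting memory via snapshot_malloc() and
	 * construct it in place with placement new. */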
2559         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2560         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2561         act->create_cv(NULL);
2562         return act;
2563 }
2564
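/**
 * @brief Print an action list along with an order-sensitive hash of the trace
 * @param list The action list to print
 * @param exec_num The execution number to label the output with, or -1 (the
 * default) to omit the label
 */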
2565 static void print_list(action_list_t *list, int exec_num = -1)
2566 {
2567         action_list_t::iterator it;
2568
2569         model_print("---------------------------------------------------------------------\n");
2570         if (exec_num >= 0)
2571                 model_print("Execution %d:\n", exec_num);
2572
2573         unsigned int hash = 0;
2574
2575         for (it = list->begin(); it != list->end(); it++) {
2576                 (*it)->print();
2577                 hash = hash^(hash<<3)^((*it)->hash());
2578         }
2579         model_print("HASH %u\n", hash);
2580         model_print("---------------------------------------------------------------------\n");
2581 }
2582
2583 #if SUPPORT_MOD_ORDER_DUMP
2584 void ModelChecker::dumpGraph(char *filename) const
2585 {
2586         char buffer[200];
2587         sprintf(buffer, "%s.dot", filename);
2588         FILE *file = fopen(buffer, "w");
2589         fprintf(file, "digraph %s {\n", filename);
2590         mo_graph->dumpNodes(file);
2591         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2592
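	/* thread_array[tid] tracks the most recent action seen for each thread,
	 * so consecutive actions in a thread can be connected with
	 * sequenced-before ("sb") edges; each read is also connected to its
	 * write with a reads-from ("rf") edge. */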
2593         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2594                 ModelAction *action = *it;
2595                 if (action->is_read()) {
2596                         fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2597                         if (action->get_reads_from() != NULL)
2598                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2599                 }
2600                 if (thread_array[action->get_tid()] != NULL) {
2601                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2602                 }
2603
2604                 thread_array[action->get_tid()] = action;
2605         }
2606         fprintf(file, "}\n");
2607         model_free(thread_array);
2608         fclose(file);
2609 }
2610 #endif
2611
2612 /** @brief Prints an execution trace summary. */
2613 void ModelChecker::print_summary() const
2614 {
2615 #if SUPPORT_MOD_ORDER_DUMP
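	/* Print the scheduler state and dump the modification-order graph and
	 * the per-execution action graph as Graphviz (.dot) files, numbered by
	 * the total execution count. */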
2616         scheduler->print();
2617         char buffername[100];
2618         sprintf(buffername, "exec%04u", stats.num_total);
2619         mo_graph->dumpGraphToFile(buffername);
2620         sprintf(buffername, "graph%04u", stats.num_total);
2621         dumpGraph(buffername);
2622 #endif
2623
2624         if (!isfeasibleprefix())
2625                 model_print("INFEASIBLE EXECUTION!\n");
2626         print_list(action_trace, stats.num_total);
2627         model_print("\n");
2628 }
2629
2630 /**
2631  * Add a Thread to the system for the first time. Should only be called once
2632  * per thread.
2633  * @param t The Thread to add
2634  */
2635 void ModelChecker::add_thread(Thread *t)
2636 {
2637         thread_map->put(id_to_int(t->get_id()), t);
2638         scheduler->add_thread(t);
2639 }
2640
2641 /**
2642  * Removes a thread from the scheduler.
2643  * @param t The Thread to remove
2644  */
2645 void ModelChecker::remove_thread(Thread *t)
2646 {
2647         scheduler->remove_thread(t);
2648 }
2649
2650 /**
2651  * @brief Get a Thread reference by its ID
2652  * @param tid The Thread's ID
2653  * @return A Thread reference
2654  */
2655 Thread * ModelChecker::get_thread(thread_id_t tid) const
2656 {
2657         return thread_map->get(id_to_int(tid));
2658 }
2659
2660 /**
2661  * @brief Get a reference to the Thread in which a ModelAction was executed
2662  * @param act The ModelAction
2663  * @return A Thread reference
2664  */
2665 Thread * ModelChecker::get_thread(ModelAction *act) const
2666 {
2667         return get_thread(act->get_tid());
2668 }
2669
2670 /**
2671  * @brief Check if a Thread is currently enabled
2672  * @param t The Thread to check
2673  * @return True if the Thread is currently enabled
2674  */
2675 bool ModelChecker::is_enabled(Thread *t) const
2676 {
2677         return scheduler->is_enabled(t);
2678 }
2679
2680 /**
2681  * @brief Check if a Thread is currently enabled
2682  * @param tid The ID of the Thread to check
2683  * @return True if the Thread is currently enabled
2684  */
2685 bool ModelChecker::is_enabled(thread_id_t tid) const
2686 {
2687         return scheduler->is_enabled(tid);
2688 }
2689
2690 /**
2691  * Switch from a user-context to the "master thread" context (a.k.a. system
2692  * context). This switch is made with the intention of exploring a particular
2693  * model-checking action (described by a ModelAction object). Must be called
2694  * from a user-thread context.
2695  *
2696  * @param act The current action that will be explored. May be NULL only if
2697  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2698  * ModelChecker::has_asserted).
2699  * @return The value returned by the current action
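 *
 * A sketch of a typical call site (hypothetical example; the real wrappers
 * live in the user-facing library code, not in this file):
 *
 *   // forward a user-context atomic load to the model checker
 *   uint64_t val = model->switch_to_master(
 *           new ModelAction(ATOMIC_READ, std::memory_order_seq_cst, obj, VALUE_NONE));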
2700  */
2701 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2702 {
2703         DBG();
2704         Thread *old = thread_current();
2705         set_current_action(act);
2706         old->set_state(THREAD_READY);
2707         if (Thread::swap(old, &system_context) < 0) {
2708                 perror("swap threads");
2709                 exit(EXIT_FAILURE);
2710         }
2711         return old->get_return_value();
2712 }
2713
2714 /**
2715  * Takes the next step in the execution, if possible.
2716  * @param curr The current step to take
2717  * @return True (success) if a step was taken; false otherwise.
2718  */
2719 bool ModelChecker::take_step(ModelAction *curr)
2720 {
2721         if (has_asserted())
2722                 return false;
2723
2724         Thread *curr_thrd = get_thread(curr);
2725         ASSERT(curr_thrd->get_state() == THREAD_READY);
2726
2727         curr = check_current_action(curr);
2728
2729         /* Infeasible -> don't take any more steps */
2730         if (is_infeasible())
2731                 return false;
2732         else if (isfeasibleprefix() && have_bug_reports()) {
2733                 set_assert();
2734                 return false;
2735         }
2736
2737         if (params.bound != 0)
2738                 if (priv->used_sequence_numbers > params.bound)
2739                         return false;
2740
2741         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2742                 scheduler->remove_thread(curr_thrd);
2743
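	/* get_next_thread() consults the exploration state (replay/backtracking)
	 * and may force a specific thread; the Scheduler then makes the final
	 * choice among the enabled threads. */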
2744         Thread *next_thrd = get_next_thread(curr);
2745         next_thrd = scheduler->next_thread(next_thrd);
2746
2747         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2748                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2749
2750         /*
2751          * Launch end-of-execution release sequence fixups only when there are:
2752          *
2753          * (1) no more user threads to run (or when execution replay chooses
2754          *     the 'model_thread')
2755          * (2) pending release sequences
2756          * (3) pending assertions (i.e., data races)
2757          * (4) no pending promises
2758          */
2759         if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2760                         is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2761                 model_print("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
2762                                 pending_rel_seqs->size());
2763                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2764                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2765                                 model_thread);
2766                 set_current_action(fixup);
2767                 return true;
2768         }
2769
2770         /* next_thrd == NULL -> don't take any more steps */
2771         if (!next_thrd)
2772                 return false;
2773
2774         next_thrd->set_state(THREAD_RUNNING);
2775
2776         if (next_thrd->get_pending() != NULL) {
2777                 /* restart a pending action */
2778                 set_current_action(next_thrd->get_pending());
2779                 next_thrd->set_pending(NULL);
2780                 next_thrd->set_state(THREAD_READY);
2781                 return true;
2782         }
2783
2784         /* Return false only if swap fails with an error */
2785         return (Thread::swap(&system_context, next_thrd) == 0);
2786 }
2787
2788 /** Wrapper to run the user's main function, with appropriate arguments */
2789 void user_main_wrapper(void *)
2790 {
2791         user_main(model->params.argc, model->params.argv);
2792 }
2793
2794 /** @brief Run ModelChecker for the user program */
2795 void ModelChecker::run()
2796 {
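	/* Each loop iteration explores one complete execution; next_execution()
	 * prepares the state for the next execution and returns false once the
	 * search space has been exhausted. */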
2797         do {
2798                 thrd_t user_thread;
2799                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2800
2801                 add_thread(t);
2802
2803                 /* Run user thread up to its first action */
2804                 scheduler->next_thread(t);
2805                 Thread::swap(&system_context, t);
2806
2807                 /* Step the execution one action at a time until all threads complete */
2808                 while (take_step(priv->current_action));
2809         } while (next_execution());
2810
2811         print_stats();
2812 }