model: refactor add_future_value, add documentation
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new SnapVector<Promise *>()),
90         futurevalues(new SnapVector<struct PendingFutureValue>()),
91         pending_rel_seqs(new SnapVector<struct release_seq *>()),
92         thrd_last_action(new SnapVector<ModelAction *>(1)),
93         thrd_last_fence_release(new SnapVector<ModelAction *>()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         SnapVector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new SnapVector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
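/*
 * Illustrative usage of the get-or-create helpers above (a sketch, not part
 * of the original source; assumes a ModelAction *act in scope).  Callers
 * never check for a missing entry, because the helper lazily allocates the
 * per-location list on first access:
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);
 *
 * The same idiom appears later in this file, e.g. in get_last_conflict() and
 * process_mutex().
 */
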
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts and cannot be split; THREAD_CREATE actions
211  * should be followed by the execution of their child thread. In either case,
212  * the current action determines the next thread to schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
228
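/*
 * Illustrative example (a sketch, not part of the original source): a C++11
 * read-modify-write such as
 *
 *   std::atomic<int> x(0);
 *   x.fetch_add(1, std::memory_order_seq_cst);
 *
 * occurs in two parts here: a read half (RMWR) followed by a write half
 * (RMW/RMWC).  Both halves must run on the same thread, which is why the
 * is_rmwr() check above returns the current thread rather than deferring to
 * the scheduler.
 */
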
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * we defer to the scheduler.
236  *
237  * @return The next thread chosen to run, if any exists; otherwise NULL, if
238  * the current execution should terminate
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_misc()) {
264                         /* The next node will try to satisfy a different misc_index value. */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else if (nextnode->increment_promise()) {
268                         /* The next node will try to satisfy a different set of promises. */
269                         tid = next->get_tid();
270                         node_stack->pop_restofstack(2);
271                 } else if (nextnode->increment_read_from()) {
272                         /* The next node will read from a different value. */
273                         tid = next->get_tid();
274                         node_stack->pop_restofstack(2);
275                 } else if (nextnode->increment_relseq_break()) {
276                         /* The next node will try to resolve a release sequence differently */
277                         tid = next->get_tid();
278                         node_stack->pop_restofstack(2);
279                 } else {
280                         ASSERT(prevnode);
281                         /* Make a different thread execute for next step */
282                         scheduler->add_sleep(get_thread(next->get_tid()));
283                         tid = prevnode->get_next_backtrack();
284                         /* Make sure the backtracked thread isn't sleeping. */
285                         node_stack->pop_restofstack(1);
286                         if (diverge == earliest_diverge) {
287                                 earliest_diverge = prevnode->get_action();
288                         }
289                 }
290                 /* Start the round robin scheduler from this thread id */
291                 scheduler->set_scheduler_thread(tid);
292                 /* The correct sleep set is in the parent node. */
293                 execute_sleep_set();
294
295                 DEBUG("*** Divergence point ***\n");
296
297                 diverge = NULL;
298         } else {
299                 tid = next->get_tid();
300         }
301         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
302         ASSERT(tid != THREAD_ID_T_NONE);
303         return thread_map->get(id_to_int(tid));
304 }
305
306 /**
307  * We need to know what the next actions of all threads in the sleep
308  * set will be.  This method computes them and stores the actions at
309  * the corresponding thread object's pending action.
310  */
311
312 void ModelChecker::execute_sleep_set()
313 {
314         for (unsigned int i = 0; i < get_num_threads(); i++) {
315                 thread_id_t tid = int_to_id(i);
316                 Thread *thr = get_thread(tid);
317                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
318                         thr->get_pending()->set_sleep_flag();
319                 }
320         }
321 }
322
323 /**
324  * @brief Should the current action wake up a given thread?
325  *
326  * @param curr The current action
327  * @param thread The thread that we might wake up
328  * @return True, if we should wake up the sleeping thread; false otherwise
329  */
330 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
331 {
332         const ModelAction *asleep = thread->get_pending();
333         /* Don't allow partial RMW to wake anyone up */
334         if (curr->is_rmwr())
335                 return false;
336         /* Synchronizing actions may have been backtracked */
337         if (asleep->could_synchronize_with(curr))
338                 return true;
339         /* All acquire/release fences and fence-acquire/store-release */
340         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
341                 return true;
342         /* Fence-release + store can awake load-acquire on the same location */
343         /* Fence-release + store can wake a load-acquire on the same location */
344                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
345                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
346                         return true;
347         }
348         return false;
349 }
350
351 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
352 {
353         for (unsigned int i = 0; i < get_num_threads(); i++) {
354                 Thread *thr = get_thread(int_to_id(i));
355                 if (scheduler->is_sleep_set(thr)) {
356                         if (should_wake_up(curr, thr))
357                                 /* Remove this thread from sleep set */
358                                 scheduler->remove_sleep(thr);
359                 }
360         }
361 }
362
363 /** @brief Alert the model-checker that an incorrectly-ordered
364  * synchronization was made */
365 void ModelChecker::set_bad_synchronization()
366 {
367         priv->bad_synchronization = true;
368 }
369
370 /**
371  * Check whether the current trace has triggered an assertion which should halt
372  * its execution.
373  *
374  * @return True, if the execution should be aborted; false otherwise
375  */
376 bool ModelChecker::has_asserted() const
377 {
378         return priv->asserted;
379 }
380
381 /**
382  * Trigger a trace assertion which should cause this execution to be halted.
383  * This can be due to a detected bug or due to an infeasibility that should
384  * halt ASAP.
385  */
386 void ModelChecker::set_assert()
387 {
388         priv->asserted = true;
389 }
390
391 /**
392  * Check if we are in a deadlock. Should only be called at the end of an
393  * execution, although it should not give false positives in the middle of an
394  * execution (there should be some ENABLED thread).
395  *
396  * @return True if program is in a deadlock; false otherwise
397  */
398 bool ModelChecker::is_deadlocked() const
399 {
400         bool blocking_threads = false;
401         for (unsigned int i = 0; i < get_num_threads(); i++) {
402                 thread_id_t tid = int_to_id(i);
403                 if (is_enabled(tid))
404                         return false;
405                 Thread *t = get_thread(tid);
406                 if (!t->is_model_thread() && t->get_pending())
407                         blocking_threads = true;
408         }
409         return blocking_threads;
410 }
411
412 /**
413  * Check if a Thread has entered a circular wait deadlock situation. This will
414  * not check other threads for potential deadlock situations, and may miss
415  * deadlocks involving WAIT.
416  *
417  * @param t The thread which may have entered a deadlock
418  * @return True if this Thread entered a deadlock; false otherwise
419  */
420 bool ModelChecker::is_circular_wait(const Thread *t) const
421 {
422         for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on())
423                 if (waiting == t)
424                         return true;
425         return false;
426 }
427
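/*
 * Illustrative deadlock caught by is_circular_wait() (a sketch, not part of
 * the original source), using two std::mutex objects m1 and m2:
 *
 *   // Thread 1        // Thread 2
 *   m1.lock();         m2.lock();
 *   m2.lock();         m1.lock();
 *
 * In some interleavings Thread 1 waits on Thread 2 (the holder of m2) while
 * Thread 2 waits on Thread 1 (the holder of m1); following the waiting_on()
 * chain from either thread leads back to itself, so the loop above returns
 * true.
 */
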
428 /**
429  * Check if this is a complete execution. That is, have all threads completed
430  * execution (rather than exiting because sleep sets have forced a redundant
431  * execution).
432  *
433  * @return True if the execution is complete.
434  */
435 bool ModelChecker::is_complete_execution() const
436 {
437         for (unsigned int i = 0; i < get_num_threads(); i++)
438                 if (is_enabled(int_to_id(i)))
439                         return false;
440         return true;
441 }
442
443 /**
444  * @brief Assert a bug in the executing program.
445  *
446  * Use this function to assert any sort of bug in the user program. If the
447  * current trace is feasible (actually, a prefix of some feasible execution),
448  * then this execution will be aborted, printing the appropriate message. If
449  * the current trace is not yet feasible, the error message will be stashed and
450  * printed if the execution ever becomes feasible.
451  *
452  * @param msg Descriptive message for the bug (do not include newline char)
453  * @return True if the bug is immediately feasible
454  */
455 bool ModelChecker::assert_bug(const char *msg)
456 {
457         priv->bugs.push_back(new bug_message(msg));
458
459         if (isfeasibleprefix()) {
460                 set_assert();
461                 return true;
462         }
463         return false;
464 }
465
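/*
 * Illustrative caller sketch (an assumption, not part of the original
 * source): a user-facing assertion helper could route failures through
 * assert_user_bug() below, so that feasible bugs halt the execution
 * immediately while infeasible ones are merely stashed:
 *
 *   static void my_model_assert(bool cond, const char *msg)
 *   {
 *           if (!cond)
 *                   model->assert_user_bug(msg);
 *   }
 */
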
466 /**
467  * @brief Assert a bug in the executing program, asserted by a user thread
468  * @see ModelChecker::assert_bug
469  * @param msg Descriptive message for the bug (do not include newline char)
470  */
471 void ModelChecker::assert_user_bug(const char *msg)
472 {
473         /* If feasible bug, bail out now */
474         if (assert_bug(msg))
475                 switch_to_master(NULL);
476 }
477
478 /** @return True, if any bugs have been reported for this execution */
479 bool ModelChecker::have_bug_reports() const
480 {
481         return priv->bugs.size() != 0;
482 }
483
484 /** @brief Print bug report listing for this execution (if any bugs exist) */
485 void ModelChecker::print_bugs() const
486 {
487         if (have_bug_reports()) {
488                 model_print("Bug report: %zu bug%s detected\n",
489                                 priv->bugs.size(),
490                                 priv->bugs.size() > 1 ? "s" : "");
491                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
492                         priv->bugs[i]->print();
493         }
494 }
495
496 /**
497  * @brief Record end-of-execution stats
498  *
499  * Must be run when exiting an execution. Records various stats.
500  * @see struct execution_stats
501  */
502 void ModelChecker::record_stats()
503 {
504         stats.num_total++;
505         if (!isfeasibleprefix())
506                 stats.num_infeasible++;
507         else if (have_bug_reports())
508                 stats.num_buggy_executions++;
509         else if (is_complete_execution())
510                 stats.num_complete++;
511         else {
512                 stats.num_redundant++;
513
514                 /**
515                  * @todo We can violate this ASSERT() when fairness/sleep sets
516                  * conflict to cause an execution to terminate, e.g. with:
517                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
518                  */
519                 //ASSERT(scheduler->all_threads_sleeping());
520         }
521 }
522
523 /** @brief Print execution stats */
524 void ModelChecker::print_stats() const
525 {
526         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
527         model_print("Number of redundant executions: %d\n", stats.num_redundant);
528         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
529         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
530         model_print("Total executions: %d\n", stats.num_total);
531         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
532 }
533
534 /**
535  * @brief End-of-execution print
536  * @param printbugs Should any existing bugs be printed?
537  */
538 void ModelChecker::print_execution(bool printbugs) const
539 {
540         print_program_output();
541
542         if (params.verbose) {
543                 model_print("Earliest divergence point since last feasible execution:\n");
544                 if (earliest_diverge)
545                         earliest_diverge->print();
546                 else
547                         model_print("(Not set)\n");
548
549                 model_print("\n");
550                 print_stats();
551         }
552
553         /* Don't print invalid bugs */
554         if (printbugs)
555                 print_bugs();
556
557         model_print("\n");
558         print_summary();
559 }
560
561 /**
562  * Queries the model-checker for more executions to explore and, if one
563  * exists, resets the model-checker state to execute a new execution.
564  *
565  * @return True if there are more executions to explore; false otherwise,
566  * meaning the search has exhausted all backtracking points
567  */
568 bool ModelChecker::next_execution()
569 {
570         DBG();
571         /* Is this execution a feasible execution that's worth bug-checking? */
572         bool complete = isfeasibleprefix() && (is_complete_execution() ||
573                         have_bug_reports());
574
575         /* End-of-execution bug checks */
576         if (complete) {
577                 if (is_deadlocked())
578                         assert_bug("Deadlock detected");
579
580                 checkDataRaces();
581         }
582
583         record_stats();
584
585         /* Output */
586         if (params.verbose || (complete && have_bug_reports()))
587                 print_execution(complete);
588         else
589                 clear_program_output();
590
591         if (complete)
592                 earliest_diverge = NULL;
593
594         if ((diverge = get_next_backtrack()) == NULL)
595                 return false;
596
597         if (DBG_ENABLED()) {
598                 model_print("Next execution will diverge at:\n");
599                 diverge->print();
600         }
601
602         reset_to_initial_state();
603         return true;
604 }
605
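/*
 * Illustrative driver sketch (an assumption about the surrounding harness,
 * not part of the original source): the exploration loop simply re-runs the
 * user program until next_execution() reports that no backtracking points
 * remain:
 *
 *   do {
 *           // ... run one execution of the user program to completion ...
 *   } while (model->next_execution());
 */
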
606 /**
607  * @brief Find the last fence-related backtracking conflict for a ModelAction
608  *
609  * This function performs the search for the most recent conflicting action
610  * against which we should perform backtracking, as affected by fence
611  * operations. This includes pairs of potentially-synchronizing actions which
612  * occur due to fence-acquire or fence-release, and hence should be explored in
613  * the opposite execution order.
614  *
615  * @param act The current action
616  * @return The most recent action which conflicts with act due to fences
617  */
618 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
619 {
620         /* Only perform release/acquire fence backtracking for stores */
621         if (!act->is_write())
622                 return NULL;
623
624         /* Find a fence-release (or, act is a release) */
625         ModelAction *last_release;
626         if (act->is_release())
627                 last_release = act;
628         else
629                 last_release = get_last_fence_release(act->get_tid());
630         if (!last_release)
631                 return NULL;
632
633         /* Skip past the release */
634         action_list_t *list = action_trace;
635         action_list_t::reverse_iterator rit;
636         for (rit = list->rbegin(); rit != list->rend(); rit++)
637                 if (*rit == last_release)
638                         break;
639         ASSERT(rit != list->rend());
640
641         /* Find a prior:
642          *   load-acquire
643          * or
644          *   load --sb-> fence-acquire */
645         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
646         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
647         bool found_acquire_fences = false;
648         for ( ; rit != list->rend(); rit++) {
649                 ModelAction *prev = *rit;
650                 if (act->same_thread(prev))
651                         continue;
652
653                 int tid = id_to_int(prev->get_tid());
654
655                 if (prev->is_read() && act->same_var(prev)) {
656                         if (prev->is_acquire()) {
657                                 /* Found most recent load-acquire, don't need
658                                  * to search for more fences */
659                                 if (!found_acquire_fences)
660                                         return NULL;
661                         } else {
662                                 prior_loads[tid] = prev;
663                         }
664                 }
665                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
666                         found_acquire_fences = true;
667                         acquire_fences[tid] = prev;
668                 }
669         }
670
671         ModelAction *latest_backtrack = NULL;
672         for (unsigned int i = 0; i < acquire_fences.size(); i++)
673                 if (acquire_fences[i] && prior_loads[i])
674                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
675                                 latest_backtrack = acquire_fences[i];
676         return latest_backtrack;
677 }
678
679 /**
680  * @brief Find the last backtracking conflict for a ModelAction
681  *
682  * This function performs the search for the most recent conflicting action
683  * against which we should perform backtracking. This primarily includes pairs of
684  * synchronizing actions which should be explored in the opposite execution
685  * order.
686  *
687  * @param act The current action
688  * @return The most recent action which conflicts with act
689  */
690 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
691 {
692         switch (act->get_type()) {
693         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
694         case ATOMIC_READ:
695         case ATOMIC_WRITE:
696         case ATOMIC_RMW: {
697                 ModelAction *ret = NULL;
698
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (prev->could_synchronize_with(act)) {
705                                 ret = prev;
706                                 break;
707                         }
708                 }
709
710                 ModelAction *ret2 = get_last_fence_conflict(act);
711                 if (!ret2)
712                         return ret;
713                 if (!ret)
714                         return ret2;
715                 if (*ret < *ret2)
716                         return ret2;
717                 return ret;
718         }
719         case ATOMIC_LOCK:
720         case ATOMIC_TRYLOCK: {
721                 /* linear search: from most recent to oldest */
722                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
723                 action_list_t::reverse_iterator rit;
724                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
725                         ModelAction *prev = *rit;
726                         if (act->is_conflicting_lock(prev))
727                                 return prev;
728                 }
729                 break;
730         }
731         case ATOMIC_UNLOCK: {
732                 /* linear search: from most recent to oldest */
733                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
734                 action_list_t::reverse_iterator rit;
735                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
736                         ModelAction *prev = *rit;
737                         if (!act->same_thread(prev) && prev->is_failed_trylock())
738                                 return prev;
739                 }
740                 break;
741         }
742         case ATOMIC_WAIT: {
743                 /* linear search: from most recent to oldest */
744                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
745                 action_list_t::reverse_iterator rit;
746                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
747                         ModelAction *prev = *rit;
748                         if (!act->same_thread(prev) && prev->is_failed_trylock())
749                                 return prev;
750                         if (!act->same_thread(prev) && prev->is_notify())
751                                 return prev;
752                 }
753                 break;
754         }
755
756         case ATOMIC_NOTIFY_ALL:
757         case ATOMIC_NOTIFY_ONE: {
758                 /* linear search: from most recent to oldest */
759                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
760                 action_list_t::reverse_iterator rit;
761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
762                         ModelAction *prev = *rit;
763                         if (!act->same_thread(prev) && prev->is_wait())
764                                 return prev;
765                 }
766                 break;
767         }
768         default:
769                 break;
770         }
771         return NULL;
772 }
773
774 /** This method finds backtracking points against which we should try to
775  * reorder the parameter ModelAction.
776  *
777  * @param act The ModelAction to find backtracking points for.
778  */
779 void ModelChecker::set_backtracking(ModelAction *act)
780 {
781         Thread *t = get_thread(act);
782         ModelAction *prev = get_last_conflict(act);
783         if (prev == NULL)
784                 return;
785
786         Node *node = prev->get_node()->get_parent();
787
788         int low_tid, high_tid;
789         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
790                 low_tid = id_to_int(act->get_tid());
791                 high_tid = low_tid + 1;
792         } else {
793                 low_tid = 0;
794                 high_tid = get_num_threads();
795         }
796
797         for (int i = low_tid; i < high_tid; i++) {
798                 thread_id_t tid = int_to_id(i);
799
800                 /* Make sure this thread can be enabled here. */
801                 if (i >= node->get_num_threads())
802                         break;
803
804                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
805                 if (node->enabled_status(tid) != THREAD_ENABLED)
806                         continue;
807
808                 /* Check if this has been explored already */
809                 if (node->has_been_explored(tid))
810                         continue;
811
812                 /* See if fairness allows */
813                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
814                         bool unfair = false;
815                         for (int t = 0; t < node->get_num_threads(); t++) {
816                                 thread_id_t tother = int_to_id(t);
817                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
818                                         unfair = true;
819                                         break;
820                                 }
821                         }
822                         if (unfair)
823                                 continue;
824                 }
825
826                 /* See if CHESS-like yield fairness allows */
827                 if (model->params.yieldon) {
828                         bool unfair = false;
829                         for (int t = 0; t < node->get_num_threads(); t++) {
830                                 thread_id_t tother = int_to_id(t);
831                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
832                                         unfair = true;
833                                         break;
834                                 }
835                         }
836                         if (unfair)
837                                 continue;
838                 }
839
840                 /* Cache the latest backtracking point */
841                 set_latest_backtrack(prev);
842
843                 /* If this is a new backtracking point, mark the tree */
844                 if (!node->set_backtrack(tid))
845                         continue;
846                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
847                                         id_to_int(prev->get_tid()),
848                                         id_to_int(t->get_id()));
849                 if (DBG_ENABLED()) {
850                         prev->print();
851                         act->print();
852                 }
853         }
854 }
855
856 /**
857  * @brief Cache a backtracking point as the "most recent", if eligible
858  *
859  * Note that this does not prepare the NodeStack for this backtracking
860  * operation; it only caches the action on a per-execution basis
861  *
862  * @param act The operation at which we should explore a different next action
863  * (i.e., backtracking point)
864  * @return True, if this action is now the most recent backtracking point;
865  * false otherwise
866  */
867 bool ModelChecker::set_latest_backtrack(ModelAction *act)
868 {
869         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
870                 priv->next_backtrack = act;
871                 return true;
872         }
873         return false;
874 }
875
876 /**
877  * Returns the last backtracking point. The model checker will explore a different
878  * path for this point in the next execution.
879  * @return The ModelAction at which the next execution should diverge.
880  */
881 ModelAction * ModelChecker::get_next_backtrack()
882 {
883         ModelAction *next = priv->next_backtrack;
884         priv->next_backtrack = NULL;
885         return next;
886 }
887
888 /**
889  * Processes a read model action.
890  * @param curr is the read model action to process.
891  * @return True if processing this read updates the mo_graph.
892  */
893 bool ModelChecker::process_read(ModelAction *curr)
894 {
895         Node *node = curr->get_node();
896         while (true) {
897                 bool updated = false;
898                 switch (node->get_read_from_status()) {
899                 case READ_FROM_PAST: {
900                         const ModelAction *rf = node->get_read_from_past();
901                         ASSERT(rf);
902
903                         mo_graph->startChanges();
904
905                         ASSERT(!is_infeasible());
906                         if (!check_recency(curr, rf)) {
907                                 if (node->increment_read_from()) {
908                                         mo_graph->rollbackChanges();
909                                         continue;
910                                 } else {
911                                         priv->too_many_reads = true;
912                                 }
913                         }
914
915                         updated = r_modification_order(curr, rf);
916                         read_from(curr, rf);
917                         mo_graph->commitChanges();
918                         mo_check_promises(curr, true);
919                         break;
920                 }
921                 case READ_FROM_PROMISE: {
922                         Promise *promise = curr->get_node()->get_read_from_promise();
923                         if (promise->add_reader(curr))
924                                 priv->failed_promise = true;
925                         curr->set_read_from_promise(promise);
926                         mo_graph->startChanges();
927                         if (!check_recency(curr, promise))
928                                 priv->too_many_reads = true;
929                         updated = r_modification_order(curr, promise);
930                         mo_graph->commitChanges();
931                         break;
932                 }
933                 case READ_FROM_FUTURE: {
934                         /* Read from future value */
935                         struct future_value fv = node->get_future_value();
936                         Promise *promise = new Promise(curr, fv);
937                         curr->set_read_from_promise(promise);
938                         promises->push_back(promise);
939                         mo_graph->startChanges();
940                         updated = r_modification_order(curr, promise);
941                         mo_graph->commitChanges();
942                         break;
943                 }
944                 default:
945                         ASSERT(false);
946                 }
947                 get_thread(curr)->set_return_value(curr->get_return_value());
948                 return updated;
949         }
950 }
951
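/*
 * Illustrative example of the READ_FROM_FUTURE case above (a sketch, not part
 * of the original source):
 *
 *   std::atomic<int> x(0);
 *   // Thread A                              // Thread B
 *   r = x.load(std::memory_order_relaxed);   x.store(42, std::memory_order_relaxed);
 *
 * Even if Thread A's load is executed before Thread B's store in the current
 * interleaving, the load may speculatively observe 42 as a "future value";
 * the binding is recorded as a Promise, which must later be resolved by an
 * actual store of that value or the execution becomes infeasible.
 */
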
952 /**
953  * Processes a lock, trylock, unlock, wait, or notify model action.
954  * @param curr The mutex-related model action to process
955  *
956  * The trylock operation checks whether the lock is taken.  If not, it
957  * falls through to the normal lock operation case.  If so, it fails,
958  * returning 0 to the caller.
959  *
960  * The lock operation has already been checked to be enabled, so it
961  * just grabs the lock and synchronizes with the previous unlock.
962  *
963  * The unlock operation has to re-enable all of the threads that are
964  * waiting on the lock.
965  *
966  * @return True if synchronization was updated; false otherwise
967  */
968 bool ModelChecker::process_mutex(ModelAction *curr)
969 {
970         std::mutex *mutex = curr->get_mutex();
971         struct std::mutex_state *state = NULL;
972
973         if (mutex)
974                 state = mutex->get_state();
975
976         switch (curr->get_type()) {
977         case ATOMIC_TRYLOCK: {
978                 bool success = !state->locked;
979                 curr->set_try_lock(success);
980                 if (!success) {
981                         get_thread(curr)->set_return_value(0);
982                         break;
983                 }
984                 get_thread(curr)->set_return_value(1);
985         }
986                 //otherwise fall through to the lock case
987         case ATOMIC_LOCK: {
988                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
989                         assert_bug("Lock access before initialization");
990                 state->locked = get_thread(curr);
991                 ModelAction *unlock = get_last_unlock(curr);
992                 //synchronize with the previous unlock statement
993                 if (unlock != NULL) {
994                         curr->synchronize_with(unlock);
995                         return true;
996                 }
997                 break;
998         }
999         case ATOMIC_UNLOCK: {
1000                 //unlock the lock
1001                 state->locked = NULL;
1002                 //wake up the other threads
1003                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
1004                 //activate all the waiting threads
1005                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1006                         scheduler->wake(get_thread(*rit));
1007                 }
1008                 waiters->clear();
1009                 break;
1010         }
1011         case ATOMIC_WAIT: {
1012                 //unlock the lock
1013                 state->locked = NULL;
1014                 //wake up the other threads
1015                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
1016                 //activate all the waiting threads
1017                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1018                         scheduler->wake(get_thread(*rit));
1019                 }
1020                 waiters->clear();
1021                 //check whether we should go to sleep or not...simulate spurious failures
1022                 if (curr->get_node()->get_misc() == 0) {
1023                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1024                         //disable us
1025                         scheduler->sleep(get_thread(curr));
1026                 }
1027                 break;
1028         }
1029         case ATOMIC_NOTIFY_ALL: {
1030                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1031                 //activate all the waiting threads
1032                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1033                         scheduler->wake(get_thread(*rit));
1034                 }
1035                 waiters->clear();
1036                 break;
1037         }
1038         case ATOMIC_NOTIFY_ONE: {
1039                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1040                 int wakeupthread = curr->get_node()->get_misc();
1041                 action_list_t::iterator it = waiters->begin();
1042                 advance(it, wakeupthread);
1043                 scheduler->wake(get_thread(*it));
1044                 waiters->erase(it);
1045                 break;
1046         }
1047
1048         default:
1049                 ASSERT(0);
1050         }
1051         return false;
1052 }
1053
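/*
 * Illustrative user-level operations handled by process_mutex() above (a
 * sketch, not part of the original source; assumes the std::mutex and
 * std::condition_variable wrappers shipped with the model checker):
 *
 *   std::mutex m;
 *   m.lock();           // ATOMIC_LOCK
 *   m.try_lock();       // ATOMIC_TRYLOCK, fails (returns false) if already held
 *   m.unlock();         // ATOMIC_UNLOCK, wakes threads blocked on m
 *
 *   std::condition_variable cv;
 *   cv.wait(m);         // ATOMIC_WAIT, releases m and may go to sleep
 *   cv.notify_one();    // ATOMIC_NOTIFY_ONE, wakes one chosen waiter
 *   cv.notify_all();    // ATOMIC_NOTIFY_ALL, wakes every waiter
 */
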
1054 /**
1055  * @brief Add a future value to a reader
1056  *
1057  * This function performs a few additional checks to ensure that the future
1058  * value can be feasibly observed by the reader
1059  *
1060  * @param writer The operation whose value is sent. Must be a write.
1061  * @param reader The read operation which may read the future value. Must be a read.
1062  */
1063 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1064 {
1065         /* Do more ambitious checks now that mo is more complete */
1066         if (!mo_may_allow(writer, reader))
1067                 return;
1068
1069         Node *node = reader->get_node();
1070
1071         /* Find an ancestor thread which exists at the time of the reader */
1072         Thread *write_thread = get_thread(writer);
1073         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1074                 write_thread = write_thread->get_parent();
1075
1076         struct future_value fv = {
1077                 writer->get_write_value(),
1078                 writer->get_seq_number() + params.maxfuturedelay,
1079                 write_thread->get_id(),
1080         };
1081         if (node->add_future_value(fv))
1082                 set_latest_backtrack(reader);
1083 }
1084
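/*
 * Worked example for the struct assembled above (illustrative numbers, an
 * assumption): with writer->get_write_value() == 42, a writer sequence number
 * of 10 and params.maxfuturedelay == 6, the reader's Node records the future
 * value {42, 16, tid}, where 16 serves as the expiration bound on how late
 * the matching store may appear and tid has been walked up to an ancestor
 * thread that already exists at the reader's node.
 */
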
1085 /**
1086  * Process a write ModelAction
1087  * @param curr The ModelAction to process
1088  * @return True if the mo_graph was updated or promises were resolved
1089  */
1090 bool ModelChecker::process_write(ModelAction *curr)
1091 {
1092         /* Readers to which we may send our future value */
1093         ModelVector<ModelAction *> send_fv;
1094
1095         const ModelAction *earliest_promise_reader;
1096         bool updated_promises = false;
1097
1098         bool updated_mod_order = w_modification_order(curr, &send_fv);
1099         Promise *promise = pop_promise_to_resolve(curr);
1100
1101         if (promise) {
1102                 earliest_promise_reader = promise->get_reader(0);
1103                 updated_promises = resolve_promise(curr, promise);
1104         } else
1105                 earliest_promise_reader = NULL;
1106
1107         /* Don't send future values to reads after the Promise we resolve */
1108         for (unsigned int i = 0; i < send_fv.size(); i++) {
1109                 ModelAction *read = send_fv[i];
1110                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1111                         futurevalues->push_back(PendingFutureValue(curr, read));
1112         }
1113
1114         if (promises->empty()) {
1115                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1116                         struct PendingFutureValue pfv = (*futurevalues)[i];
1117                         add_future_value(pfv.writer, pfv.reader);
1118                 }
1119                 futurevalues->clear();
1120         }
1121
1122         mo_graph->commitChanges();
1123         mo_check_promises(curr, false);
1124
1125         get_thread(curr)->set_return_value(VALUE_NONE);
1126         return updated_mod_order || updated_promises;
1127 }
1128
1129 /**
1130  * Process a fence ModelAction
1131  * @param curr The ModelAction to process
1132  * @return True if synchronization was updated
1133  */
1134 bool ModelChecker::process_fence(ModelAction *curr)
1135 {
1136         /*
1137          * fence-relaxed: no-op
1138          * fence-release: only log the occurrence (not in this function), for
1139          *   use in later synchronization
1140          * fence-acquire (this function): search for hypothetical release
1141          *   sequences
1142          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1143          */
1144         bool updated = false;
1145         if (curr->is_acquire()) {
1146                 action_list_t *list = action_trace;
1147                 action_list_t::reverse_iterator rit;
1148                 /* Find X : is_read(X) && X --sb-> curr */
1149                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1150                         ModelAction *act = *rit;
1151                         if (act == curr)
1152                                 continue;
1153                         if (act->get_tid() != curr->get_tid())
1154                                 continue;
1155                         /* Stop at the beginning of the thread */
1156                         if (act->is_thread_start())
1157                                 break;
1158                         /* Stop once we reach a prior fence-acquire */
1159                         if (act->is_fence() && act->is_acquire())
1160                                 break;
1161                         if (!act->is_read())
1162                                 continue;
1163                         /* read-acquire will find its own release sequences */
1164                         if (act->is_acquire())
1165                                 continue;
1166
1167                         /* Establish hypothetical release sequences */
1168                         rel_heads_list_t release_heads;
1169                         get_release_seq_heads(curr, act, &release_heads);
1170                         for (unsigned int i = 0; i < release_heads.size(); i++)
1171                                 if (!curr->synchronize_with(release_heads[i]))
1172                                         set_bad_synchronization();
1173                         if (release_heads.size() != 0)
1174                                 updated = true;
1175                 }
1176         }
1177         return updated;
1178 }
1179
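/*
 * Illustrative user-level pattern for the fence-acquire search above (a
 * sketch, not part of the original source), with std::atomic<bool> flag(false)
 * and a plain int data:
 *
 *   // Thread 1                                    // Thread 2
 *   data = 42;                                     while (!flag.load(std::memory_order_relaxed))
 *   flag.store(true, std::memory_order_release);           ;
 *                                                  std::atomic_thread_fence(std::memory_order_acquire);
 *                                                  r = data;
 *
 * The relaxed load of 'flag' is the prior read X with X --sb-> curr that the
 * loop looks for; synchronizing the acquire fence with the release head makes
 * Thread 1's plain write to 'data' visible to Thread 2 without a data race.
 */
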
1180 /**
1181  * @brief Process the current action for thread-related activity
1182  *
1183  * Performs current-action processing for a THREAD_* ModelAction. Processing
1184  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1185  * synchronization, etc.  This function is a no-op for non-THREAD actions
1186  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1187  *
1188  * @param curr The current action
1189  * @return True if synchronization was updated or a thread completed
1190  */
1191 bool ModelChecker::process_thread_action(ModelAction *curr)
1192 {
1193         bool updated = false;
1194
1195         switch (curr->get_type()) {
1196         case THREAD_CREATE: {
1197                 thrd_t *thrd = (thrd_t *)curr->get_location();
1198                 struct thread_params *params = (struct thread_params *)curr->get_value();
1199                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1200                 add_thread(th);
1201                 th->set_creation(curr);
1202                 /* Promises can be satisfied by children */
1203                 for (unsigned int i = 0; i < promises->size(); i++) {
1204                         Promise *promise = (*promises)[i];
1205                         if (promise->thread_is_available(curr->get_tid()))
1206                                 promise->add_thread(th->get_id());
1207                 }
1208                 break;
1209         }
1210         case THREAD_JOIN: {
1211                 Thread *blocking = curr->get_thread_operand();
1212                 ModelAction *act = get_last_action(blocking->get_id());
1213                 curr->synchronize_with(act);
1214                 updated = true; /* trigger rel-seq checks */
1215                 break;
1216         }
1217         case THREAD_FINISH: {
1218                 Thread *th = get_thread(curr);
1219                 while (!th->wait_list_empty()) {
1220                         ModelAction *act = th->pop_wait_list();
1221                         scheduler->wake(get_thread(act));
1222                 }
1223                 th->complete();
1224                 /* Completed thread can't satisfy promises */
1225                 for (unsigned int i = 0; i < promises->size(); i++) {
1226                         Promise *promise = (*promises)[i];
1227                         if (promise->thread_is_available(th->get_id()))
1228                                 if (promise->eliminate_thread(th->get_id()))
1229                                         priv->failed_promise = true;
1230                 }
1231                 updated = true; /* trigger rel-seq checks */
1232                 break;
1233         }
1234         case THREAD_START: {
1235                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1236                 break;
1237         }
1238         default:
1239                 break;
1240         }
1241
1242         return updated;
1243 }
1244
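/*
 * Illustrative user program driving the THREAD_* cases above (a sketch, not
 * part of the original source; assumes the C11-style <threads.h> shim used by
 * the model checker, in which the thread start routine returns void):
 *
 *   void worker(void *arg) { ... }
 *
 *   thrd_t t;
 *   thrd_create(&t, worker, NULL);   // THREAD_CREATE, then THREAD_START in the child
 *   thrd_join(t);                    // THREAD_JOIN, synchronizes with the child's last action
 */
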
1245 /**
1246  * @brief Process the current action for release sequence fixup activity
1247  *
1248  * Performs model-checker release sequence fixups for the current action,
1249  * forcing a single pending release sequence to break (with a given, potential
1250  * "loose" write) or to complete (i.e., synchronize). If a pending release
1251  * sequence forms a complete release sequence, then we must perform the fixup
1252  * synchronization, mo_graph additions, etc.
1253  *
1254  * @param curr The current action; must be a release sequence fixup action
1255  * @param work_queue The work queue to which to add work items as they are
1256  * generated
1257  */
1258 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1259 {
1260         const ModelAction *write = curr->get_node()->get_relseq_break();
1261         struct release_seq *sequence = pending_rel_seqs->back();
1262         pending_rel_seqs->pop_back();
1263         ASSERT(sequence);
1264         ModelAction *acquire = sequence->acquire;
1265         const ModelAction *rf = sequence->rf;
1266         const ModelAction *release = sequence->release;
1267         ASSERT(acquire);
1268         ASSERT(release);
1269         ASSERT(rf);
1270         ASSERT(release->same_thread(rf));
1271
1272         if (write == NULL) {
1273                 /**
1274                  * @todo Forcing a synchronization requires that we set
1275                  * modification order constraints. For instance, we can't allow
1276                  * a fixup sequence in which two separate read-acquire
1277                  * operations read from the same sequence, where the first one
1278                  * synchronizes and the other doesn't. Essentially, we can't
1279                  * allow any writes to insert themselves between 'release' and
1280                  * 'rf'
1281                  */
1282
1283                 /* Must synchronize */
1284                 if (!acquire->synchronize_with(release)) {
1285                         set_bad_synchronization();
1286                         return;
1287                 }
1288                 /* Re-check all pending release sequences */
1289                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1290                 /* Re-check act for mo_graph edges */
1291                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1292
1293                 /* propagate synchronization to later actions */
1294                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1295                 for (; (*rit) != acquire; rit++) {
1296                         ModelAction *propagate = *rit;
1297                         if (acquire->happens_before(propagate)) {
1298                                 propagate->synchronize_with(acquire);
1299                                 /* Re-check 'propagate' for mo_graph edges */
1300                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1301                         }
1302                 }
1303         } else {
1304                 /* Break release sequence with new edges:
1305                  *   release --mo--> write --mo--> rf */
1306                 mo_graph->addEdge(release, write);
1307                 mo_graph->addEdge(write, rf);
1308         }
1309
1310         /* See if we have realized a data race */
1311         checkDataRaces();
1312 }
1313
1314 /**
1315  * Initialize the current action by performing one or more of the following
1316  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1317  * in the NodeStack, manipulating backtracking sets, allocating and
1318  * initializing clock vectors, and computing the promises to fulfill.
1319  *
1320  * @param curr The current action, as passed from the user context; may be
1321  * freed/invalidated after the execution of this function, with a different
1322  * action "returned" in its place (pass-by-reference)
1323  * @return True if curr is a newly-explored action; false otherwise
1324  */
1325 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1326 {
1327         ModelAction *newcurr;
1328
1329         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1330                 newcurr = process_rmw(*curr);
1331                 delete *curr;
1332
1333                 if (newcurr->is_rmw())
1334                         compute_promises(newcurr);
1335
1336                 *curr = newcurr;
1337                 return false;
1338         }
1339
1340         (*curr)->set_seq_number(get_next_seq_num());
1341
1342         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1343         if (newcurr) {
1344                 /* First restore type and order in case of RMW operation */
1345                 if ((*curr)->is_rmwr())
1346                         newcurr->copy_typeandorder(*curr);
1347
1348                 ASSERT((*curr)->get_location() == newcurr->get_location());
1349                 newcurr->copy_from_new(*curr);
1350
1351                 /* Discard duplicate ModelAction; use action from NodeStack */
1352                 delete *curr;
1353
1354                 /* Always compute new clock vector */
1355                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1356
1357                 *curr = newcurr;
1358                 return false; /* Action was explored previously */
1359         } else {
1360                 newcurr = *curr;
1361
1362                 /* Always compute new clock vector */
1363                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1364
1365                 /* Assign most recent release fence */
1366                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1367
1368                 /*
1369                  * Perform one-time actions when pushing new ModelAction onto
1370                  * NodeStack
1371                  */
1372                 if (newcurr->is_write())
1373                         compute_promises(newcurr);
1374                 else if (newcurr->is_relseq_fixup())
1375                         compute_relseq_breakwrites(newcurr);
1376                 else if (newcurr->is_wait())
1377                         newcurr->get_node()->set_misc_max(2);
1378                 else if (newcurr->is_notify_one()) {
1379                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1380                 }
1381                 return true; /* This was a new ModelAction */
1382         }
1383 }
1384
1385 /**
1386  * @brief Establish reads-from relation between two actions
1387  *
1388  * Perform basic operations involved with establishing a concrete rf relation,
1389  * including setting the ModelAction data and checking for release sequences.
1390  *
1391  * @param act The action that is reading (must be a read)
1392  * @param rf The action from which we are reading (must be a write)
1393  *
1394  * @return True if this read established synchronization
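 *
 * For illustration only (client code using C++11 atomics with assumed names;
 * not part of the checker):
 * @code
 *   // Thread 1                                   // Thread 2
 *   data = 42;                                    if (flag.load(std::memory_order_acquire))
 *   flag.store(1, std::memory_order_release);             assert(data == 42);
 * @endcode
 * If the acquire load reads from the release store, this function establishes
 * the synchronizes-with edge and returns true.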
1395  */
1396 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1397 {
1398         ASSERT(rf);
1399         ASSERT(rf->is_write());
1400
1401         act->set_read_from(rf);
1402         if (act->is_acquire()) {
1403                 rel_heads_list_t release_heads;
1404                 get_release_seq_heads(act, act, &release_heads);
1405                 int num_heads = release_heads.size();
1406                 for (unsigned int i = 0; i < release_heads.size(); i++)
1407                         if (!act->synchronize_with(release_heads[i])) {
1408                                 set_bad_synchronization();
1409                                 num_heads--;
1410                         }
1411                 return num_heads > 0;
1412         }
1413         return false;
1414 }
1415
1416 /**
1417  * Check promises and eliminate potentially-satisfying threads when a thread is
1418  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1419  * no longer satisfy a promise generated from that thread.
1420  *
1421  * @param blocker The thread on which a thread is waiting
1422  * @param waiting The waiting thread
1423  */
1424 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1425 {
1426         for (unsigned int i = 0; i < promises->size(); i++) {
1427                 Promise *promise = (*promises)[i];
1428                 if (!promise->thread_is_available(waiting->get_id()))
1429                         continue;
1430                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1431                         ModelAction *reader = promise->get_reader(j);
1432                         if (reader->get_tid() != blocker->get_id())
1433                                 continue;
1434                         if (promise->eliminate_thread(waiting->get_id())) {
1435                                 /* Promise has failed */
1436                                 priv->failed_promise = true;
1437                         } else {
1438                                 /* Only eliminate the 'waiting' thread once */
1439                                 return;
1440                         }
1441                 }
1442         }
1443 }
1444
1445 /**
1446  * @brief Check whether a model action is enabled.
1447  *
1448  * Checks whether a lock or join operation would succeed (i.e., the lock is
1449  * not already held, or the joined thread has already completed). If not, the
1450  * action is placed in the appropriate waiter list.
1451  *
1452  * @param curr The ModelAction to check
1453  * @return True if the action is enabled; false otherwise
1454  */
1455 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1456         if (curr->is_lock()) {
1457                 std::mutex *lock = (std::mutex *)curr->get_location();
1458                 struct std::mutex_state *state = lock->get_state();
1459                 if (state->locked) {
1460                         //Stick the action in the appropriate waiting queue
1461                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1462                         return false;
1463                 }
1464         } else if (curr->get_type() == THREAD_JOIN) {
1465                 Thread *blocking = (Thread *)curr->get_location();
1466                 if (!blocking->is_complete()) {
1467                         blocking->push_wait_list(curr);
1468                         thread_blocking_check_promises(blocking, get_thread(curr));
1469                         return false;
1470                 }
1471         }
1472
1473         return true;
1474 }
1475
1476 /**
1477  * This is the heart of the model checker routine. It performs model-checking
1478  * actions corresponding to a given "current action." Among other tasks, it
1479  * calculates reads-from relationships, updates synchronization clock vectors,
1480  * forms a memory_order constraints graph, and handles replay/backtrack
1481  * execution when running permutations of previously-observed executions.
1482  *
1483  * @param curr The current action to process
1484  * @return The ModelAction that is actually executed; may differ from curr;
1485  * may be NULL if the current action is not enabled to run
1486  */
1487 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1488 {
1489         ASSERT(curr);
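	/* An RMW or RMWC action is the second half of an atomic
	 * read-modify-write (its RMWR half has already been processed), so it
	 * is not re-added to the action lists below */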
1490         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1491
1492         if (!check_action_enabled(curr)) {
1493                 /* Make the execution look like we chose to run this action
1494                  * much later, when a lock/join can succeed */
1495                 get_thread(curr)->set_pending(curr);
1496                 scheduler->sleep(get_thread(curr));
1497                 return NULL;
1498         }
1499
1500         bool newly_explored = initialize_curr_action(&curr);
1501
1502         DBG();
1503         if (DBG_ENABLED())
1504                 curr->print();
1505
1506         wake_up_sleeping_actions(curr);
1507
1508         /* Compute fairness information for CHESS yield algorithm */
1509         if (model->params.yieldon) {
1510                 curr->get_node()->update_yield(scheduler);
1511         }
1512
1513         /* Add the action to lists before any other model-checking tasks */
1514         if (!second_part_of_rmw)
1515                 add_action_to_lists(curr);
1516
1517         /* Build may_read_from set for newly-created actions */
1518         if (newly_explored && curr->is_read())
1519                 build_may_read_from(curr);
1520
1521         /* Initialize work_queue with the "current action" work */
1522         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
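	/* Process work items to a fixed point: handling one entry may enqueue
	 * release-sequence or mo_graph re-checks for other actions */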
1523         while (!work_queue.empty() && !has_asserted()) {
1524                 WorkQueueEntry work = work_queue.front();
1525                 work_queue.pop_front();
1526
1527                 switch (work.type) {
1528                 case WORK_CHECK_CURR_ACTION: {
1529                         ModelAction *act = work.action;
1530                         bool update = false; /* update this location's release seq's */
1531                         bool update_all = false; /* update all release seq's */
1532
1533                         if (process_thread_action(curr))
1534                                 update_all = true;
1535
1536                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1537                                 update = true;
1538
1539                         if (act->is_write() && process_write(act))
1540                                 update = true;
1541
1542                         if (act->is_fence() && process_fence(act))
1543                                 update_all = true;
1544
1545                         if (act->is_mutex_op() && process_mutex(act))
1546                                 update_all = true;
1547
1548                         if (act->is_relseq_fixup())
1549                                 process_relseq_fixup(curr, &work_queue);
1550
1551                         if (update_all)
1552                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1553                         else if (update)
1554                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1555                         break;
1556                 }
1557                 case WORK_CHECK_RELEASE_SEQ:
1558                         resolve_release_sequences(work.location, &work_queue);
1559                         break;
1560                 case WORK_CHECK_MO_EDGES: {
1561                         /** @todo Complete verification of work_queue */
1562                         ModelAction *act = work.action;
1563                         bool updated = false;
1564
1565                         if (act->is_read()) {
1566                                 const ModelAction *rf = act->get_reads_from();
1567                                 const Promise *promise = act->get_reads_from_promise();
1568                                 if (rf) {
1569                                         if (r_modification_order(act, rf))
1570                                                 updated = true;
1571                                 } else if (promise) {
1572                                         if (r_modification_order(act, promise))
1573                                                 updated = true;
1574                                 }
1575                         }
1576                         if (act->is_write()) {
1577                                 if (w_modification_order(act, NULL))
1578                                         updated = true;
1579                         }
1580                         mo_graph->commitChanges();
1581
1582                         if (updated)
1583                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1584                         break;
1585                 }
1586                 default:
1587                         ASSERT(false);
1588                         break;
1589                 }
1590         }
1591
1592         check_curr_backtracking(curr);
1593         set_backtracking(curr);
1594         return curr;
1595 }
1596
1597 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1598 {
1599         Node *currnode = curr->get_node();
1600         Node *parnode = currnode->get_parent();
1601
1602         if ((parnode && !parnode->backtrack_empty()) ||
1603                          !currnode->misc_empty() ||
1604                          !currnode->read_from_empty() ||
1605                          !currnode->promise_empty() ||
1606                          !currnode->relseq_break_empty()) {
1607                 set_latest_backtrack(curr);
1608         }
1609 }
1610
1611 bool ModelChecker::promises_expired() const
1612 {
1613         for (unsigned int i = 0; i < promises->size(); i++) {
1614                 Promise *promise = (*promises)[i];
1615                 if (promise->get_expiration() < priv->used_sequence_numbers)
1616                         return true;
1617         }
1618         return false;
1619 }
1620
1621 /**
1622  * This is the strongest feasibility check available.
1623  * @return whether the current trace (partial or complete) must be a prefix of
1624  * a feasible trace.
1625  */
1626 bool ModelChecker::isfeasibleprefix() const
1627 {
1628         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1629 }
1630
1631 /**
1632  * Print diagnostic information about an infeasible execution
1633  * @param prefix A string to prefix the output with; if NULL, then a default
1634  * message prefix will be provided
1635  */
1636 void ModelChecker::print_infeasibility(const char *prefix) const
1637 {
1638         char buf[200]; /* large enough for all infeasibility messages combined */
1639         char *ptr = buf;
1640         if (mo_graph->checkForCycles())
1641                 ptr += sprintf(ptr, "[mo cycle]");
1642         if (priv->failed_promise)
1643                 ptr += sprintf(ptr, "[failed promise]");
1644         if (priv->too_many_reads)
1645                 ptr += sprintf(ptr, "[too many reads]");
1646         if (priv->no_valid_reads)
1647                 ptr += sprintf(ptr, "[no valid reads-from]");
1648         if (priv->bad_synchronization)
1649                 ptr += sprintf(ptr, "[bad sw ordering]");
1650         if (promises_expired())
1651                 ptr += sprintf(ptr, "[promise expired]");
1652         if (promises->size() != 0)
1653                 ptr += sprintf(ptr, "[unresolved promise]");
1654         if (ptr != buf)
1655                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1656 }
1657
1658 /**
1659  * Returns whether the current completed trace is feasible, except for pending
1660  * release sequences.
1661  */
1662 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1663 {
1664         return !is_infeasible() && promises->size() == 0;
1665 }
1666
1667 /**
1668  * Check if the current partial trace is infeasible. Does not check any
1669  * end-of-execution flags, which might rule out the execution. Thus, this is
1670  * useful only for ruling an execution as infeasible.
1671  * @return whether the current partial trace is infeasible.
1672  */
1673 bool ModelChecker::is_infeasible() const
1674 {
1675         return mo_graph->checkForCycles() ||
1676                 priv->no_valid_reads ||
1677                 priv->failed_promise ||
1678                 priv->too_many_reads ||
1679                 priv->bad_synchronization ||
1680                 promises_expired();
1681 }
1682
1683 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1684 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1685         ModelAction *lastread = get_last_action(act->get_tid());
1686         lastread->process_rmw(act);
1687         if (act->is_rmw()) {
1688                 if (lastread->get_reads_from())
1689                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1690                 else
1691                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1692                 mo_graph->commitChanges();
1693         }
1694         return lastread;
1695 }
1696
1697 /**
1698  * A helper function for ModelChecker::check_recency, to check whether the
1699  * current thread could have read from a different write/promise for the past
1700  * params.maxreads steps and whether that write/promise should become visible
1701  * (i.e., is ordered later in the modification order). This helps model memory liveness.
1702  *
1703  * @param curr The current action. Must be a read.
1704  * @param rf The write/promise from which we plan to read
1705  * @param other_rf The write/promise from which we may read
1706  * @return True if the thread could have read from other_rf for the past params.maxreads steps
1707  */
1708 template <typename T, typename U>
1709 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1710 {
1711         /* Need a different write/promise */
1712         if (other_rf->equals(rf))
1713                 return false;
1714
1715         /* Only look for "newer" writes/promises */
1716         if (!mo_graph->checkReachable(rf, other_rf))
1717                 return false;
1718
1719         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1720         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1721         action_list_t::reverse_iterator rit = list->rbegin();
1722         ASSERT((*rit) == curr);
1723         /* Skip past curr */
1724         rit++;
1725
1726         /* Does this write/promise work for everyone? */
1727         for (int i = 0; i < params.maxreads; i++, rit++) {
1728                 ModelAction *act = *rit;
1729                 if (!act->may_read_from(other_rf))
1730                         return false;
1731         }
1732         return true;
1733 }
1734
1735 /**
1736  * Checks whether a thread has read from the same write or Promise too many
1737  * times without seeing the effects of a later write/Promise.
1738  *
1739  * Basic idea:
1740  * 1) there must be a different write/promise that we could read from,
1741  * 2) we must have read from the same write/promise in excess of maxreads times,
1742  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1743  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1744  *
1745  * If so, we decide that the execution is no longer feasible.
1746  *
1747  * @param curr The current action. Must be a read.
1748  * @param rf The ModelAction/Promise from which we might read.
1749  * @return True if the read should succeed; false otherwise
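 *
 * For illustration only (client code with assumed names; not checker code):
 * a relaxed spin loop such as
 * @code
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;
 * @endcode
 * could otherwise re-read the stale value of 'flag' forever; once it has done
 * so more than params.maxreads times while a newer store to 'flag' is
 * available, this check fails the read and the execution is ruled infeasible
 * (a liveness failure).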
1750  */
1751 template <typename T>
1752 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1753 {
1754         if (!params.maxreads)
1755                 return true;
1756
1757         //NOTE: The next check is just an optimization; it is not strictly necessary
1758         if (curr->get_node()->get_read_from_past_size() +
1759                         curr->get_node()->get_read_from_promise_size() <= 1)
1760                 return true;
1761
1762         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1763         int tid = id_to_int(curr->get_tid());
1764         ASSERT(tid < (int)thrd_lists->size());
1765         action_list_t *list = &(*thrd_lists)[tid];
1766         action_list_t::reverse_iterator rit = list->rbegin();
1767         ASSERT((*rit) == curr);
1768         /* Skip past curr */
1769         rit++;
1770
1771         action_list_t::reverse_iterator ritcopy = rit;
1772         /* See if we have enough reads from the same value */
1773         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1774                 if (ritcopy == list->rend())
1775                         return true;
1776                 ModelAction *act = *ritcopy;
1777                 if (!act->is_read())
1778                         return true;
1779                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1780                         return true;
1781                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1782                         return true;
1783                 if (act->get_node()->get_read_from_past_size() +
1784                                 act->get_node()->get_read_from_promise_size() <= 1)
1785                         return true;
1786         }
1787         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1788                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1789                 if (should_read_instead(curr, rf, write))
1790                         return false; /* liveness failure */
1791         }
1792         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1793                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1794                 if (should_read_instead(curr, rf, promise))
1795                         return false; /* liveness failure */
1796         }
1797         return true;
1798 }
1799
1800 /**
1801  * Updates the mo_graph with the constraints imposed from the current
1802  * read.
1803  *
1804  * Basic idea is the following: Go through each other thread and find
1805  * the last action that happened before our read.  Two cases:
1806  *
1807  * (1) The action is a write => that write must either occur before
1808  * the write we read from or be the write we read from.
1809  *
1810  * (2) The action is a read => the write that that action read from
1811  * must occur before the write we read from or be the same write.
1812  *
1813  * @param curr The current action. Must be a read.
1814  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1815  * @return True if modification order edges were added; false otherwise
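 *
 * For example (illustrative client code with assumed names):
 * @code
 *   // Thread 1:  x.store(1, std::memory_order_relaxed);
 *   // Thread 2:  x.store(2, std::memory_order_relaxed);
 *   //            r = x.load(std::memory_order_relaxed);   // reads 1
 * @endcode
 * Thread 2's store of 2 is sequenced (hence happens) before its load, so by
 * case (1) this function adds the edge: store(2) --mo--> store(1).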
1816  */
1817 template <typename rf_type>
1818 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1819 {
1820         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1821         unsigned int i;
1822         bool added = false;
1823         ASSERT(curr->is_read());
1824
1825         /* Last SC fence in the current thread */
1826         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1827         ModelAction *last_sc_write = NULL;
1828         if (curr->is_seqcst())
1829                 last_sc_write = get_last_seq_cst_write(curr);
1830
1831         /* Iterate over all threads */
1832         for (i = 0; i < thrd_lists->size(); i++) {
1833                 /* Last SC fence in thread i */
1834                 ModelAction *last_sc_fence_thread_local = NULL;
1835                 if (int_to_id((int)i) != curr->get_tid())
1836                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1837
1838                 /* Last SC fence in thread i, before last SC fence in current thread */
1839                 ModelAction *last_sc_fence_thread_before = NULL;
1840                 if (last_sc_fence_local)
1841                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1842
1843                 /* Iterate over actions in thread, starting from most recent */
1844                 action_list_t *list = &(*thrd_lists)[i];
1845                 action_list_t::reverse_iterator rit;
1846                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1847                         ModelAction *act = *rit;
1848
1849                         /* Skip curr */
1850                         if (act == curr)
1851                                 continue;
1852                         /* Don't want to add reflexive edges on 'rf' */
1853                         if (act->equals(rf)) {
1854                                 if (act->happens_before(curr))
1855                                         break;
1856                                 else
1857                                         continue;
1858                         }
1859
1860                         if (act->is_write()) {
1861                                 /* C++, Section 29.3 statement 5 */
1862                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1863                                                 *act < *last_sc_fence_thread_local) {
1864                                         added = mo_graph->addEdge(act, rf) || added;
1865                                         break;
1866                                 }
1867                                 /* C++, Section 29.3 statement 4 */
1868                                 else if (act->is_seqcst() && last_sc_fence_local &&
1869                                                 *act < *last_sc_fence_local) {
1870                                         added = mo_graph->addEdge(act, rf) || added;
1871                                         break;
1872                                 }
1873                                 /* C++, Section 29.3 statement 6 */
1874                                 else if (last_sc_fence_thread_before &&
1875                                                 *act < *last_sc_fence_thread_before) {
1876                                         added = mo_graph->addEdge(act, rf) || added;
1877                                         break;
1878                                 }
1879                         }
1880
1881                         /* C++, Section 29.3 statement 3 (second subpoint) */
1882                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1883                                 added = mo_graph->addEdge(act, rf) || added;
1884                                 break;
1885                         }
1886
1887                         /*
1888                          * Include at most one act per-thread that "happens
1889                          * before" curr
1890                          */
1891                         if (act->happens_before(curr)) {
1892                                 if (act->is_write()) {
1893                                         added = mo_graph->addEdge(act, rf) || added;
1894                                 } else {
1895                                         const ModelAction *prevrf = act->get_reads_from();
1896                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1897                                         if (prevrf) {
1898                                                 if (!prevrf->equals(rf))
1899                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1900                                         } else if (!prevrf_promise->equals(rf)) {
1901                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1902                                         }
1903                                 }
1904                                 break;
1905                         }
1906                 }
1907         }
1908
1909         /*
1910          * All compatible, thread-exclusive promises must be ordered after any
1911          * concrete loads from the same thread
1912          */
1913         for (unsigned int i = 0; i < promises->size(); i++)
1914                 if ((*promises)[i]->is_compatible_exclusive(curr))
1915                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1916
1917         return added;
1918 }
1919
1920 /**
1921  * Updates the mo_graph with the constraints imposed from the current write.
1922  *
1923  * Basic idea is the following: Go through each other thread and find
1924  * the latest action that happened before our write.  Two cases:
1925  *
1926  * (1) The action is a write => that write must occur before
1927  * the current write
1928  *
1929  * (2) The action is a read => the write that that action read from
1930  * must occur before the current write.
1931  *
1932  * This method also handles two other issues:
1933  *
1934  * (I) Sequential Consistency: Making sure that if the current write is
1935  * seq_cst, that it occurs after the previous seq_cst write.
1936  *
1937  * (II) Sending the write back to non-synchronizing reads.
1938  *
1939  * @param curr The current action. Must be a write.
1940  * @param send_fv A vector for stashing reads to which we may pass our future
1941  * value. If NULL, then don't record any future values.
1942  * @return True if modification order edges were added; false otherwise
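 *
 * For example (illustrative): if the current thread performs
 * r = x.load(relaxed), reading from a store W1 in another thread, and then
 * performs the current write x.store(2, relaxed), case (2) adds the edge
 * W1 --mo--> store(2).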
1943  */
1944 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1945 {
1946         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1947         unsigned int i;
1948         bool added = false;
1949         ASSERT(curr->is_write());
1950
1951         if (curr->is_seqcst()) {
1952                 /* We have to at least see the last sequentially consistent write,
1953                  * so we are initialized. */
1954                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1955                 if (last_seq_cst != NULL) {
1956                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1957                 }
1958         }
1959
1960         /* Last SC fence in the current thread */
1961         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1962
1963         /* Iterate over all threads */
1964         for (i = 0; i < thrd_lists->size(); i++) {
1965                 /* Last SC fence in thread i, before last SC fence in current thread */
1966                 ModelAction *last_sc_fence_thread_before = NULL;
1967                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1968                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1969
1970                 /* Iterate over actions in thread, starting from most recent */
1971                 action_list_t *list = &(*thrd_lists)[i];
1972                 action_list_t::reverse_iterator rit;
1973                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1974                         ModelAction *act = *rit;
1975                         if (act == curr) {
1976                                 /*
1977                                  * 1) If RMW and it actually read from something, then we
1978                                  * already have all relevant edges, so just skip to next
1979                                  * thread.
1980                                  *
1981                                  * 2) If RMW and it didn't read from anything, we should add
1982                                  * whatever edge we can get to speed up convergence.
1983                                  *
1984                                  * 3) If normal write, we need to look at earlier actions, so
1985                                  * continue processing list.
1986                                  */
1987                                 if (curr->is_rmw()) {
1988                                         if (curr->get_reads_from() != NULL)
1989                                                 break;
1990                                         else
1991                                                 continue;
1992                                 } else
1993                                         continue;
1994                         }
1995
1996                         /* C++, Section 29.3 statement 7 */
1997                         if (last_sc_fence_thread_before && act->is_write() &&
1998                                         *act < *last_sc_fence_thread_before) {
1999                                 added = mo_graph->addEdge(act, curr) || added;
2000                                 break;
2001                         }
2002
2003                         /*
2004                          * Include at most one act per-thread that "happens
2005                          * before" curr
2006                          */
2007                         if (act->happens_before(curr)) {
2008                                 /*
2009                                  * Note: if act is RMW, just add edge:
2010                                  *   act --mo--> curr
2011                                  * The following edge should be handled elsewhere:
2012                                  *   readfrom(act) --mo--> act
2013                                  */
2014                                 if (act->is_write())
2015                                         added = mo_graph->addEdge(act, curr) || added;
2016                                 else if (act->is_read()) {
2017                                         //if the previous read's reads-from is not yet resolved (e.g., a promise), just keep going
2018                                         if (act->get_reads_from() == NULL)
2019                                                 continue;
2020                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2021                                 }
2022                                 break;
2023                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2024                                                      !act->same_thread(curr)) {
2025                                 /* We have an action that:
2026                                    (1) did not happen before us
2027                                    (2) is a read and we are a write
2028                                    (3) cannot synchronize with us
2029                                    (4) is in a different thread
2030                                    =>
2031                                    that read could potentially read from our write.  Note that
2032                                    these checks are overly conservative at this point; we'll
2033                                    do more checks before actually removing the pending
2034                                    future value.
2035
2036                                  */
2037                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2038                                         if (!is_infeasible())
2039                                                 send_fv->push_back(act);
2040                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2041                                                 add_future_value(curr, act);
2042                                 }
2043                         }
2044                 }
2045         }
2046
2047         /*
2048          * All compatible, thread-exclusive promises must be ordered after any
2049          * concrete stores by the same thread, or else they can be merged with
2050          * this store later
2051          */
2052         for (unsigned int i = 0; i < promises->size(); i++)
2053                 if ((*promises)[i]->is_compatible_exclusive(curr))
2054                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2055
2056         return added;
2057 }
2058
2059 /** Arbitrary reads from the future are not allowed.  Section 29.3
2060  * part 9 places some constraints.  This method checks one such constraint;
2061  * others require compiler support. */
2062 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2063 {
2064         if (!writer->is_rmw())
2065                 return true;
2066
2067         if (!reader->is_rmw())
2068                 return true;
2069
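	/* Walk the writer's reads-from chain backward; if we reach the reader,
	 * then having the reader read from the writer would complete an RMW
	 * cycle (a read from its own future), which is disallowed */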
2070         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2071                 if (search == reader)
2072                         return false;
2073                 if (search->get_tid() == reader->get_tid() &&
2074                                 search->happens_before(reader))
2075                         break;
2076         }
2077
2078         return true;
2079 }
2080
2081 /**
2082  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2083  * some constraints. This method checks the following constraint (others
2084  * require compiler support):
2085  *
2086  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2087  */
2088 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2089 {
2090         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2091         unsigned int i;
2092         /* Iterate over all threads */
2093         for (i = 0; i < thrd_lists->size(); i++) {
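		/* Find a write Y (or the write a read reads from) such that
		 * reader --hb--> Y; if also Y --mo--> writer, then the read is
		 * disallowed */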
2094                 const ModelAction *write_after_read = NULL;
2095
2096                 /* Iterate over actions in thread, starting from most recent */
2097                 action_list_t *list = &(*thrd_lists)[i];
2098                 action_list_t::reverse_iterator rit;
2099                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2100                         ModelAction *act = *rit;
2101
2102                         /* Don't disallow due to act == reader */
2103                         if (!reader->happens_before(act) || reader == act)
2104                                 break;
2105                         else if (act->is_write())
2106                                 write_after_read = act;
2107                         else if (act->is_read() && act->get_reads_from() != NULL)
2108                                 write_after_read = act->get_reads_from();
2109                 }
2110
2111                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2112                         return false;
2113         }
2114         return true;
2115 }
2116
2117 /**
2118  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2119  * The ModelAction under consideration is expected to be taking part in
2120  * release/acquire synchronization as an object of the "reads from" relation.
2121  * Note that this can only provide release sequence support for RMW chains
2122  * which do not read from the future, as those actions cannot be traced until
2123  * their "promise" is fulfilled. Similarly, we may not even establish the
2124  * presence of a release sequence with certainty, as some modification order
2125  * constraints may be decided further in the future. Thus, this function
2126  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2127  * and a boolean representing certainty.
2128  *
2129  * @param rf The action that might be part of a release sequence. Must be a
2130  * write.
2131  * @param release_heads A pass-by-reference style return parameter. After
2132  * execution of this function, release_heads will contain the heads of all the
2133  * relevant release sequences, if any exist with certainty
2134  * @param pending A pass-by-reference style return parameter which is only used
2135  * when returning false (i.e., uncertain). Returns most information regarding
2136  * an uncertain release sequence, including any write operations that might
2137  * break the sequence.
2138  * @return true, if the ModelChecker is certain that release_heads is complete;
2139  * false otherwise
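 *
 * For illustration only (client code with assumed names; not checker code):
 * @code
 *   x.store(1, std::memory_order_release);       // release head
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 * @endcode
 * An acquire load that reads the fetch_add's result still synchronizes with
 * the release store, so that store would be reported here as a release head.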
2140  */
2141 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2142                 rel_heads_list_t *release_heads,
2143                 struct release_seq *pending) const
2144 {
2145         /* Only check for release sequences if there are no cycles */
2146         if (mo_graph->checkForCycles())
2147                 return false;
2148
2149         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2150                 ASSERT(rf->is_write());
2151
2152                 if (rf->is_release())
2153                         release_heads->push_back(rf);
2154                 else if (rf->get_last_fence_release())
2155                         release_heads->push_back(rf->get_last_fence_release());
2156                 if (!rf->is_rmw())
2157                         break; /* End of RMW chain */
2158
2159                 /** @todo Need to be smarter here...  In the linux lock
2160                  * example, this will run to the beginning of the program for
2161                  * every acquire. */
2162                 /** @todo The way to be smarter here is to keep going until 1
2163                  * thread has a release preceded by an acquire and you've seen
2164                  * both. */
2165
2166                 /* acq_rel RMW is a sufficient stopping condition */
2167                 if (rf->is_acquire() && rf->is_release())
2168                         return true; /* complete */
2169         }
2170         if (!rf) {
2171                 /* read from future: need to settle this later */
2172                 pending->rf = NULL;
2173                 return false; /* incomplete */
2174         }
2175
2176         if (rf->is_release())
2177                 return true; /* complete */
2178
2179         /* else relaxed write
2180          * - check for fence-release in the same thread (29.8, stmt. 3)
2181          * - check modification order for contiguous subsequence
2182          *   -> rf must be same thread as release */
2183
2184         const ModelAction *fence_release = rf->get_last_fence_release();
2185         /* Synchronize with a fence-release unconditionally; we don't need to
2186          * find any more "contiguous subsequence..." for it */
2187         if (fence_release)
2188                 release_heads->push_back(fence_release);
2189
2190         int tid = id_to_int(rf->get_tid());
2191         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2192         action_list_t *list = &(*thrd_lists)[tid];
2193         action_list_t::const_reverse_iterator rit;
2194
2195         /* Find rf in the thread list */
2196         rit = std::find(list->rbegin(), list->rend(), rf);
2197         ASSERT(rit != list->rend());
2198
2199         /* Find the last {write,fence}-release */
2200         for (; rit != list->rend(); rit++) {
2201                 if (fence_release && *(*rit) < *fence_release)
2202                         break;
2203                 if ((*rit)->is_release())
2204                         break;
2205         }
2206         if (rit == list->rend()) {
2207                 /* No write-release in this thread */
2208                 return true; /* complete */
2209         } else if (fence_release && *(*rit) < *fence_release) {
2210                 /* The fence-release is more recent (and so, "stronger") than
2211                  * the most recent write-release */
2212                 return true; /* complete */
2213         } /* else, need to establish contiguous release sequence */
2214         ModelAction *release = *rit;
2215
2216         ASSERT(rf->same_thread(release));
2217
2218         pending->writes.clear();
2219
2220         bool certain = true;
2221         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2222                 if (id_to_int(rf->get_tid()) == (int)i)
2223                         continue;
2224                 list = &(*thrd_lists)[i];
2225
2226                 /* Can we ensure no future writes from this thread may break
2227                  * the release seq? */
2228                 bool future_ordered = false;
2229
2230                 ModelAction *last = get_last_action(int_to_id(i));
2231                 Thread *th = get_thread(int_to_id(i));
2232                 if ((last && rf->happens_before(last)) ||
2233                                 !is_enabled(th) ||
2234                                 th->is_complete())
2235                         future_ordered = true;
2236
2237                 ASSERT(!th->is_model_thread() || future_ordered);
2238
2239                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2240                         const ModelAction *act = *rit;
2241                         /* Reach synchronization -> this thread is complete */
2242                         if (act->happens_before(release))
2243                                 break;
2244                         if (rf->happens_before(act)) {
2245                                 future_ordered = true;
2246                                 continue;
2247                         }
2248
2249                         /* Only non-RMW writes can break release sequences */
2250                         if (!act->is_write() || act->is_rmw())
2251                                 continue;
2252
2253                         /* Check modification order */
2254                         if (mo_graph->checkReachable(rf, act)) {
2255                                 /* rf --mo--> act */
2256                                 future_ordered = true;
2257                                 continue;
2258                         }
2259                         if (mo_graph->checkReachable(act, release))
2260                                 /* act --mo--> release */
2261                                 break;
2262                         if (mo_graph->checkReachable(release, act) &&
2263                                       mo_graph->checkReachable(act, rf)) {
2264                                 /* release --mo-> act --mo--> rf */
2265                                 return true; /* complete */
2266                         }
2267                         /* act may break release sequence */
2268                         pending->writes.push_back(act);
2269                         certain = false;
2270                 }
2271                 if (!future_ordered)
2272                         certain = false; /* This thread is uncertain */
2273         }
2274
2275         if (certain) {
2276                 release_heads->push_back(release);
2277                 pending->writes.clear();
2278         } else {
2279                 pending->release = release;
2280                 pending->rf = rf;
2281         }
2282         return certain;
2283 }
2284
2285 /**
2286  * An interface for getting the release sequence head(s) with which a
2287  * given ModelAction must synchronize. This function only returns a non-empty
2288  * result when it can locate a release sequence head with certainty. Otherwise,
2289  * it may mark the internal state of the ModelChecker so that it will handle
2290  * the release sequence at a later time, causing @a acquire to update its
2291  * synchronization at some later point in execution.
2292  *
2293  * @param acquire The 'acquire' action that may synchronize with a release
2294  * sequence
2295  * @param read The read action that may read from a release sequence; this may
2296  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2297  * when 'acquire' is a fence-acquire)
2298  * @param release_heads A pass-by-reference return parameter. Will be filled
2299  * with the head(s) of the release sequence(s), if they exist with certainty.
2300  * @see ModelChecker::release_seq_heads
2301  */
2302 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2303                 ModelAction *read, rel_heads_list_t *release_heads)
2304 {
2305         const ModelAction *rf = read->get_reads_from();
2306         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2307         sequence->acquire = acquire;
2308         sequence->read = read;
2309
2310         if (!release_seq_heads(rf, release_heads, sequence)) {
2311                 /* add act to 'lazy checking' list */
2312                 pending_rel_seqs->push_back(sequence);
2313         } else {
2314                 snapshot_free(sequence);
2315         }
2316 }
2317
2318 /**
2319  * Attempt to resolve all stashed operations that might synchronize with a
2320  * release sequence for a given location. This implements the "lazy" portion of
2321  * determining whether or not a release sequence was contiguous, since not all
2322  * modification order information is present at the time an action occurs.
2323  *
2324  * @param location The location/object that should be checked for release
2325  * sequence resolutions. A NULL value means to check all locations.
2326  * @param work_queue The work queue to which to add work items as they are
2327  * generated
2328  * @return True if any updates occurred (new synchronization, new mo_graph
2329  * edges)
2330  */
2331 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2332 {
2333         bool updated = false;
2334         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2335         while (it != pending_rel_seqs->end()) {
2336                 struct release_seq *pending = *it;
2337                 ModelAction *acquire = pending->acquire;
2338                 const ModelAction *read = pending->read;
2339
2340                 /* Only resolve sequences on the given location, if provided */
2341                 if (location && read->get_location() != location) {
2342                         it++;
2343                         continue;
2344                 }
2345
2346                 const ModelAction *rf = read->get_reads_from();
2347                 rel_heads_list_t release_heads;
2348                 bool complete;
2349                 complete = release_seq_heads(rf, &release_heads, pending);
2350                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2351                         if (!acquire->has_synchronized_with(release_heads[i])) {
2352                                 if (acquire->synchronize_with(release_heads[i]))
2353                                         updated = true;
2354                                 else
2355                                         set_bad_synchronization();
2356                         }
2357                 }
2358
2359                 if (updated) {
2360                         /* Re-check all pending release sequences */
2361                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2362                         /* Re-check read-acquire for mo_graph edges */
2363                         if (acquire->is_read())
2364                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2365
2366                         /* propagate synchronization to later actions */
2367                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2368                         for (; (*rit) != acquire; rit++) {
2369                                 ModelAction *propagate = *rit;
2370                                 if (acquire->happens_before(propagate)) {
2371                                         propagate->synchronize_with(acquire);
2372                                         /* Re-check 'propagate' for mo_graph edges */
2373                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2374                                 }
2375                         }
2376                 }
2377                 if (complete) {
2378                         it = pending_rel_seqs->erase(it);
2379                         snapshot_free(pending);
2380                 } else {
2381                         it++;
2382                 }
2383         }
2384
2385         // If we resolved any release sequences (i.e., added synchronization), see if we have realized a data race.
2386         checkDataRaces();
2387
2388         return updated;
2389 }
2390
2391 /**
2392  * Performs various bookkeeping operations for the current ModelAction. For
2393  * instance, adds action to the per-object, per-thread action vector and to the
2394  * action trace list of all thread actions.
2395  *
2396  * @param act is the ModelAction to add.
2397  */
2398 void ModelChecker::add_action_to_lists(ModelAction *act)
2399 {
2400         int tid = id_to_int(act->get_tid());
2401         ModelAction *uninit = NULL;
2402         int uninit_id = -1;
2403         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
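	/* On the first access to an atomic object, prepend a placeholder
	 * "uninitialized" action so that later reads have an initial write
	 * available to read from */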
2404         if (list->empty() && act->is_atomic_var()) {
2405                 uninit = get_uninitialized_action(act);
2406                 uninit_id = id_to_int(uninit->get_tid());
2407                 list->push_front(uninit);
2408         }
2409         list->push_back(act);
2410
2411         action_trace->push_back(act);
2412         if (uninit)
2413                 action_trace->push_front(uninit);
2414
2415         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2416         if (tid >= (int)vec->size())
2417                 vec->resize(priv->next_thread_id);
2418         (*vec)[tid].push_back(act);
2419         if (uninit)
2420                 (*vec)[uninit_id].push_front(uninit);
2421
2422         if ((int)thrd_last_action->size() <= tid)
2423                 thrd_last_action->resize(get_num_threads());
2424         (*thrd_last_action)[tid] = act;
2425         if (uninit)
2426                 (*thrd_last_action)[uninit_id] = uninit;
2427
2428         if (act->is_fence() && act->is_release()) {
2429                 if ((int)thrd_last_fence_release->size() <= tid)
2430                         thrd_last_fence_release->resize(get_num_threads());
2431                 (*thrd_last_fence_release)[tid] = act;
2432         }
2433
2434         if (act->is_wait()) {
2435                 void *mutex_loc = (void *) act->get_value();
2436                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2437
2438                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2439                 if (tid >= (int)vec->size())
2440                         vec->resize(priv->next_thread_id);
2441                 (*vec)[tid].push_back(act);
2442         }
2443 }
2444
2445 /**
2446  * @brief Get the last action performed by a particular Thread
2447  * @param tid The thread ID of the Thread in question
2448  * @return The last action in the thread
2449  */
2450 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2451 {
2452         int threadid = id_to_int(tid);
2453         if (threadid < (int)thrd_last_action->size())
2454                 return (*thrd_last_action)[id_to_int(tid)];
2455         else
2456                 return NULL;
2457 }
2458
2459 /**
2460  * @brief Get the last fence release performed by a particular Thread
2461  * @param tid The thread ID of the Thread in question
2462  * @return The last fence release in the thread, if one exists; NULL otherwise
2463  */
2464 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2465 {
2466         int threadid = id_to_int(tid);
2467         if (threadid < (int)thrd_last_fence_release->size())
2468                 return (*thrd_last_fence_release)[id_to_int(tid)];
2469         else
2470                 return NULL;
2471 }
2472
2473 /**
2474  * Gets the last memory_order_seq_cst write (in the total global sequence)
2475  * performed on a particular object (i.e., memory location), not including the
2476  * current action.
2477  * @param curr The current ModelAction; also denotes the object location to
2478  * check
2479  * @return The last seq_cst write
2480  */
2481 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2482 {
2483         void *location = curr->get_location();
2484         action_list_t *list = get_safe_ptr_action(obj_map, location);
2485         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2486         action_list_t::reverse_iterator rit;
2487         for (rit = list->rbegin(); (*rit) != curr; rit++)
2488                 ;
2489         rit++; /* Skip past curr */
2490         for ( ; rit != list->rend(); rit++)
2491                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2492                         return *rit;
2493         return NULL;
2494 }
2495
2496 /**
2497  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2498  * performed in a particular thread, prior to a particular fence.
2499  * @param tid The ID of the thread to check
2500  * @param before_fence The fence from which to begin the search; if NULL, then
2501  * search for the most recent fence in the thread.
2502  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2503  */
2504 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2505 {
2506         /* All fences should have NULL location */
2507         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2508         action_list_t::reverse_iterator rit = list->rbegin();
2509
2510         if (before_fence) {
2511                 for (; rit != list->rend(); rit++)
2512                         if (*rit == before_fence)
2513                                 break;
2514
2515                 ASSERT(*rit == before_fence);
2516                 rit++;
2517         }
2518
2519         for (; rit != list->rend(); rit++)
2520                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2521                         return *rit;
2522         return NULL;
2523 }
2524
2525 /**
2526  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2527  * location). This function identifies the mutex according to the current
2528  * action, which is presumed to operate on the same mutex.
2529  * @param curr The current ModelAction; also denotes the object location to
2530  * check
2531  * @return The last unlock operation
2532  */
2533 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2534 {
2535         void *location = curr->get_location();
2536         action_list_t *list = get_safe_ptr_action(obj_map, location);
2537         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2538         action_list_t::reverse_iterator rit;
2539         for (rit = list->rbegin(); rit != list->rend(); rit++)
2540                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2541                         return *rit;
2542         return NULL;
2543 }
2544
2545 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2546 {
2547         ModelAction *parent = get_last_action(tid);
2548         if (!parent)
2549                 parent = get_thread(tid)->get_creation();
2550         return parent;
2551 }
2552
2553 /**
2554  * Returns the clock vector for a given thread.
2555  * @param tid The thread whose clock vector we want
2556  * @return Desired clock vector
2557  */
2558 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2559 {
2560         return get_parent_action(tid)->get_cv();
2561 }
2562
2563 /**
2564  * @brief Find the promise (if any) to resolve for the current action and
2565  * remove it from the pending promise vector
2566  * @param curr The current ModelAction. Should be a write.
2567  * @return The Promise to resolve, if any; otherwise NULL
2568  */
2569 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2570 {
2571         for (unsigned int i = 0; i < promises->size(); i++)
2572                 if (curr->get_node()->get_promise(i)) {
2573                         Promise *ret = (*promises)[i];
2574                         promises->erase(promises->begin() + i);
2575                         return ret;
2576                 }
2577         return NULL;
2578 }
2579
2580 /**
2581  * Resolve a Promise with a current write.
2582  * @param write The ModelAction that is fulfilling Promises
2583  * @param promise The Promise to resolve
2584  * @return True if the Promise was successfully resolved; false otherwise
2585  */
2586 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2587 {
2588         ModelVector<ModelAction *> actions_to_check;
2589
2590         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2591                 ModelAction *read = promise->get_reader(i);
2592                 read_from(read, write);
2593                 actions_to_check.push_back(read);
2594         }
2595         /* Make sure the promise's value matches the write's value */
2596         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2597         if (!mo_graph->resolvePromise(promise, write))
2598                 priv->failed_promise = true;
2599
2600         /**
2601          * @todo  It is possible to end up in an inconsistent state, where a
2602          * "resolved" promise may still be referenced if
2603          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2604          *
2605          * Note that the inconsistency only matters when dumping mo_graph to
2606          * file.
2607          *
2608          * delete promise;
2609          */
2610
2611         // Check whether reading these writes has made threads unable to
2612         // resolve promises
2613         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2614                 ModelAction *read = actions_to_check[i];
2615                 mo_check_promises(read, true);
2616         }
2617
2618         return true;
2619 }
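
/*
 * Illustrative scenario (sketch, not from the original source): a Promise
 * stands for a value that some read has speculatively returned before any
 * write of that value has been explored. For a hypothetical program like
 *
 *   std::atomic<int> flag{0};
 *   // Thread 1:
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;
 *   // Thread 2:
 *   flag.store(1, std::memory_order_relaxed);
 *
 * Thread 1's load may be explored as reading the future value 1; when Thread
 * 2's store is later executed, it fulfills that Promise via resolve_promise().
 */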
2620
2621 /**
2622  * Compute the set of promises that could potentially be satisfied by this
2623  * action. Note that the set computation actually appears in the Node, not in
2624  * ModelChecker.
2625  * @param curr The ModelAction that may satisfy promises
2626  */
2627 void ModelChecker::compute_promises(ModelAction *curr)
2628 {
2629         for (unsigned int i = 0; i < promises->size(); i++) {
2630                 Promise *promise = (*promises)[i];
2631                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2632                         continue;
2633
2634                 bool satisfy = true;
2635                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2636                         const ModelAction *act = promise->get_reader(j);
2637                         if (act->happens_before(curr) ||
2638                                         act->could_synchronize_with(curr)) {
2639                                 satisfy = false;
2640                                 break;
2641                         }
2642                 }
2643                 if (satisfy)
2644                         curr->get_node()->set_promise(i);
2645         }
2646 }
2647
2648 /** Checks promises in response to a change in a thread's ClockVector. */
2649 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2650 {
2651         for (unsigned int i = 0; i < promises->size(); i++) {
2652                 Promise *promise = (*promises)[i];
2653                 if (!promise->thread_is_available(tid))
2654                         continue;
2655                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2656                         const ModelAction *act = promise->get_reader(j);
2657                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2658                                         merge_cv->synchronized_since(act)) {
2659                                 if (promise->eliminate_thread(tid)) {
2660                                         /* Promise has failed */
2661                                         priv->failed_promise = true;
2662                                         return;
2663                                 }
2664                         }
2665                 }
2666         }
2667 }
2668
2669 void ModelChecker::check_promises_thread_disabled()
2670 {
2671         for (unsigned int i = 0; i < promises->size(); i++) {
2672                 Promise *promise = (*promises)[i];
2673                 if (promise->has_failed()) {
2674                         priv->failed_promise = true;
2675                         return;
2676                 }
2677         }
2678 }
2679
2680 /**
2681  * @brief Checks promises in response to addition to modification order for
2682  * threads.
2683  *
2684  * We test whether threads are still available for satisfying promises after an
2685  * addition to our modification order constraints. Those that are unavailable
2686  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2687  * that promise has failed.
2688  *
2689  * @param act The ModelAction which updated the modification order
2690  * @param is_read_check Should be true if act is a read and we must check for
2691  * updates to the store from which it read (there is a distinction here for
2692  * RMW's, which are both a load and a store)
2693  */
2694 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2695 {
2696         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2697
2698         for (unsigned int i = 0; i < promises->size(); i++) {
2699                 Promise *promise = (*promises)[i];
2700
2701                 // Is this promise on the same location?
2702                 if (!promise->same_location(write))
2703                         continue;
2704
2705                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2706                         const ModelAction *pread = promise->get_reader(j);
2707                         if (!pread->happens_before(act))
2708                                continue;
2709                         if (mo_graph->checkPromise(write, promise)) {
2710                                 priv->failed_promise = true;
2711                                 return;
2712                         }
2713                         break;
2714                 }
2715
2716                 // Don't do any lookups twice for the same thread
2717                 if (!promise->thread_is_available(act->get_tid()))
2718                         continue;
2719
2720                 if (mo_graph->checkReachable(promise, write)) {
2721                         if (mo_graph->checkPromise(write, promise)) {
2722                                 priv->failed_promise = true;
2723                                 return;
2724                         }
2725                 }
2726         }
2727 }
2728
2729 /**
2730  * Compute the set of writes that may break the current pending release
2731  * sequence. This information is extracted from previous release sequence
2732  * calculations.
2733  *
2734  * @param curr The current ModelAction. Must be a release sequence fixup
2735  * action.
2736  */
2737 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2738 {
2739         if (pending_rel_seqs->empty())
2740                 return;
2741
2742         struct release_seq *pending = pending_rel_seqs->back();
2743         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2744                 const ModelAction *write = pending->writes[i];
2745                 curr->get_node()->add_relseq_break(write);
2746         }
2747
2748         /* NULL means don't break the sequence; just synchronize */
2749         curr->get_node()->add_relseq_break(NULL);
2750 }
2751
2752 /**
2753  * Build up an initial set of all past writes that this 'read' action may read
2754  * from, as well as any previously-observed future values that must still be valid.
2755  *
2756  * @param curr is the current ModelAction that we are exploring; it must be a
2757  * 'read' operation.
2758  */
2759 void ModelChecker::build_may_read_from(ModelAction *curr)
2760 {
2761         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2762         unsigned int i;
2763         ASSERT(curr->is_read());
2764
2765         ModelAction *last_sc_write = NULL;
2766
2767         if (curr->is_seqcst())
2768                 last_sc_write = get_last_seq_cst_write(curr);
2769
2770         /* Iterate over all threads */
2771         for (i = 0; i < thrd_lists->size(); i++) {
2772                 /* Iterate over actions in thread, starting from most recent */
2773                 action_list_t *list = &(*thrd_lists)[i];
2774                 action_list_t::reverse_iterator rit;
2775                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2776                         ModelAction *act = *rit;
2777
2778                         /* Only consider 'write' actions */
2779                         if (!act->is_write() || act == curr)
2780                                 continue;
2781
2782                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2783                         bool allow_read = true;
2784
2785                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2786                                 allow_read = false;
2787                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2788                                 allow_read = false;
2789
2790                         if (allow_read) {
2791                                 /* Only add feasible reads */
2792                                 mo_graph->startChanges();
2793                                 r_modification_order(curr, act);
2794                                 if (!is_infeasible())
2795                                         curr->get_node()->add_read_from_past(act);
2796                                 mo_graph->rollbackChanges();
2797                         }
2798
2799                         /* Include at most one act per thread that "happens before" curr */
2800                         if (act->happens_before(curr))
2801                                 break;
2802                 }
2803         }
2804
2805         /* Inherit existing, promised future values */
2806         for (i = 0; i < promises->size(); i++) {
2807                 const Promise *promise = (*promises)[i];
2808                 const ModelAction *promise_read = promise->get_reader(0);
2809                 if (promise_read->same_var(curr)) {
2810                         /* Only add feasible future-values */
2811                         mo_graph->startChanges();
2812                         r_modification_order(curr, promise);
2813                         if (!is_infeasible())
2814                                 curr->get_node()->add_read_from_promise(promise_read);
2815                         mo_graph->rollbackChanges();
2816                 }
2817         }
2818
2819         /* The may-read-from set should only be empty if the execution is doomed */
2820         if (!curr->get_node()->read_from_size()) {
2821                 priv->no_valid_reads = true;
2822                 set_assert();
2823         }
2824
2825         if (DBG_ENABLED()) {
2826                 model_print("Reached read action:\n");
2827                 curr->print();
2828                 model_print("Printing read_from_past\n");
2829                 curr->get_node()->print_read_from_past();
2830                 model_print("End printing read_from_past\n");
2831         }
2832 }
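
/*
 * Worked example (sketch, not from the original source): for a hypothetical
 * program
 *
 *   std::atomic<int> x{0};
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);
 *   // Thread 2:
 *   x.store(2, std::memory_order_release);
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);
 *
 * with no other synchronization, neither store happens before the load, so
 * the may-read-from set built above contains the initialization of x and both
 * stores, minus any candidates pruned by the feasibility check.
 */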
2833
2834 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2835 {
2836         for ( ; write != NULL; write = write->get_reads_from()) {
2837                 /* UNINIT actions don't have a Node, and they never sleep */
2838                 if (write->is_uninitialized())
2839                         return true;
2840                 Node *prevnode = write->get_node()->get_parent();
2841
2842                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2843                 if (write->is_release() && thread_sleep)
2844                         return true;
2845                 if (!write->is_rmw())
2846                         return false;
2847         }
2848         return true;
2849 }
2850
2851 /**
2852  * @brief Get an action representing an uninitialized atomic
2853  *
2854  * This function may create a new one or try to retrieve one from the NodeStack
2855  *
2856  * @param curr The current action, which prompts the creation of an UNINIT action
2857  * @return A pointer to the UNINIT ModelAction
2858  */
2859 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2860 {
2861         Node *node = curr->get_node();
2862         ModelAction *act = node->get_uninit_action();
2863         if (!act) {
2864                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2865                 node->set_uninit_action(act);
2866         }
2867         act->create_cv(NULL);
2868         return act;
2869 }
2870
2871 static void print_list(action_list_t *list)
2872 {
2873         action_list_t::iterator it;
2874
2875         model_print("---------------------------------------------------------------------\n");
2876
2877         unsigned int hash = 0;
2878
2879         for (it = list->begin(); it != list->end(); it++) {
2880                 const ModelAction *act = *it;
2881                 if (act->get_seq_number() > 0)
2882                         act->print();
2883                 hash = hash^(hash<<3)^((*it)->hash());
2884         }
2885         model_print("HASH %u\n", hash);
2886         model_print("---------------------------------------------------------------------\n");
2887 }
2888
2889 #if SUPPORT_MOD_ORDER_DUMP
2890 void ModelChecker::dumpGraph(char *filename) const
2891 {
2892         char buffer[200];
2893         sprintf(buffer, "%s.dot", filename);
2894         FILE *file = fopen(buffer, "w");
2895         fprintf(file, "digraph %s {\n", filename);
2896         mo_graph->dumpNodes(file);
2897         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2898
2899         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2900                 ModelAction *act = *it;
2901                 if (act->is_read()) {
2902                         mo_graph->dot_print_node(file, act);
2903                         if (act->get_reads_from())
2904                                 mo_graph->dot_print_edge(file,
2905                                                 act->get_reads_from(),
2906                                                 act,
2907                                                 "label=\"rf\", color=red, weight=2");
2908                         else
2909                                 mo_graph->dot_print_edge(file,
2910                                                 act->get_reads_from_promise(),
2911                                                 act,
2912                                                 "label=\"rf\", color=red");
2913                 }
2914                 if (thread_array[id_to_int(act->get_tid())]) {
2915                         mo_graph->dot_print_edge(file,
2916                                         thread_array[id_to_int(act->get_tid())],
2917                                         act,
2918                                         "label=\"sb\", color=blue, weight=400");
2919                 }
2920
2921                 thread_array[id_to_int(act->get_tid())] = act;
2922         }
2923         fprintf(file, "}\n");
2924         model_free(thread_array);
2925         fclose(file);
2926 }
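
/*
 * The emitted .dot file can be rendered with Graphviz, for example (the exact
 * filename depends on the execution number used by print_summary()):
 *
 *   dot -Tpng graph0001.dot -o graph0001.png
 */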
2927 #endif
2928
2929 /** @brief Prints an execution trace summary. */
2930 void ModelChecker::print_summary() const
2931 {
2932 #if SUPPORT_MOD_ORDER_DUMP
2933         char buffername[100];
2934         sprintf(buffername, "exec%04u", stats.num_total);
2935         mo_graph->dumpGraphToFile(buffername);
2936         sprintf(buffername, "graph%04u", stats.num_total);
2937         dumpGraph(buffername);
2938 #endif
2939
2940         model_print("Execution %d:", stats.num_total);
2941         if (isfeasibleprefix()) {
2942                 if (scheduler->all_threads_sleeping())
2943                         model_print(" SLEEP-SET REDUNDANT");
2944                 model_print("\n");
2945         } else
2946                 print_infeasibility(" INFEASIBLE");
2947         print_list(action_trace);
2948         model_print("\n");
2949         if (!promises->empty()) {
2950                 model_print("Pending promises:\n");
2951                 for (unsigned int i = 0; i < promises->size(); i++) {
2952                         model_print(" [P%u] ", i);
2953                         (*promises)[i]->print();
2954                 }
2955                 model_print("\n");
2956         }
2957 }
2958
2959 /**
2960  * Add a Thread to the system for the first time. Should only be called once
2961  * per thread.
2962  * @param t The Thread to add
2963  */
2964 void ModelChecker::add_thread(Thread *t)
2965 {
2966         thread_map->put(id_to_int(t->get_id()), t);
2967         scheduler->add_thread(t);
2968 }
2969
2970 /**
2971  * @brief Get a Thread reference by its ID
2972  * @param tid The Thread's ID
2973  * @return A Thread reference
2974  */
2975 Thread * ModelChecker::get_thread(thread_id_t tid) const
2976 {
2977         return thread_map->get(id_to_int(tid));
2978 }
2979
2980 /**
2981  * @brief Get a reference to the Thread in which a ModelAction was executed
2982  * @param act The ModelAction
2983  * @return A Thread reference
2984  */
2985 Thread * ModelChecker::get_thread(const ModelAction *act) const
2986 {
2987         return get_thread(act->get_tid());
2988 }
2989
2990 /**
2991  * @brief Get a Promise's "promise number"
2992  *
2993  * A "promise number" is an index number that is unique to a promise, valid
2994  * only for a specific snapshot of an execution trace. Promises may come and go
2995  * as they are generated and resolved, so an index only retains meaning for the
2996  * current snapshot.
2997  *
2998  * @param promise The Promise to check
2999  * @return The promise index, if the promise still is valid; otherwise -1
3000  */
3001 int ModelChecker::get_promise_number(const Promise *promise) const
3002 {
3003         for (unsigned int i = 0; i < promises->size(); i++)
3004                 if ((*promises)[i] == promise)
3005                         return i;
3006         /* Not found */
3007         return -1;
3008 }
3009
3010 /**
3011  * @brief Check if a Thread is currently enabled
3012  * @param t The Thread to check
3013  * @return True if the Thread is currently enabled
3014  */
3015 bool ModelChecker::is_enabled(Thread *t) const
3016 {
3017         return scheduler->is_enabled(t);
3018 }
3019
3020 /**
3021  * @brief Check if a Thread is currently enabled
3022  * @param tid The ID of the Thread to check
3023  * @return True if the Thread is currently enabled
3024  */
3025 bool ModelChecker::is_enabled(thread_id_t tid) const
3026 {
3027         return scheduler->is_enabled(tid);
3028 }
3029
3030 /**
3031  * Switch from a model-checker context to a user-thread context. This is the
3032  * complement of ModelChecker::switch_to_master and must be called from the
3033  * model-checker context
3034  *
3035  * @param thread The user-thread to switch to
3036  */
3037 void ModelChecker::switch_from_master(Thread *thread)
3038 {
3039         scheduler->set_current_thread(thread);
3040         Thread::swap(&system_context, thread);
3041 }
3042
3043 /**
3044  * Switch from a user-thread context to the "master thread" context (a.k.a. system
3045  * context). This switch is made with the intention of exploring a particular
3046  * model-checking action (described by a ModelAction object). Must be called
3047  * from a user-thread context.
3048  *
3049  * @param act The current action that will be explored. May be NULL only if
3050  * trace is exiting via an assertion (see ModelChecker::set_assert and
3051  * ModelChecker::has_asserted).
3052  * @return Return the value returned by the current action
3053  */
3054 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3055 {
3056         DBG();
3057         Thread *old = thread_current();
3058         scheduler->set_current_thread(NULL);
3059         ASSERT(!old->get_pending());
3060         old->set_pending(act);
3061         if (Thread::swap(old, &system_context) < 0) {
3062                 perror("swap threads");
3063                 exit(EXIT_FAILURE);
3064         }
3065         return old->get_return_value();
3066 }
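
/*
 * Typical call shape (sketch; assumes the usual interception-layer usage and
 * ModelAction's default arguments, not verified here): an intercepted atomic
 * operation wraps itself in a ModelAction and yields to the checker, e.g.
 *
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, std::memory_order_acquire, obj));
 *
 * For a read, the returned value is the value the model checker selected for
 * that read.
 */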
3067
3068 /**
3069  * Takes the next step in the execution, if possible.
3070  * @param curr The current step to take
3071  * @return Returns the next Thread to run, if any; NULL if this execution
3072  * should terminate
3073  */
3074 Thread * ModelChecker::take_step(ModelAction *curr)
3075 {
3076         Thread *curr_thrd = get_thread(curr);
3077         ASSERT(curr_thrd->get_state() == THREAD_READY);
3078
3079         curr = check_current_action(curr);
3080
3081         /* Infeasible -> don't take any more steps */
3082         if (is_infeasible())
3083                 return NULL;
3084         else if (isfeasibleprefix() && have_bug_reports()) {
3085                 set_assert();
3086                 return NULL;
3087         }
3088
3089         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3090                 return NULL;
3091
3092         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3093                 scheduler->remove_thread(curr_thrd);
3094
3095         Thread *next_thrd = NULL;
3096         if (curr)
3097                 next_thrd = action_select_next_thread(curr);
3098         if (!next_thrd)
3099                 next_thrd = get_next_thread();
3100
3101         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3102                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3103
3104         return next_thrd;
3105 }
3106
3107 /** Wrapper to run the user's main function, with appropriate arguments */
3108 void user_main_wrapper(void *)
3109 {
3110         user_main(model->params.argc, model->params.argv);
3111 }
3112
3113 /** @brief Run ModelChecker for the user program */
3114 void ModelChecker::run()
3115 {
3116         do {
3117                 thrd_t user_thread;
3118                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3119                 add_thread(t);
3120
3121                 do {
3122                         /*
3123                          * Stash next pending action(s) for thread(s). There
3124                          * should only need to stash one thread's action--the
3125                          * thread which just took a step--plus the first step
3126                          * for any newly-created thread
3127                          */
3128                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3129                                 thread_id_t tid = int_to_id(i);
3130                                 Thread *thr = get_thread(tid);
3131                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3132                                         switch_from_master(thr);
3133                                         if (is_circular_wait(thr))
3134                                                 assert_bug("Deadlock detected");
3135                                 }
3136                         }
3137
3138                         /* Catch assertions from prior take_step or from
3139                          * between-ModelAction bugs (e.g., data races) */
3140                         if (has_asserted())
3141                                 break;
3142
3143                         /* Consume the next action for a Thread */
3144                         ModelAction *curr = t->get_pending();
3145                         t->set_pending(NULL);
3146                         t = take_step(curr);
3147                 } while (t && !t->is_model_thread());
3148
3149                 /*
3150                  * Launch end-of-execution release sequence fixups only when
3151                  * the execution is otherwise feasible AND there are:
3152                  *
3153                  * (1) pending release sequences
3154                  * (2) pending assertions that could be invalidated by a change
3155                  * in clock vectors (i.e., data races)
3156                  * (3) no pending promises
3157                  */
3158                 while (!pending_rel_seqs->empty() &&
3159                                 is_feasible_prefix_ignore_relseq() &&
3160                                 !unrealizedraces.empty()) {
3161                         model_print("*** WARNING: release sequence fixup action "
3162                                         "(%zu pending release sequence(s)) ***\n",
3163                                         pending_rel_seqs->size());
3164                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3165                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3166                                         model_thread);
3167                         take_step(fixup);
3168                 }
3169         } while (next_execution());
3170
3171         model_print("******* Model-checking complete: *******\n");
3172         print_stats();
3173 }