model: add 'const' (c11tester.git / model.cc)
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
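/**
 * @brief Snapshot-allocated wrapper for a formatted bug-report message
 *
 * Holds a heap copy of the "  [BUG] ..." string so that the report survives
 * until it is printed at the end of the execution.
 */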
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new SnapVector<Promise *>()),
90         futurevalues(new SnapVector<struct PendingFutureValue>()),
91         pending_rel_seqs(new SnapVector<struct release_seq *>()),
92         thrd_last_action(new SnapVector<ModelAction *>(1)),
93         thrd_last_fence_release(new SnapVector<ModelAction *>()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
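/**
 * @brief Look up the action list for a memory location, allocating an empty
 * list on first use so that callers never receive NULL
 */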
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
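/**
 * @brief Per-thread analogue of get_safe_ptr_action(): look up (or lazily
 * allocate) the vector of per-thread action lists for a memory location
 */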
140 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         SnapVector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new SnapVector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts and cannot be split. Likewise, THREAD_CREATE
211  * actions should be followed by the execution of their child thread. In either
212  * case, the current action should determine the next thread schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
228
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * we defer to the scheduler.
236  *
237  * @return The next thread chosen to run, if one exists; otherwise NULL, if the
238  * current execution should terminate.
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_misc()) {
264                         /* The next node will try to satisfy a different misc_index value. */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else if (nextnode->increment_promise()) {
268                         /* The next node will try to satisfy a different set of promises. */
269                         tid = next->get_tid();
270                         node_stack->pop_restofstack(2);
271                 } else if (nextnode->increment_read_from()) {
272                         /* The next node will read from a different value. */
273                         tid = next->get_tid();
274                         node_stack->pop_restofstack(2);
275                 } else if (nextnode->increment_relseq_break()) {
276                         /* The next node will try to resolve a release sequence differently */
277                         tid = next->get_tid();
278                         node_stack->pop_restofstack(2);
279                 } else {
280                         ASSERT(prevnode);
281                         /* Make a different thread execute for next step */
282                         scheduler->add_sleep(get_thread(next->get_tid()));
283                         tid = prevnode->get_next_backtrack();
284                         /* Make sure the backtracked thread isn't sleeping. */
285                         node_stack->pop_restofstack(1);
286                         if (diverge == earliest_diverge) {
287                                 earliest_diverge = prevnode->get_action();
288                         }
289                 }
290                 /* Start the round robin scheduler from this thread id */
291                 scheduler->set_scheduler_thread(tid);
292                 /* The correct sleep set is in the parent node. */
293                 execute_sleep_set();
294
295                 DEBUG("*** Divergence point ***\n");
296
297                 diverge = NULL;
298         } else {
299                 tid = next->get_tid();
300         }
301         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
302         ASSERT(tid != THREAD_ID_T_NONE);
303         return thread_map->get(id_to_int(tid));
304 }
305
306 /**
307  * We need to know what the next actions of all threads in the sleep
308  * set will be.  This method computes them and stores the actions at
309  * the corresponding thread object's pending action.
310  */
311
312 void ModelChecker::execute_sleep_set()
313 {
314         for (unsigned int i = 0; i < get_num_threads(); i++) {
315                 thread_id_t tid = int_to_id(i);
316                 Thread *thr = get_thread(tid);
317                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
318                         thr->get_pending()->set_sleep_flag();
319                 }
320         }
321 }
322
323 /**
324  * @brief Should the current action wake up a given thread?
325  *
326  * @param curr The current action
327  * @param thread The thread that we might wake up
328  * @return True, if we should wake up the sleeping thread; false otherwise
329  */
330 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
331 {
332         const ModelAction *asleep = thread->get_pending();
333         /* Don't allow partial RMW to wake anyone up */
334         if (curr->is_rmwr())
335                 return false;
336         /* Synchronizing actions may have been backtracked */
337         if (asleep->could_synchronize_with(curr))
338                 return true;
339         /* All acquire/release fences and fence-acquire/store-release */
340         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
341                 return true;
342         /* Fence-release + store can awake load-acquire on the same location */
343         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
344                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
345                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
346                         return true;
347         }
348         return false;
349 }
350
351 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
352 {
353         for (unsigned int i = 0; i < get_num_threads(); i++) {
354                 Thread *thr = get_thread(int_to_id(i));
355                 if (scheduler->is_sleep_set(thr)) {
356                         if (should_wake_up(curr, thr))
357                                 /* Remove this thread from sleep set */
358                                 scheduler->remove_sleep(thr);
359                 }
360         }
361 }
362
363 /** @brief Alert the model-checker that an incorrectly-ordered
364  * synchronization was made */
365 void ModelChecker::set_bad_synchronization()
366 {
367         priv->bad_synchronization = true;
368 }
369
370 /**
371  * Check whether the current trace has triggered an assertion which should halt
372  * its execution.
373  *
374  * @return True, if the execution should be aborted; false otherwise
375  */
376 bool ModelChecker::has_asserted() const
377 {
378         return priv->asserted;
379 }
380
381 /**
382  * Trigger a trace assertion which should cause this execution to be halted.
383  * This can be due to a detected bug or due to an infeasibility that should
384  * halt ASAP.
385  */
386 void ModelChecker::set_assert()
387 {
388         priv->asserted = true;
389 }
390
391 /**
392  * Check if we are in a deadlock. Should only be called at the end of an
393  * execution, although it should not give false positives in the middle of an
394  * execution (there should be some ENABLED thread).
395  *
396  * @return True if program is in a deadlock; false otherwise
397  */
398 bool ModelChecker::is_deadlocked() const
399 {
400         bool blocking_threads = false;
401         for (unsigned int i = 0; i < get_num_threads(); i++) {
402                 thread_id_t tid = int_to_id(i);
403                 if (is_enabled(tid))
404                         return false;
405                 Thread *t = get_thread(tid);
406                 if (!t->is_model_thread() && t->get_pending())
407                         blocking_threads = true;
408         }
409         return blocking_threads;
410 }
411
412 /**
413  * Check if a Thread has entered a circular wait deadlock situation. This will
414  * not check other threads for potential deadlock situations, and may miss
415  * deadlocks involving WAIT.
416  *
417  * @param t The thread which may have entered a deadlock
418  * @return True if this Thread entered a deadlock; false otherwise
419  */
420 bool ModelChecker::is_circular_wait(const Thread *t) const
421 {
422         for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on())
423                 if (waiting == t)
424                         return true;
425         return false;
426 }
427
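/*
 * Illustrative sketch (not part of the checker): a user program, using the
 * model checker's std::mutex wrapper, in which each thread holds one lock and
 * blocks on the other. Each Thread's waiting_on() chain then cycles back to
 * itself, so is_circular_wait() reports the deadlock for either thread.
 *
 *   std::mutex a, b;
 *   void t1() { a.lock(); b.lock(); b.unlock(); a.unlock(); }
 *   void t2() { b.lock(); a.lock(); a.unlock(); b.unlock(); }
 */
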
428 /**
429  * Check if this is a complete execution. That is, have all threads completed
430  * execution (rather than exiting because sleep sets have forced a redundant
431  * execution).
432  *
433  * @return True if the execution is complete.
434  */
435 bool ModelChecker::is_complete_execution() const
436 {
437         for (unsigned int i = 0; i < get_num_threads(); i++)
438                 if (is_enabled(int_to_id(i)))
439                         return false;
440         return true;
441 }
442
443 /**
444  * @brief Assert a bug in the executing program.
445  *
446  * Use this function to assert any sort of bug in the user program. If the
447  * current trace is feasible (actually, a prefix of some feasible execution),
448  * then this execution will be aborted, printing the appropriate message. If
449  * the current trace is not yet feasible, the error message will be stashed and
450  * printed if the execution ever becomes feasible.
451  *
452  * @param msg Descriptive message for the bug (do not include newline char)
453  * @return True if bug is immediately-feasible
454  */
455 bool ModelChecker::assert_bug(const char *msg)
456 {
457         priv->bugs.push_back(new bug_message(msg));
458
459         if (isfeasibleprefix()) {
460                 set_assert();
461                 return true;
462         }
463         return false;
464 }
465
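/*
 * Minimal internal-usage sketch (hypothetical condition name): report a
 * violation and halt only once the trace is a feasible prefix, exactly as
 * next_execution() does with assert_bug("Deadlock detected").
 *
 *   if (property_violated)
 *           model->assert_bug("User invariant violated");
 */
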
466 /**
467  * @brief Assert a bug in the executing program, asserted by a user thread
468  * @see ModelChecker::assert_bug
469  * @param msg Descriptive message for the bug (do not include newline char)
470  */
471 void ModelChecker::assert_user_bug(const char *msg)
472 {
473         /* If feasible bug, bail out now */
474         if (assert_bug(msg))
475                 switch_to_master(NULL);
476 }
477
478 /** @return True, if any bugs have been reported for this execution */
479 bool ModelChecker::have_bug_reports() const
480 {
481         return priv->bugs.size() != 0;
482 }
483
484 /** @brief Print bug report listing for this execution (if any bugs exist) */
485 void ModelChecker::print_bugs() const
486 {
487         if (have_bug_reports()) {
488                 model_print("Bug report: %zu bug%s detected\n",
489                                 priv->bugs.size(),
490                                 priv->bugs.size() > 1 ? "s" : "");
491                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
492                         priv->bugs[i]->print();
493         }
494 }
495
496 /**
497  * @brief Record end-of-execution stats
498  *
499  * Must be run when exiting an execution. Records various stats.
500  * @see struct execution_stats
501  */
502 void ModelChecker::record_stats()
503 {
504         stats.num_total++;
505         if (!isfeasibleprefix())
506                 stats.num_infeasible++;
507         else if (have_bug_reports())
508                 stats.num_buggy_executions++;
509         else if (is_complete_execution())
510                 stats.num_complete++;
511         else {
512                 stats.num_redundant++;
513
514                 /**
515                  * @todo We can violate this ASSERT() when fairness/sleep sets
516                  * conflict to cause an execution to terminate, e.g. with:
517                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
518                  */
519                 //ASSERT(scheduler->all_threads_sleeping());
520         }
521 }
522
523 /** @brief Print execution stats */
524 void ModelChecker::print_stats() const
525 {
526         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
527         model_print("Number of redundant executions: %d\n", stats.num_redundant);
528         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
529         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
530         model_print("Total executions: %d\n", stats.num_total);
531         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
532 }
533
534 /**
535  * @brief End-of-execution print
536  * @param printbugs Should any existing bugs be printed?
537  */
538 void ModelChecker::print_execution(bool printbugs) const
539 {
540         print_program_output();
541
542         if (params.verbose) {
543                 model_print("Earliest divergence point since last feasible execution:\n");
544                 if (earliest_diverge)
545                         earliest_diverge->print();
546                 else
547                         model_print("(Not set)\n");
548
549                 model_print("\n");
550                 print_stats();
551         }
552
553         /* Don't print invalid bugs */
554         if (printbugs)
555                 print_bugs();
556
557         model_print("\n");
558         print_summary();
559 }
560
561 /**
562  * Queries the model-checker for more executions to explore and, if one
563  * exists, resets the model-checker state to execute a new execution.
564  *
565  * @return If there are more executions to explore, return true. Otherwise,
566  * return false.
567  */
568 bool ModelChecker::next_execution()
569 {
570         DBG();
571         /* Is this execution a feasible execution that's worth bug-checking? */
572         bool complete = isfeasibleprefix() && (is_complete_execution() ||
573                         have_bug_reports());
574
575         /* End-of-execution bug checks */
576         if (complete) {
577                 if (is_deadlocked())
578                         assert_bug("Deadlock detected");
579
580                 checkDataRaces();
581         }
582
583         record_stats();
584
585         /* Output */
586         if (params.verbose || (complete && have_bug_reports()))
587                 print_execution(complete);
588         else
589                 clear_program_output();
590
591         if (complete)
592                 earliest_diverge = NULL;
593
594         if ((diverge = get_next_backtrack()) == NULL)
595                 return false;
596
597         if (DBG_ENABLED()) {
598                 model_print("Next execution will diverge at:\n");
599                 diverge->print();
600         }
601
602         reset_to_initial_state();
603         return true;
604 }
605
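/*
 * Schematic of the loop a driver might build around next_execution() (a
 * sketch with a hypothetical helper, not the actual main.cc):
 *
 *   do {
 *           run_user_program();   // execute one interleaving to completion
 *   } while (model->next_execution());
 *
 * next_execution() records stats, optionally prints the trace, and rolls back
 * to the initial snapshot whenever another backtracking point remains.
 */
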
606 /**
607  * @brief Find the last fence-related backtracking conflict for a ModelAction
608  *
609  * This function performs the search for the most recent conflicting action
610  * against which we should perform backtracking, as affected by fence
611  * operations. This includes pairs of potentially-synchronizing actions which
612  * occur due to fence-acquire or fence-release, and hence should be explored in
613  * the opposite execution order.
614  *
615  * @param act The current action
616  * @return The most recent action which conflicts with act due to fences
617  */
618 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
619 {
620         /* Only perform release/acquire fence backtracking for stores */
621         if (!act->is_write())
622                 return NULL;
623
624         /* Find a fence-release (or, act is a release) */
625         ModelAction *last_release;
626         if (act->is_release())
627                 last_release = act;
628         else
629                 last_release = get_last_fence_release(act->get_tid());
630         if (!last_release)
631                 return NULL;
632
633         /* Skip past the release */
634         action_list_t *list = action_trace;
635         action_list_t::reverse_iterator rit;
636         for (rit = list->rbegin(); rit != list->rend(); rit++)
637                 if (*rit == last_release)
638                         break;
639         ASSERT(rit != list->rend());
640
641         /* Find a prior:
642          *   load-acquire
643          * or
644          *   load --sb-> fence-acquire */
645         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
646         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
647         bool found_acquire_fences = false;
648         for ( ; rit != list->rend(); rit++) {
649                 ModelAction *prev = *rit;
650                 if (act->same_thread(prev))
651                         continue;
652
653                 int tid = id_to_int(prev->get_tid());
654
655                 if (prev->is_read() && act->same_var(prev)) {
656                         if (prev->is_acquire()) {
657                                 /* Found most recent load-acquire, don't need
658                                  * to search for more fences */
659                                 if (!found_acquire_fences)
660                                         return NULL;
661                         } else {
662                                 prior_loads[tid] = prev;
663                         }
664                 }
665                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
666                         found_acquire_fences = true;
667                         acquire_fences[tid] = prev;
668                 }
669         }
670
671         ModelAction *latest_backtrack = NULL;
672         for (unsigned int i = 0; i < acquire_fences.size(); i++)
673                 if (acquire_fences[i] && prior_loads[i])
674                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
675                                 latest_backtrack = acquire_fences[i];
676         return latest_backtrack;
677 }
678
679 /**
680  * @brief Find the last backtracking conflict for a ModelAction
681  *
682  * This function performs the search for the most recent conflicting action
683  * against which we should perform backtracking. This primarily includes pairs of
684  * synchronizing actions which should be explored in the opposite execution
685  * order.
686  *
687  * @param act The current action
688  * @return The most recent action which conflicts with act
689  */
690 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
691 {
692         switch (act->get_type()) {
693         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
694         case ATOMIC_READ:
695         case ATOMIC_WRITE:
696         case ATOMIC_RMW: {
697                 ModelAction *ret = NULL;
698
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (prev->could_synchronize_with(act)) {
705                                 ret = prev;
706                                 break;
707                         }
708                 }
709
710                 ModelAction *ret2 = get_last_fence_conflict(act);
711                 if (!ret2)
712                         return ret;
713                 if (!ret)
714                         return ret2;
715                 if (*ret < *ret2)
716                         return ret2;
717                 return ret;
718         }
719         case ATOMIC_LOCK:
720         case ATOMIC_TRYLOCK: {
721                 /* linear search: from most recent to oldest */
722                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
723                 action_list_t::reverse_iterator rit;
724                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
725                         ModelAction *prev = *rit;
726                         if (act->is_conflicting_lock(prev))
727                                 return prev;
728                 }
729                 break;
730         }
731         case ATOMIC_UNLOCK: {
732                 /* linear search: from most recent to oldest */
733                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
734                 action_list_t::reverse_iterator rit;
735                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
736                         ModelAction *prev = *rit;
737                         if (!act->same_thread(prev) && prev->is_failed_trylock())
738                                 return prev;
739                 }
740                 break;
741         }
742         case ATOMIC_WAIT: {
743                 /* linear search: from most recent to oldest */
744                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
745                 action_list_t::reverse_iterator rit;
746                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
747                         ModelAction *prev = *rit;
748                         if (!act->same_thread(prev) && prev->is_failed_trylock())
749                                 return prev;
750                         if (!act->same_thread(prev) && prev->is_notify())
751                                 return prev;
752                 }
753                 break;
754         }
755
756         case ATOMIC_NOTIFY_ALL:
757         case ATOMIC_NOTIFY_ONE: {
758                 /* linear search: from most recent to oldest */
759                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
760                 action_list_t::reverse_iterator rit;
761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
762                         ModelAction *prev = *rit;
763                         if (!act->same_thread(prev) && prev->is_wait())
764                                 return prev;
765                 }
766                 break;
767         }
768         default:
769                 break;
770         }
771         return NULL;
772 }
773
774 /** This method finds backtracking points against which we should try to
775  * reorder the parameter ModelAction.
776  *
777  * @param act The ModelAction to find backtracking points for.
778  */
779 void ModelChecker::set_backtracking(ModelAction *act)
780 {
781         Thread *t = get_thread(act);
782         ModelAction *prev = get_last_conflict(act);
783         if (prev == NULL)
784                 return;
785
786         Node *node = prev->get_node()->get_parent();
787
788         int low_tid, high_tid;
789         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
790                 low_tid = id_to_int(act->get_tid());
791                 high_tid = low_tid + 1;
792         } else {
793                 low_tid = 0;
794                 high_tid = get_num_threads();
795         }
796
797         for (int i = low_tid; i < high_tid; i++) {
798                 thread_id_t tid = int_to_id(i);
799
800                 /* Make sure this thread can be enabled here. */
801                 if (i >= node->get_num_threads())
802                         break;
803
804                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
805                 if (node->enabled_status(tid) != THREAD_ENABLED)
806                         continue;
807
808                 /* Check if this has been explored already */
809                 if (node->has_been_explored(tid))
810                         continue;
811
812                 /* See if fairness allows */
813                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
814                         bool unfair = false;
815                         for (int t = 0; t < node->get_num_threads(); t++) {
816                                 thread_id_t tother = int_to_id(t);
817                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
818                                         unfair = true;
819                                         break;
820                                 }
821                         }
822                         if (unfair)
823                                 continue;
824                 }
825
826                 /* See if CHESS-like yield fairness allows */
827                 if (model->params.yieldon) {
828                         bool unfair = false;
829                         for (int t = 0; t < node->get_num_threads(); t++) {
830                                 thread_id_t tother = int_to_id(t);
831                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
832                                         unfair = true;
833                                         break;
834                                 }
835                         }
836                         if (unfair)
837                                 continue;
838                 }
839
840                 /* Cache the latest backtracking point */
841                 set_latest_backtrack(prev);
842
843                 /* If this is a new backtracking point, mark the tree */
844                 if (!node->set_backtrack(tid))
845                         continue;
846                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
847                                         id_to_int(prev->get_tid()),
848                                         id_to_int(t->get_id()));
849                 if (DBG_ENABLED()) {
850                         prev->print();
851                         act->print();
852                 }
853         }
854 }
855
856 /**
857  * @brief Cache a backtracking point as the "most recent", if eligible
858  *
859  * Note that this does not prepare the NodeStack for this backtracking
860  * operation; it only caches the action on a per-execution basis
861  *
862  * @param act The operation at which we should explore a different next action
863  * (i.e., backtracking point)
864  * @return True, if this action is now the most recent backtracking point;
865  * false otherwise
866  */
867 bool ModelChecker::set_latest_backtrack(ModelAction *act)
868 {
869         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
870                 priv->next_backtrack = act;
871                 return true;
872         }
873         return false;
874 }
875
876 /**
877  * Returns last backtracking point. The model checker will explore a different
878  * path for this point in the next execution.
879  * @return The ModelAction at which the next execution should diverge.
880  */
881 ModelAction * ModelChecker::get_next_backtrack()
882 {
883         ModelAction *next = priv->next_backtrack;
884         priv->next_backtrack = NULL;
885         return next;
886 }
887
888 /**
889  * Processes a read model action.
890  * @param curr is the read model action to process.
891  * @return True if processing this read updates the mo_graph.
892  */
893 bool ModelChecker::process_read(ModelAction *curr)
894 {
895         Node *node = curr->get_node();
896         while (true) {
897                 bool updated = false;
898                 switch (node->get_read_from_status()) {
899                 case READ_FROM_PAST: {
900                         const ModelAction *rf = node->get_read_from_past();
901                         ASSERT(rf);
902
903                         mo_graph->startChanges();
904
905                         ASSERT(!is_infeasible());
906                         if (!check_recency(curr, rf)) {
907                                 if (node->increment_read_from()) {
908                                         mo_graph->rollbackChanges();
909                                         continue;
910                                 } else {
911                                         priv->too_many_reads = true;
912                                 }
913                         }
914
915                         updated = r_modification_order(curr, rf);
916                         read_from(curr, rf);
917                         mo_graph->commitChanges();
918                         mo_check_promises(curr, true);
919                         break;
920                 }
921                 case READ_FROM_PROMISE: {
922                         Promise *promise = curr->get_node()->get_read_from_promise();
923                         if (promise->add_reader(curr))
924                                 priv->failed_promise = true;
925                         curr->set_read_from_promise(promise);
926                         mo_graph->startChanges();
927                         if (!check_recency(curr, promise))
928                                 priv->too_many_reads = true;
929                         updated = r_modification_order(curr, promise);
930                         mo_graph->commitChanges();
931                         break;
932                 }
933                 case READ_FROM_FUTURE: {
934                         /* Read from future value */
935                         struct future_value fv = node->get_future_value();
936                         Promise *promise = new Promise(curr, fv);
937                         curr->set_read_from_promise(promise);
938                         promises->push_back(promise);
939                         mo_graph->startChanges();
940                         updated = r_modification_order(curr, promise);
941                         mo_graph->commitChanges();
942                         break;
943                 }
944                 default:
945                         ASSERT(false);
946                 }
947                 get_thread(curr)->set_return_value(curr->get_return_value());
948                 return updated;
949         }
950 }
951
952 /**
953  * Processes a lock, trylock, unlock, wait, or notify model action.
954  * @param curr is the mutex-related model action to process.
955  *
956  * The trylock operation checks whether the lock is already taken.  If
957  * not, it falls through to the normal lock case.  If so, it returns
958  * failure.
959  *
960  * The lock operation has already been checked to be enabled, so
961  * it just grabs the lock and synchronizes with the previous unlock.
962  *
963  * The unlock operation has to re-enable all of the threads that are
964  * waiting on the lock.
965  *
966  * @return True if synchronization was updated; false otherwise
967  */
968 bool ModelChecker::process_mutex(ModelAction *curr)
969 {
970         std::mutex *mutex = curr->get_mutex();
971         struct std::mutex_state *state = NULL;
972
973         if (mutex)
974                 state = mutex->get_state();
975
976         switch (curr->get_type()) {
977         case ATOMIC_TRYLOCK: {
978                 bool success = !state->locked;
979                 curr->set_try_lock(success);
980                 if (!success) {
981                         get_thread(curr)->set_return_value(0);
982                         break;
983                 }
984                 get_thread(curr)->set_return_value(1);
985         }
986                 //otherwise fall through to the lock case
987         case ATOMIC_LOCK: {
988                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
989                         assert_bug("Lock access before initialization");
990                 state->locked = get_thread(curr);
991                 ModelAction *unlock = get_last_unlock(curr);
992                 //synchronize with the previous unlock statement
993                 if (unlock != NULL) {
994                         curr->synchronize_with(unlock);
995                         return true;
996                 }
997                 break;
998         }
999         case ATOMIC_UNLOCK: {
1000                 //unlock the lock
1001                 state->locked = NULL;
1002                 //wake up the other threads
1003                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
1004                 //activate all the waiting threads
1005                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1006                         scheduler->wake(get_thread(*rit));
1007                 }
1008                 waiters->clear();
1009                 break;
1010         }
1011         case ATOMIC_WAIT: {
1012                 //unlock the lock
1013                 state->locked = NULL;
1014                 //wake up the other threads
1015                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
1016                 //activate all the waiting threads
1017                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1018                         scheduler->wake(get_thread(*rit));
1019                 }
1020                 waiters->clear();
1021                 //check whether we should go to sleep or not...simulate spurious failures
1022                 if (curr->get_node()->get_misc() == 0) {
1023                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1024                         //disable us
1025                         scheduler->sleep(get_thread(curr));
1026                 }
1027                 break;
1028         }
1029         case ATOMIC_NOTIFY_ALL: {
1030                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1031                 //activate all the waiting threads
1032                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1033                         scheduler->wake(get_thread(*rit));
1034                 }
1035                 waiters->clear();
1036                 break;
1037         }
1038         case ATOMIC_NOTIFY_ONE: {
1039                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1040                 int wakeupthread = curr->get_node()->get_misc();
1041                 action_list_t::iterator it = waiters->begin();
1042                 advance(it, wakeupthread);
1043                 scheduler->wake(get_thread(*it));
1044                 waiters->erase(it);
1045                 break;
1046         }
1047
1048         default:
1049                 ASSERT(0);
1050         }
1051         return false;
1052 }
1053
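/*
 * Illustrative user-side sketch for the ATOMIC_WAIT/NOTIFY cases above
 * (assuming the checker's condition_variable wrapper, whose wait() takes the
 * mutex directly): because ATOMIC_WAIT models a spurious wakeup whenever the
 * node's misc counter is nonzero, user code must re-check its predicate in a
 * loop.
 *
 *   std::mutex m;
 *   std::condition_variable cv;
 *   bool ready = false;
 *
 *   // consumer                 // producer
 *   m.lock();                   m.lock();
 *   while (!ready)              ready = true;
 *           cv.wait(m);         cv.notify_one();
 *   m.unlock();                 m.unlock();
 */
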
1054 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1055 {
1056         /* Do more ambitious checks now that mo is more complete */
1057         if (mo_may_allow(writer, reader)) {
1058                 Node *node = reader->get_node();
1059
1060                 /* Find an ancestor thread which exists at the time of the reader */
1061                 Thread *write_thread = get_thread(writer);
1062                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1063                         write_thread = write_thread->get_parent();
1064
1065                 struct future_value fv = {
1066                         writer->get_write_value(),
1067                         writer->get_seq_number() + params.maxfuturedelay,
1068                         write_thread->get_id(),
1069                 };
1070                 if (node->add_future_value(fv))
1071                         set_latest_backtrack(reader);
1072         }
1073 }
1074
1075 /**
1076  * Process a write ModelAction
1077  * @param curr The ModelAction to process
1078  * @return True if the mo_graph was updated or promises were resolved
1079  */
1080 bool ModelChecker::process_write(ModelAction *curr)
1081 {
1082         /* Readers to which we may send our future value */
1083         ModelVector<ModelAction *> send_fv;
1084
1085         const ModelAction *earliest_promise_reader;
1086         bool updated_promises = false;
1087
1088         bool updated_mod_order = w_modification_order(curr, &send_fv);
1089         Promise *promise = pop_promise_to_resolve(curr);
1090
1091         if (promise) {
1092                 earliest_promise_reader = promise->get_reader(0);
1093                 updated_promises = resolve_promise(curr, promise);
1094         } else
1095                 earliest_promise_reader = NULL;
1096
1097         /* Don't send future values to reads after the Promise we resolve */
1098         for (unsigned int i = 0; i < send_fv.size(); i++) {
1099                 ModelAction *read = send_fv[i];
1100                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1101                         futurevalues->push_back(PendingFutureValue(curr, read));
1102         }
1103
1104         if (promises->empty()) {
1105                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1106                         struct PendingFutureValue pfv = (*futurevalues)[i];
1107                         add_future_value(pfv.writer, pfv.reader);
1108                 }
1109                 futurevalues->clear();
1110         }
1111
1112         mo_graph->commitChanges();
1113         mo_check_promises(curr, false);
1114
1115         get_thread(curr)->set_return_value(VALUE_NONE);
1116         return updated_mod_order || updated_promises;
1117 }
1118
1119 /**
1120  * Process a fence ModelAction
1121  * @param curr The ModelAction to process
1122  * @return True if synchronization was updated
1123  */
1124 bool ModelChecker::process_fence(ModelAction *curr)
1125 {
1126         /*
1127          * fence-relaxed: no-op
1128  * fence-release: only log the occurrence (not in this function), for
1129          *   use in later synchronization
1130          * fence-acquire (this function): search for hypothetical release
1131          *   sequences
1132          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1133          */
1134         bool updated = false;
1135         if (curr->is_acquire()) {
1136                 action_list_t *list = action_trace;
1137                 action_list_t::reverse_iterator rit;
1138                 /* Find X : is_read(X) && X --sb-> curr */
1139                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1140                         ModelAction *act = *rit;
1141                         if (act == curr)
1142                                 continue;
1143                         if (act->get_tid() != curr->get_tid())
1144                                 continue;
1145                         /* Stop at the beginning of the thread */
1146                         if (act->is_thread_start())
1147                                 break;
1148                         /* Stop once we reach a prior fence-acquire */
1149                         if (act->is_fence() && act->is_acquire())
1150                                 break;
1151                         if (!act->is_read())
1152                                 continue;
1153                         /* read-acquire will find its own release sequences */
1154                         if (act->is_acquire())
1155                                 continue;
1156
1157                         /* Establish hypothetical release sequences */
1158                         rel_heads_list_t release_heads;
1159                         get_release_seq_heads(curr, act, &release_heads);
1160                         for (unsigned int i = 0; i < release_heads.size(); i++)
1161                                 if (!curr->synchronize_with(release_heads[i]))
1162                                         set_bad_synchronization();
1163                         if (release_heads.size() != 0)
1164                                 updated = true;
1165                 }
1166         }
1167         return updated;
1168 }
1169
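/*
 * Hedged C11-style sketch of the pattern the fence-acquire pass above
 * handles: a relaxed load that is sequenced before an acquire fence can
 * synchronize with a release store (or release sequence) in another thread.
 *
 *   // Thread 1                          // Thread 2
 *   data = 42;                           r = atomic_load_explicit(&flag,
 *   atomic_store_explicit(&flag, 1,              memory_order_relaxed);
 *           memory_order_release);       atomic_thread_fence(
 *                                                memory_order_acquire);
 *                                        if (r == 1) assert(data == 42);
 */
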
1170 /**
1171  * @brief Process the current action for thread-related activity
1172  *
1173  * Performs current-action processing for a THREAD_* ModelAction. Processing
1174  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1175  * synchronization, etc.  This function is a no-op for non-THREAD actions
1176  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1177  *
1178  * @param curr The current action
1179  * @return True if synchronization was updated or a thread completed
1180  */
1181 bool ModelChecker::process_thread_action(ModelAction *curr)
1182 {
1183         bool updated = false;
1184
1185         switch (curr->get_type()) {
1186         case THREAD_CREATE: {
1187                 thrd_t *thrd = (thrd_t *)curr->get_location();
1188                 struct thread_params *params = (struct thread_params *)curr->get_value();
1189                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1190                 add_thread(th);
1191                 th->set_creation(curr);
1192                 /* Promises can be satisfied by children */
1193                 for (unsigned int i = 0; i < promises->size(); i++) {
1194                         Promise *promise = (*promises)[i];
1195                         if (promise->thread_is_available(curr->get_tid()))
1196                                 promise->add_thread(th->get_id());
1197                 }
1198                 break;
1199         }
1200         case THREAD_JOIN: {
1201                 Thread *blocking = curr->get_thread_operand();
1202                 ModelAction *act = get_last_action(blocking->get_id());
1203                 curr->synchronize_with(act);
1204                 updated = true; /* trigger rel-seq checks */
1205                 break;
1206         }
1207         case THREAD_FINISH: {
1208                 Thread *th = get_thread(curr);
1209                 while (!th->wait_list_empty()) {
1210                         ModelAction *act = th->pop_wait_list();
1211                         scheduler->wake(get_thread(act));
1212                 }
1213                 th->complete();
1214                 /* Completed thread can't satisfy promises */
1215                 for (unsigned int i = 0; i < promises->size(); i++) {
1216                         Promise *promise = (*promises)[i];
1217                         if (promise->thread_is_available(th->get_id()))
1218                                 if (promise->eliminate_thread(th->get_id()))
1219                                         priv->failed_promise = true;
1220                 }
1221                 updated = true; /* trigger rel-seq checks */
1222                 break;
1223         }
1224         case THREAD_START: {
1225                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1226                 break;
1227         }
1228         default:
1229                 break;
1230         }
1231
1232         return updated;
1233 }
1234
1235 /**
1236  * @brief Process the current action for release sequence fixup activity
1237  *
1238  * Performs model-checker release sequence fixups for the current action,
1239  * forcing a single pending release sequence to break (with a given, potential
1240  * "loose" write) or to complete (i.e., synchronize). If a pending release
1241  * sequence forms a complete release sequence, then we must perform the fixup
1242  * synchronization, mo_graph additions, etc.
1243  *
1244  * @param curr The current action; must be a release sequence fixup action
1245  * @param work_queue The work queue to which to add work items as they are
1246  * generated
1247  */
1248 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1249 {
1250         const ModelAction *write = curr->get_node()->get_relseq_break();
1251         struct release_seq *sequence = pending_rel_seqs->back();
1252         pending_rel_seqs->pop_back();
1253         ASSERT(sequence);
1254         ModelAction *acquire = sequence->acquire;
1255         const ModelAction *rf = sequence->rf;
1256         const ModelAction *release = sequence->release;
1257         ASSERT(acquire);
1258         ASSERT(release);
1259         ASSERT(rf);
1260         ASSERT(release->same_thread(rf));
1261
1262         if (write == NULL) {
1263                 /**
1264                  * @todo Forcing a synchronization requires that we set
1265                  * modification order constraints. For instance, we can't allow
1266                  * a fixup sequence in which two separate read-acquire
1267                  * operations read from the same sequence, where the first one
1268                  * synchronizes and the other doesn't. Essentially, we can't
1269                  * allow any writes to insert themselves between 'release' and
1270                  * 'rf'
1271                  */
1272
1273                 /* Must synchronize */
1274                 if (!acquire->synchronize_with(release)) {
1275                         set_bad_synchronization();
1276                         return;
1277                 }
1278                 /* Re-check all pending release sequences */
1279                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1280                 /* Re-check act for mo_graph edges */
1281                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1282
1283                 /* propagate synchronization to later actions */
1284                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1285                 for (; (*rit) != acquire; rit++) {
1286                         ModelAction *propagate = *rit;
1287                         if (acquire->happens_before(propagate)) {
1288                                 propagate->synchronize_with(acquire);
1289                                 /* Re-check 'propagate' for mo_graph edges */
1290                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1291                         }
1292                 }
1293         } else {
1294                 /* Break release sequence with new edges:
1295                  *   release --mo--> write --mo--> rf */
1296                 mo_graph->addEdge(release, write);
1297                 mo_graph->addEdge(write, rf);
1298         }
1299
1300         /* See if we have realized a data race */
1301         checkDataRaces();
1302 }
1303
1304 /**
1305  * Initialize the current action by performing one or more of the following
1306  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1307  * in the NodeStack, manipulating backtracking sets, allocating and
1308  * initializing clock vectors, and computing the promises to fulfill.
1309  *
1310  * @param curr The current action, as passed from the user context; may be
1311  * freed/invalidated after the execution of this function, with a different
1312  * action "returned" in its place (pass-by-reference)
1313  * @return True if curr is a newly-explored action; false otherwise
1314  */
1315 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1316 {
1317         ModelAction *newcurr;
1318
1319         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1320                 newcurr = process_rmw(*curr);
1321                 delete *curr;
1322
1323                 if (newcurr->is_rmw())
1324                         compute_promises(newcurr);
1325
1326                 *curr = newcurr;
1327                 return false;
1328         }
1329
1330         (*curr)->set_seq_number(get_next_seq_num());
1331
1332         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1333         if (newcurr) {
1334                 /* First restore type and order in case of RMW operation */
1335                 if ((*curr)->is_rmwr())
1336                         newcurr->copy_typeandorder(*curr);
1337
1338                 ASSERT((*curr)->get_location() == newcurr->get_location());
1339                 newcurr->copy_from_new(*curr);
1340
1341                 /* Discard duplicate ModelAction; use action from NodeStack */
1342                 delete *curr;
1343
1344                 /* Always compute new clock vector */
1345                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1346
1347                 *curr = newcurr;
1348                 return false; /* Action was explored previously */
1349         } else {
1350                 newcurr = *curr;
1351
1352                 /* Always compute new clock vector */
1353                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1354
1355                 /* Assign most recent release fence */
1356                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1357
1358                 /*
1359                  * Perform one-time actions when pushing new ModelAction onto
1360                  * NodeStack
1361                  */
1362                 if (newcurr->is_write())
1363                         compute_promises(newcurr);
1364                 else if (newcurr->is_relseq_fixup())
1365                         compute_relseq_breakwrites(newcurr);
1366                 else if (newcurr->is_wait())
1367                         newcurr->get_node()->set_misc_max(2);
1368                 else if (newcurr->is_notify_one()) {
1369                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1370                 }
1371                 return true; /* This was a new ModelAction */
1372         }
1373 }
1374
1375 /**
1376  * @brief Establish reads-from relation between two actions
1377  *
1378  * Perform basic operations involved with establishing a concrete rf relation,
1379  * including setting the ModelAction data and checking for release sequences.
1380  *
1381  * @param act The action that is reading (must be a read)
1382  * @param rf The action from which we are reading (must be a write)
1383  *
1384  * @return True if this read established synchronization
1385  */
1386 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1387 {
1388         ASSERT(rf);
1389         ASSERT(rf->is_write());
1390
1391         act->set_read_from(rf);
1392         if (act->is_acquire()) {
1393                 rel_heads_list_t release_heads;
1394                 get_release_seq_heads(act, act, &release_heads);
1395                 int num_heads = release_heads.size();
1396                 for (unsigned int i = 0; i < release_heads.size(); i++)
1397                         if (!act->synchronize_with(release_heads[i])) {
1398                                 set_bad_synchronization();
1399                                 num_heads--;
1400                         }
1401                 return num_heads > 0;
1402         }
1403         return false;
1404 }
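
/*
 * Example (illustration only, not checker code): the kind of user-level
 * release/acquire pairing whose reads-from relation read_from() records. A
 * minimal hypothetical C++11 program; variable names are made up.
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *   #include <cassert>
 *
 *   std::atomic<int> data(0);
 *   std::atomic<bool> flag(false);
 *
 *   void producer() {
 *           data.store(42, std::memory_order_relaxed);
 *           flag.store(true, std::memory_order_release);   // release head
 *   }
 *
 *   void consumer() {
 *           while (!flag.load(std::memory_order_acquire))  // acquire read
 *                   ;
 *           // Once the acquire load reads-from the release store, the two
 *           // synchronize and the relaxed write to 'data' must be visible.
 *           assert(data.load(std::memory_order_relaxed) == 42);
 *   }
 *
 *   int main() {
 *           std::thread t1(producer), t2(consumer);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 * @endcode
 */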
1405
1406 /**
1407  * Check promises and eliminate potentially-satisfying threads when a thread is
1408  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1409  * no longer satisfy a promise generated from that thread.
1410  *
1411  * @param blocker The thread on which a thread is waiting
1412  * @param waiting The waiting thread
1413  */
1414 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1415 {
1416         for (unsigned int i = 0; i < promises->size(); i++) {
1417                 Promise *promise = (*promises)[i];
1418                 if (!promise->thread_is_available(waiting->get_id()))
1419                         continue;
1420                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1421                         ModelAction *reader = promise->get_reader(j);
1422                         if (reader->get_tid() != blocker->get_id())
1423                                 continue;
1424                         if (promise->eliminate_thread(waiting->get_id())) {
1425                                 /* Promise has failed */
1426                                 priv->failed_promise = true;
1427                         } else {
1428                                 /* Only eliminate the 'waiting' thread once */
1429                                 return;
1430                         }
1431                 }
1432         }
1433 }
1434
1435 /**
1436  * @brief Check whether a model action is enabled.
1437  *
1438  * Checks whether a lock or join operation would succeed (i.e., whether the
1439  * lock is currently unlocked, or whether the joined thread has already
1440  * completed). If not, the action is placed on the appropriate waiter list.
1441  *
1442  * @param curr The ModelAction to check.
1443  * @return True if the action is enabled; false otherwise.
1444  */
1445 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1446         if (curr->is_lock()) {
1447                 std::mutex *lock = (std::mutex *)curr->get_location();
1448                 struct std::mutex_state *state = lock->get_state();
1449                 if (state->locked) {
1450                         //Stick the action in the appropriate waiting queue
1451                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1452                         return false;
1453                 }
1454         } else if (curr->get_type() == THREAD_JOIN) {
1455                 Thread *blocking = (Thread *)curr->get_location();
1456                 if (!blocking->is_complete()) {
1457                         blocking->push_wait_list(curr);
1458                         thread_blocking_check_promises(blocking, get_thread(curr));
1459                         return false;
1460                 }
1461         }
1462
1463         return true;
1464 }
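
/*
 * Example (illustration only): a hypothetical program in which both a lock
 * and a join can be temporarily disabled. Plain C++11 std::mutex/std::thread
 * are used here for clarity.
 *
 * @code
 *   #include <mutex>
 *   #include <thread>
 *
 *   std::mutex m;
 *
 *   void worker() {
 *           m.lock();    // if main() still holds 'm', this action is not
 *           m.unlock();  // enabled and is parked on the lock's waiter list
 *   }
 *
 *   int main() {
 *           m.lock();
 *           std::thread t(worker);
 *           m.unlock();  // wakes the waiter so its lock can be scheduled
 *           t.join();    // enabled only once 't' is complete
 *           return 0;
 *   }
 * @endcode
 */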
1465
1466 /**
1467  * This is the heart of the model checker routine. It performs model-checking
1468  * actions corresponding to a given "current action." Among other processes, it
1469  * calculates reads-from relationships, updates synchronization clock vectors,
1470  * forms a memory_order constraints graph, and handles replay/backtrack
1471  * execution when running permutations of previously-observed executions.
1472  *
1473  * @param curr The current action to process
1474  * @return The ModelAction that is actually executed; may be different than
1475  * curr; may be NULL, if the current action is not enabled to run
1476  */
1477 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1478 {
1479         ASSERT(curr);
1480         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1481
1482         if (!check_action_enabled(curr)) {
1483                 /* Make the execution look like we chose to run this action
1484                  * much later, when a lock/join can succeed */
1485                 get_thread(curr)->set_pending(curr);
1486                 scheduler->sleep(get_thread(curr));
1487                 return NULL;
1488         }
1489
1490         bool newly_explored = initialize_curr_action(&curr);
1491
1492         DBG();
1493         if (DBG_ENABLED())
1494                 curr->print();
1495
1496         wake_up_sleeping_actions(curr);
1497
1498         /* Compute fairness information for CHESS yield algorithm */
1499         if (model->params.yieldon) {
1500                 curr->get_node()->update_yield(scheduler);
1501         }
1502
1503         /* Add the action to lists before any other model-checking tasks */
1504         if (!second_part_of_rmw)
1505                 add_action_to_lists(curr);
1506
1507         /* Build may_read_from set for newly-created actions */
1508         if (newly_explored && curr->is_read())
1509                 build_may_read_from(curr);
1510
1511         /* Initialize work_queue with the "current action" work */
1512         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1513         while (!work_queue.empty() && !has_asserted()) {
1514                 WorkQueueEntry work = work_queue.front();
1515                 work_queue.pop_front();
1516
1517                 switch (work.type) {
1518                 case WORK_CHECK_CURR_ACTION: {
1519                         ModelAction *act = work.action;
1520                         bool update = false; /* update this location's release seq's */
1521                         bool update_all = false; /* update all release seq's */
1522
1523                         if (process_thread_action(curr))
1524                                 update_all = true;
1525
1526                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1527                                 update = true;
1528
1529                         if (act->is_write() && process_write(act))
1530                                 update = true;
1531
1532                         if (act->is_fence() && process_fence(act))
1533                                 update_all = true;
1534
1535                         if (act->is_mutex_op() && process_mutex(act))
1536                                 update_all = true;
1537
1538                         if (act->is_relseq_fixup())
1539                                 process_relseq_fixup(curr, &work_queue);
1540
1541                         if (update_all)
1542                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1543                         else if (update)
1544                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1545                         break;
1546                 }
1547                 case WORK_CHECK_RELEASE_SEQ:
1548                         resolve_release_sequences(work.location, &work_queue);
1549                         break;
1550                 case WORK_CHECK_MO_EDGES: {
1551                         /** @todo Complete verification of work_queue */
1552                         ModelAction *act = work.action;
1553                         bool updated = false;
1554
1555                         if (act->is_read()) {
1556                                 const ModelAction *rf = act->get_reads_from();
1557                                 const Promise *promise = act->get_reads_from_promise();
1558                                 if (rf) {
1559                                         if (r_modification_order(act, rf))
1560                                                 updated = true;
1561                                 } else if (promise) {
1562                                         if (r_modification_order(act, promise))
1563                                                 updated = true;
1564                                 }
1565                         }
1566                         if (act->is_write()) {
1567                                 if (w_modification_order(act, NULL))
1568                                         updated = true;
1569                         }
1570                         mo_graph->commitChanges();
1571
1572                         if (updated)
1573                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1574                         break;
1575                 }
1576                 default:
1577                         ASSERT(false);
1578                         break;
1579                 }
1580         }
1581
1582         check_curr_backtracking(curr);
1583         set_backtracking(curr);
1584         return curr;
1585 }
1586
1587 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1588 {
1589         Node *currnode = curr->get_node();
1590         Node *parnode = currnode->get_parent();
1591
1592         if ((parnode && !parnode->backtrack_empty()) ||
1593                          !currnode->misc_empty() ||
1594                          !currnode->read_from_empty() ||
1595                          !currnode->promise_empty() ||
1596                          !currnode->relseq_break_empty()) {
1597                 set_latest_backtrack(curr);
1598         }
1599 }
1600
1601 bool ModelChecker::promises_expired() const
1602 {
1603         for (unsigned int i = 0; i < promises->size(); i++) {
1604                 Promise *promise = (*promises)[i];
1605                 if (promise->get_expiration() < priv->used_sequence_numbers)
1606                         return true;
1607         }
1608         return false;
1609 }
1610
1611 /**
1612  * This is the strongest feasibility check available.
1613  * @return whether the current trace (partial or complete) must be a prefix of
1614  * a feasible trace.
1615  */
1616 bool ModelChecker::isfeasibleprefix() const
1617 {
1618         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1619 }
1620
1621 /**
1622  * Print diagnostic information about an infeasible execution
1623  * @param prefix A string to prefix the output with; if NULL, then a default
1624  * message prefix will be provided
1625  */
1626 void ModelChecker::print_infeasibility(const char *prefix) const
1627 {
1628         char buf[100];
1629         char *ptr = buf;
1630         if (mo_graph->checkForCycles())
1631                 ptr += sprintf(ptr, "[mo cycle]");
1632         if (priv->failed_promise)
1633                 ptr += sprintf(ptr, "[failed promise]");
1634         if (priv->too_many_reads)
1635                 ptr += sprintf(ptr, "[too many reads]");
1636         if (priv->no_valid_reads)
1637                 ptr += sprintf(ptr, "[no valid reads-from]");
1638         if (priv->bad_synchronization)
1639                 ptr += sprintf(ptr, "[bad sw ordering]");
1640         if (promises_expired())
1641                 ptr += sprintf(ptr, "[promise expired]");
1642         if (promises->size() != 0)
1643                 ptr += sprintf(ptr, "[unresolved promise]");
1644         if (ptr != buf)
1645                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1646 }
1647
1648 /**
1649  * Returns whether the current completed trace is feasible, except for pending
1650  * release sequences.
1651  */
1652 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1653 {
1654         return !is_infeasible() && promises->size() == 0;
1655 }
1656
1657 /**
1658  * Check if the current partial trace is infeasible. Does not check any
1659  * end-of-execution flags, which might rule out the execution. Thus, this is
1660  * useful only for ruling an execution as infeasible.
1661  * @return whether the current partial trace is infeasible.
1662  */
1663 bool ModelChecker::is_infeasible() const
1664 {
1665         return mo_graph->checkForCycles() ||
1666                 priv->no_valid_reads ||
1667                 priv->failed_promise ||
1668                 priv->too_many_reads ||
1669                 priv->bad_synchronization ||
1670                 promises_expired();
1671 }
1672
1673 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1674 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1675         ModelAction *lastread = get_last_action(act->get_tid());
1676         lastread->process_rmw(act);
1677         if (act->is_rmw()) {
1678                 if (lastread->get_reads_from())
1679                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1680                 else
1681                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1682                 mo_graph->commitChanges();
1683         }
1684         return lastread;
1685 }
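
/*
 * Example (illustration only): at the user level, a single atomic RMW such as
 * the hypothetical compare-exchange below appears as a read part (RMWR)
 * followed by either a write-back (RMW, on success) or a plain read, which is
 * what process_rmw() merges back into the earlier action.
 *
 * @code
 *   #include <atomic>
 *
 *   std::atomic<int> x(0);
 *
 *   bool try_claim() {
 *           int expected = 0;
 *           // One source-level operation, two model-level actions: the RMWR
 *           // reads x; if it read 0, the RMW writes 1, otherwise the action
 *           // is closed out as an ordinary read of the observed value.
 *           return x.compare_exchange_strong(expected, 1,
 *                                            std::memory_order_acq_rel,
 *                                            std::memory_order_acquire);
 *   }
 * @endcode
 */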
1686
1687 /**
1688  * A helper function for ModelChecker::check_recency, to check if the current
1689  * thread could have read from a different write/promise for each of the past
1690  * 'params.maxreads' steps and whether that write/promise should become visible (i.e., is
1691  * ordered later in the modification order). This helps model memory liveness.
1692  *
1693  * @param curr The current action. Must be a read.
1694  * @param rf The write/promise from which we plan to read
1695  * @param other_rf The write/promise from which we may read
1696  * @return True if we were able to read from other_rf for params.maxreads steps
1697  */
1698 template <typename T, typename U>
1699 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1700 {
1701         /* Need a different write/promise */
1702         if (other_rf->equals(rf))
1703                 return false;
1704
1705         /* Only look for "newer" writes/promises */
1706         if (!mo_graph->checkReachable(rf, other_rf))
1707                 return false;
1708
1709         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1710         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1711         action_list_t::reverse_iterator rit = list->rbegin();
1712         ASSERT((*rit) == curr);
1713         /* Skip past curr */
1714         rit++;
1715
1716         /* Does this write/promise work for everyone? */
1717         for (int i = 0; i < params.maxreads; i++, rit++) {
1718                 ModelAction *act = *rit;
1719                 if (!act->may_read_from(other_rf))
1720                         return false;
1721         }
1722         return true;
1723 }
1724
1725 /**
1726  * Checks whether a thread has read from the same write or Promise too many
1727  * times without seeing the effects of a later write/Promise.
1728  *
1729  * Basic idea:
1730  * 1) there must be a different write/promise that we could read from,
1731  * 2) we must have read from the same write/promise in excess of maxreads times,
1732  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1733  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1734  *
1735  * If so, we decide that the execution is no longer feasible.
1736  *
1737  * @param curr The current action. Must be a read.
1738  * @param rf The ModelAction/Promise from which we might read.
1739  * @return True if the read should succeed; false otherwise
1740  */
1741 template <typename T>
1742 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1743 {
1744         if (!params.maxreads)
1745                 return true;
1746
1747         //NOTE: The next check is just an optimization; it is not strictly necessary.
1748         if (curr->get_node()->get_read_from_past_size() +
1749                         curr->get_node()->get_read_from_promise_size() <= 1)
1750                 return true;
1751
1752         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1753         int tid = id_to_int(curr->get_tid());
1754         ASSERT(tid < (int)thrd_lists->size());
1755         action_list_t *list = &(*thrd_lists)[tid];
1756         action_list_t::reverse_iterator rit = list->rbegin();
1757         ASSERT((*rit) == curr);
1758         /* Skip past curr */
1759         rit++;
1760
1761         action_list_t::reverse_iterator ritcopy = rit;
1762         /* See if we have enough reads from the same value */
1763         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1764                 if (ritcopy == list->rend())
1765                         return true;
1766                 ModelAction *act = *ritcopy;
1767                 if (!act->is_read())
1768                         return true;
1769                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1770                         return true;
1771                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1772                         return true;
1773                 if (act->get_node()->get_read_from_past_size() +
1774                                 act->get_node()->get_read_from_promise_size() <= 1)
1775                         return true;
1776         }
1777         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1778                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1779                 if (should_read_instead(curr, rf, write))
1780                         return false; /* liveness failure */
1781         }
1782         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1783                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1784                 if (should_read_instead(curr, rf, promise))
1785                         return false; /* liveness failure */
1786         }
1787         return true;
1788 }
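
/*
 * Example (illustration only): the liveness pattern that check_recency() and
 * should_read_instead() prune when params.maxreads is non-zero. A hypothetical
 * relaxed spin loop:
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<bool> done(false);
 *
 *   void spinner() {
 *           // Each iteration may legally read the stale 'false' store. Once
 *           // this thread has read the same write more than maxreads times
 *           // while the mo-later 'true' store was available to read instead,
 *           // the execution is treated as a liveness failure and discarded.
 *           while (!done.load(std::memory_order_relaxed))
 *                   ;
 *   }
 *
 *   int main() {
 *           std::thread t(spinner);
 *           done.store(true, std::memory_order_relaxed);
 *           t.join();
 *           return 0;
 *   }
 * @endcode
 */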
1789
1790 /**
1791  * Updates the mo_graph with the constraints imposed from the current
1792  * read.
1793  *
1794  * Basic idea is the following: Go through each other thread and find
1795  * the last action that happened before our read.  Two cases:
1796  *
1797  * (1) The action is a write => that write must either occur before
1798  * the write we read from or be the write we read from.
1799  *
1800  * (2) The action is a read => the write that that action read from
1801  * must occur before the write we read from or be the same write.
1802  *
1803  * @param curr The current action. Must be a read.
1804  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1805  * @return True if modification order edges were added; false otherwise
1806  */
1807 template <typename rf_type>
1808 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1809 {
1810         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1811         unsigned int i;
1812         bool added = false;
1813         ASSERT(curr->is_read());
1814
1815         /* Last SC fence in the current thread */
1816         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1817         ModelAction *last_sc_write = NULL;
1818         if (curr->is_seqcst())
1819                 last_sc_write = get_last_seq_cst_write(curr);
1820
1821         /* Iterate over all threads */
1822         for (i = 0; i < thrd_lists->size(); i++) {
1823                 /* Last SC fence in thread i */
1824                 ModelAction *last_sc_fence_thread_local = NULL;
1825                 if (int_to_id((int)i) != curr->get_tid())
1826                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1827
1828                 /* Last SC fence in thread i, before last SC fence in current thread */
1829                 ModelAction *last_sc_fence_thread_before = NULL;
1830                 if (last_sc_fence_local)
1831                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1832
1833                 /* Iterate over actions in thread, starting from most recent */
1834                 action_list_t *list = &(*thrd_lists)[i];
1835                 action_list_t::reverse_iterator rit;
1836                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1837                         ModelAction *act = *rit;
1838
1839                         /* Skip curr */
1840                         if (act == curr)
1841                                 continue;
1842                         /* Don't want to add reflexive edges on 'rf' */
1843                         if (act->equals(rf)) {
1844                                 if (act->happens_before(curr))
1845                                         break;
1846                                 else
1847                                         continue;
1848                         }
1849
1850                         if (act->is_write()) {
1851                                 /* C++, Section 29.3 statement 5 */
1852                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1853                                                 *act < *last_sc_fence_thread_local) {
1854                                         added = mo_graph->addEdge(act, rf) || added;
1855                                         break;
1856                                 }
1857                                 /* C++, Section 29.3 statement 4 */
1858                                 else if (act->is_seqcst() && last_sc_fence_local &&
1859                                                 *act < *last_sc_fence_local) {
1860                                         added = mo_graph->addEdge(act, rf) || added;
1861                                         break;
1862                                 }
1863                                 /* C++, Section 29.3 statement 6 */
1864                                 else if (last_sc_fence_thread_before &&
1865                                                 *act < *last_sc_fence_thread_before) {
1866                                         added = mo_graph->addEdge(act, rf) || added;
1867                                         break;
1868                                 }
1869                         }
1870
1871                         /* C++, Section 29.3 statement 3 (second subpoint) */
1872                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1873                                 added = mo_graph->addEdge(act, rf) || added;
1874                                 break;
1875                         }
1876
1877                         /*
1878                          * Include at most one act per-thread that "happens
1879                          * before" curr
1880                          */
1881                         if (act->happens_before(curr)) {
1882                                 if (act->is_write()) {
1883                                         added = mo_graph->addEdge(act, rf) || added;
1884                                 } else {
1885                                         const ModelAction *prevrf = act->get_reads_from();
1886                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1887                                         if (prevrf) {
1888                                                 if (!prevrf->equals(rf))
1889                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1890                                         } else if (!prevrf_promise->equals(rf)) {
1891                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1892                                         }
1893                                 }
1894                                 break;
1895                         }
1896                 }
1897         }
1898
1899         /*
1900          * All compatible, thread-exclusive promises must be ordered after any
1901          * concrete loads from the same thread
1902          */
1903         for (unsigned int i = 0; i < promises->size(); i++)
1904                 if ((*promises)[i]->is_compatible_exclusive(curr))
1905                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1906
1907         return added;
1908 }
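
/*
 * Example (illustration only) of the happens-before rule in case (1): a write
 * that happens before the current read (here simply sequenced before it in the
 * same thread) constrains the modification order of whichever write the read
 * takes its value from. Names are hypothetical.
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   void reader_thread() {
 *           x.store(1, std::memory_order_relaxed);      // W1
 *           int r = x.load(std::memory_order_relaxed);  // R (W1 happens before R)
 *           (void)r;
 *           // For whichever write R reads from, r_modification_order() adds
 *           // W1 --mo--> rf(R) (unless rf(R) is W1 itself); in particular R
 *           // can no longer read the initial value 0.
 *   }
 *
 *   void writer_thread() {
 *           x.store(2, std::memory_order_relaxed);      // W2: R may read 1 or 2
 *   }
 *
 *   int main() {
 *           std::thread a(reader_thread), b(writer_thread);
 *           a.join();
 *           b.join();
 *           return 0;
 *   }
 * @endcode
 */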
1909
1910 /**
1911  * Updates the mo_graph with the constraints imposed from the current write.
1912  *
1913  * Basic idea is the following: Go through each other thread and find
1914  * the latest action that happened before our write.  Two cases:
1915  *
1916  * (1) The action is a write => that write must occur before
1917  * the current write
1918  *
1919  * (2) The action is a read => the write that that action read from
1920  * must occur before the current write.
1921  *
1922  * This method also handles two other issues:
1923  *
1924  * (I) Sequential Consistency: Making sure that if the current write is
1925  * seq_cst, that it occurs after the previous seq_cst write.
1926  *
1927  * (II) Sending the write back to non-synchronizing reads.
1928  *
1929  * @param curr The current action. Must be a write.
1930  * @param send_fv A vector for stashing reads to which we may pass our future
1931  * value. If NULL, then don't record any future values.
1932  * @return True if modification order edges were added; false otherwise
1933  */
1934 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1935 {
1936         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1937         unsigned int i;
1938         bool added = false;
1939         ASSERT(curr->is_write());
1940
1941         if (curr->is_seqcst()) {
1942                 /* We have to at least see the last sequentially consistent write,
1943                          so we are initialized. */
1944                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1945                 if (last_seq_cst != NULL) {
1946                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1947                 }
1948         }
1949
1950         /* Last SC fence in the current thread */
1951         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1952
1953         /* Iterate over all threads */
1954         for (i = 0; i < thrd_lists->size(); i++) {
1955                 /* Last SC fence in thread i, before last SC fence in current thread */
1956                 ModelAction *last_sc_fence_thread_before = NULL;
1957                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1958                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1959
1960                 /* Iterate over actions in thread, starting from most recent */
1961                 action_list_t *list = &(*thrd_lists)[i];
1962                 action_list_t::reverse_iterator rit;
1963                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1964                         ModelAction *act = *rit;
1965                         if (act == curr) {
1966                                 /*
1967                                  * 1) If RMW and it actually read from something, then we
1968                                  * already have all relevant edges, so just skip to next
1969                                  * thread.
1970                                  *
1971                                  * 2) If RMW and it didn't read from anything, we should add
1972                                  * whatever edge we can get to speed up convergence.
1973                                  *
1974                                  * 3) If normal write, we need to look at earlier actions, so
1975                                  * continue processing list.
1976                                  */
1977                                 if (curr->is_rmw()) {
1978                                         if (curr->get_reads_from() != NULL)
1979                                                 break;
1980                                         else
1981                                                 continue;
1982                                 } else
1983                                         continue;
1984                         }
1985
1986                         /* C++, Section 29.3 statement 7 */
1987                         if (last_sc_fence_thread_before && act->is_write() &&
1988                                         *act < *last_sc_fence_thread_before) {
1989                                 added = mo_graph->addEdge(act, curr) || added;
1990                                 break;
1991                         }
1992
1993                         /*
1994                          * Include at most one act per-thread that "happens
1995                          * before" curr
1996                          */
1997                         if (act->happens_before(curr)) {
1998                                 /*
1999                                  * Note: if act is RMW, just add edge:
2000                                  *   act --mo--> curr
2001                                  * The following edge should be handled elsewhere:
2002                                  *   readfrom(act) --mo--> act
2003                                  */
2004                                 if (act->is_write())
2005                                         added = mo_graph->addEdge(act, curr) || added;
2006                                 else if (act->is_read()) {
2007                                         //if previous read accessed a null, just keep going
2008                                         if (act->get_reads_from() == NULL)
2009                                                 continue;
2010                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2011                                 }
2012                                 break;
2013                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2014                                                      !act->same_thread(curr)) {
2015                                 /* We have an action that:
2016                                    (1) did not happen before us
2017                                    (2) is a read and we are a write
2018                                    (3) cannot synchronize with us
2019                                    (4) is in a different thread
2020                                    =>
2021                                    that read could potentially read from our write.  Note that
2022                                    these checks are overly conservative at this point; we'll
2023                                    do more checks before actually removing the
2024                                    pendingfuturevalue.
2025
2026                                  */
2027                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2028                                         if (!is_infeasible())
2029                                                 send_fv->push_back(act);
2030                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2031                                                 add_future_value(curr, act);
2032                                 }
2033                         }
2034                 }
2035         }
2036
2037         /*
2038          * All compatible, thread-exclusive promises must be ordered after any
2039          * concrete stores from the same thread, or else they can be merged with
2040          * this store later
2041          */
2042         for (unsigned int i = 0; i < promises->size(); i++)
2043                 if ((*promises)[i]->is_compatible_exclusive(curr))
2044                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2045
2046         return added;
2047 }
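
/*
 * Example (illustration only) of issue (I) above: two seq_cst stores to the
 * same (hypothetical) variable are ordered in mo consistently with the single
 * total SC order, via the get_last_seq_cst_write() edge added here.
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   void t1() { x.store(1, std::memory_order_seq_cst); }  // Wa
 *   void t2() { x.store(2, std::memory_order_seq_cst); }  // Wb
 *
 *   int main() {
 *           std::thread a(t1), b(t2);
 *           a.join();
 *           b.join();
 *           // Whichever of Wa/Wb is later in the SC order is also the later
 *           // write in x's modification order, so x now holds that value.
 *           return 0;
 *   }
 * @endcode
 */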
2048
2049 /** Arbitrary reads from the future are not allowed.  Section 29.3
2050  * part 9 places some constraints.  This method checks one consequence of that
2051  * constraint.  Others require compiler support. */
2052 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2053 {
2054         if (!writer->is_rmw())
2055                 return true;
2056
2057         if (!reader->is_rmw())
2058                 return true;
2059
2060         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2061                 if (search == reader)
2062                         return false;
2063                 if (search->get_tid() == reader->get_tid() &&
2064                                 search->happens_before(reader))
2065                         break;
2066         }
2067
2068         return true;
2069 }
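
/*
 * Example (illustration only): the rf cycle between RMWs that this check
 * rejects. With the two hypothetical increments below, each fetch_add may read
 * the initial value or the other increment's result, but they cannot both read
 * from each other; walking writer->get_reads_from() back to the reader detects
 * (and forbids) that cycle.
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   void t1() { x.fetch_add(1, std::memory_order_relaxed); }  // RMW A
 *   void t2() { x.fetch_add(1, std::memory_order_relaxed); }  // RMW B
 *
 *   int main() {
 *           std::thread a(t1), b(t2);
 *           a.join();
 *           b.join();
 *           // Every feasible execution leaves x == 2.
 *           return 0;
 *   }
 * @endcode
 */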
2070
2071 /**
2072  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2073  * some constraints. This method checks the following constraint (others
2074  * require compiler support):
2075  *
2076  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2077  */
2078 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2079 {
2080         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2081         unsigned int i;
2082         /* Iterate over all threads */
2083         for (i = 0; i < thrd_lists->size(); i++) {
2084                 const ModelAction *write_after_read = NULL;
2085
2086                 /* Iterate over actions in thread, starting from most recent */
2087                 action_list_t *list = &(*thrd_lists)[i];
2088                 action_list_t::reverse_iterator rit;
2089                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2090                         ModelAction *act = *rit;
2091
2092                         /* Don't disallow due to act == reader */
2093                         if (!reader->happens_before(act) || reader == act)
2094                                 break;
2095                         else if (act->is_write())
2096                                 write_after_read = act;
2097                         else if (act->is_read() && act->get_reads_from() != NULL)
2098                                 write_after_read = act->get_reads_from();
2099                 }
2100
2101                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2102                         return false;
2103         }
2104         return true;
2105 }
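
/*
 * Example (illustration only) of the constraint above, with hypothetical
 * actions X, Y, Z on one variable:
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> v(0);
 *
 *   void t1() {
 *           int r = v.load(std::memory_order_relaxed);  // X (the reader)
 *           v.store(1, std::memory_order_relaxed);      // Y (X --hb--> Y)
 *           (void)r;
 *   }
 *
 *   void t2() {
 *           v.store(2, std::memory_order_relaxed);      // Z (candidate future
 *   }                                                   //    value for X)
 *
 *   int main() {
 *           std::thread a(t1), b(t2);
 *           a.join();
 *           b.join();
 *           return 0;
 *   }
 * @endcode
 *
 * If the mo_graph already orders Y --mo--> Z, then letting X read from Z would
 * place rf(X) after a write that X itself happens before (a coherence
 * violation), so mo_may_allow() refuses to send X the value written by Z.
 */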
2106
2107 /**
2108  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2109  * The ModelAction under consideration is expected to be taking part in
2110  * release/acquire synchronization as an object of the "reads from" relation.
2111  * Note that this can only provide release sequence support for RMW chains
2112  * which do not read from the future, as those actions cannot be traced until
2113  * their "promise" is fulfilled. Similarly, we may not even establish the
2114  * presence of a release sequence with certainty, as some modification order
2115  * constraints may be decided further in the future. Thus, this function
2116  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2117  * and a boolean representing certainty.
2118  *
2119  * @param rf The action that might be part of a release sequence. Must be a
2120  * write.
2121  * @param release_heads A pass-by-reference style return parameter. After
2122  * execution of this function, release_heads will contain the heads of all the
2123  * relevant release sequences, if any exist with certainty
2124  * @param pending A pass-by-reference style return parameter which is only used
2125  * when returning false (i.e., uncertain). Returns most information regarding
2126  * an uncertain release sequence, including any write operations that might
2127  * break the sequence.
2128  * @return true, if the ModelChecker is certain that release_heads is complete;
2129  * false otherwise
2130  */
2131 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2132                 rel_heads_list_t *release_heads,
2133                 struct release_seq *pending) const
2134 {
2135         /* Only check for release sequences if there are no cycles */
2136         if (mo_graph->checkForCycles())
2137                 return false;
2138
2139         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2140                 ASSERT(rf->is_write());
2141
2142                 if (rf->is_release())
2143                         release_heads->push_back(rf);
2144                 else if (rf->get_last_fence_release())
2145                         release_heads->push_back(rf->get_last_fence_release());
2146                 if (!rf->is_rmw())
2147                         break; /* End of RMW chain */
2148
2149                 /** @todo Need to be smarter here...  In the linux lock
2150                  * example, this will run to the beginning of the program for
2151                  * every acquire. */
2152                 /** @todo The way to be smarter here is to keep going until 1
2153                  * thread has a release preceded by an acquire and you've seen
2154                  *       both. */
2155
2156                 /* acq_rel RMW is a sufficient stopping condition */
2157                 if (rf->is_acquire() && rf->is_release())
2158                         return true; /* complete */
2159         }
2160         if (!rf) {
2161                 /* read from future: need to settle this later */
2162                 pending->rf = NULL;
2163                 return false; /* incomplete */
2164         }
2165
2166         if (rf->is_release())
2167                 return true; /* complete */
2168
2169         /* else relaxed write
2170          * - check for fence-release in the same thread (29.8, stmt. 3)
2171          * - check modification order for contiguous subsequence
2172          *   -> rf must be same thread as release */
2173
2174         const ModelAction *fence_release = rf->get_last_fence_release();
2175         /* Synchronize with a fence-release unconditionally; we don't need to
2176          * find any more "contiguous subsequence..." for it */
2177         if (fence_release)
2178                 release_heads->push_back(fence_release);
2179
2180         int tid = id_to_int(rf->get_tid());
2181         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2182         action_list_t *list = &(*thrd_lists)[tid];
2183         action_list_t::const_reverse_iterator rit;
2184
2185         /* Find rf in the thread list */
2186         rit = std::find(list->rbegin(), list->rend(), rf);
2187         ASSERT(rit != list->rend());
2188
2189         /* Find the last {write,fence}-release */
2190         for (; rit != list->rend(); rit++) {
2191                 if (fence_release && *(*rit) < *fence_release)
2192                         break;
2193                 if ((*rit)->is_release())
2194                         break;
2195         }
2196         if (rit == list->rend()) {
2197                 /* No write-release in this thread */
2198                 return true; /* complete */
2199         } else if (fence_release && *(*rit) < *fence_release) {
2200                 /* The fence-release is more recent (and so, "stronger") than
2201                  * the most recent write-release */
2202                 return true; /* complete */
2203         } /* else, need to establish contiguous release sequence */
2204         ModelAction *release = *rit;
2205
2206         ASSERT(rf->same_thread(release));
2207
2208         pending->writes.clear();
2209
2210         bool certain = true;
2211         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2212                 if (id_to_int(rf->get_tid()) == (int)i)
2213                         continue;
2214                 list = &(*thrd_lists)[i];
2215
2216                 /* Can we ensure no future writes from this thread may break
2217                  * the release seq? */
2218                 bool future_ordered = false;
2219
2220                 ModelAction *last = get_last_action(int_to_id(i));
2221                 Thread *th = get_thread(int_to_id(i));
2222                 if ((last && rf->happens_before(last)) ||
2223                                 !is_enabled(th) ||
2224                                 th->is_complete())
2225                         future_ordered = true;
2226
2227                 ASSERT(!th->is_model_thread() || future_ordered);
2228
2229                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2230                         const ModelAction *act = *rit;
2231                         /* Reach synchronization -> this thread is complete */
2232                         if (act->happens_before(release))
2233                                 break;
2234                         if (rf->happens_before(act)) {
2235                                 future_ordered = true;
2236                                 continue;
2237                         }
2238
2239                         /* Only non-RMW writes can break release sequences */
2240                         if (!act->is_write() || act->is_rmw())
2241                                 continue;
2242
2243                         /* Check modification order */
2244                         if (mo_graph->checkReachable(rf, act)) {
2245                                 /* rf --mo--> act */
2246                                 future_ordered = true;
2247                                 continue;
2248                         }
2249                         if (mo_graph->checkReachable(act, release))
2250                                 /* act --mo--> release */
2251                                 break;
2252                         if (mo_graph->checkReachable(release, act) &&
2253                                       mo_graph->checkReachable(act, rf)) {
2254                                 /* release --mo-> act --mo--> rf */
2255                                 return true; /* complete */
2256                         }
2257                         /* act may break release sequence */
2258                         pending->writes.push_back(act);
2259                         certain = false;
2260                 }
2261                 if (!future_ordered)
2262                         certain = false; /* This thread is uncertain */
2263         }
2264
2265         if (certain) {
2266                 release_heads->push_back(release);
2267                 pending->writes.clear();
2268         } else {
2269                 pending->release = release;
2270                 pending->rf = rf;
2271         }
2272         return certain;
2273 }
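
/*
 * Example (illustration only): a release sequence whose head is found by this
 * routine. The relaxed RMW in 'middle' continues the sequence begun by the
 * release store, so an acquire load that reads the RMW still synchronizes with
 * the release head. Names are hypothetical.
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *   #include <cassert>
 *
 *   std::atomic<int> data(0);
 *   std::atomic<int> sync(0);
 *
 *   void producer() {
 *           data.store(1, std::memory_order_relaxed);
 *           sync.store(1, std::memory_order_release);      // release head
 *   }
 *
 *   void middle() {
 *           sync.fetch_add(1, std::memory_order_relaxed);  // RMW extends the
 *   }                                                      // release sequence
 *
 *   void consumer() {
 *           if (sync.load(std::memory_order_acquire) == 2) {
 *                   // Read from the RMW => synchronizes with the release head.
 *                   assert(data.load(std::memory_order_relaxed) == 1);
 *           }
 *   }
 *
 *   int main() {
 *           std::thread a(producer), b(middle), c(consumer);
 *           a.join();
 *           b.join();
 *           c.join();
 *           return 0;
 *   }
 * @endcode
 */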
2274
2275 /**
2276  * An interface for getting the release sequence head(s) with which a
2277  * given ModelAction must synchronize. This function only returns a non-empty
2278  * result when it can locate a release sequence head with certainty. Otherwise,
2279  * it may mark the internal state of the ModelChecker so that it will handle
2280  * the release sequence at a later time, causing @a acquire to update its
2281  * synchronization at some later point in execution.
2282  *
2283  * @param acquire The 'acquire' action that may synchronize with a release
2284  * sequence
2285  * @param read The read action that may read from a release sequence; this may
2286  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2287  * when 'acquire' is a fence-acquire)
2288  * @param release_heads A pass-by-reference return parameter. Will be filled
2289  * with the head(s) of the release sequence(s), if they exist with certainty.
2290  * @see ModelChecker::release_seq_heads
2291  */
2292 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2293                 ModelAction *read, rel_heads_list_t *release_heads)
2294 {
2295         const ModelAction *rf = read->get_reads_from();
2296         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2297         sequence->acquire = acquire;
2298         sequence->read = read;
2299
2300         if (!release_seq_heads(rf, release_heads, sequence)) {
2301                 /* add act to 'lazy checking' list */
2302                 pending_rel_seqs->push_back(sequence);
2303         } else {
2304                 snapshot_free(sequence);
2305         }
2306 }
2307
2308 /**
2309  * Attempt to resolve all stashed operations that might synchronize with a
2310  * release sequence for a given location. This implements the "lazy" portion of
2311  * determining whether or not a release sequence was contiguous, since not all
2312  * modification order information is present at the time an action occurs.
2313  *
2314  * @param location The location/object that should be checked for release
2315  * sequence resolutions. A NULL value means to check all locations.
2316  * @param work_queue The work queue to which to add work items as they are
2317  * generated
2318  * @return True if any updates occurred (new synchronization, new mo_graph
2319  * edges)
2320  */
2321 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2322 {
2323         bool updated = false;
2324         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2325         while (it != pending_rel_seqs->end()) {
2326                 struct release_seq *pending = *it;
2327                 ModelAction *acquire = pending->acquire;
2328                 const ModelAction *read = pending->read;
2329
2330                 /* Only resolve sequences on the given location, if provided */
2331                 if (location && read->get_location() != location) {
2332                         it++;
2333                         continue;
2334                 }
2335
2336                 const ModelAction *rf = read->get_reads_from();
2337                 rel_heads_list_t release_heads;
2338                 bool complete;
2339                 complete = release_seq_heads(rf, &release_heads, pending);
2340                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2341                         if (!acquire->has_synchronized_with(release_heads[i])) {
2342                                 if (acquire->synchronize_with(release_heads[i]))
2343                                         updated = true;
2344                                 else
2345                                         set_bad_synchronization();
2346                         }
2347                 }
2348
2349                 if (updated) {
2350                         /* Re-check all pending release sequences */
2351                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2352                         /* Re-check read-acquire for mo_graph edges */
2353                         if (acquire->is_read())
2354                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2355
2356                         /* propagate synchronization to later actions */
2357                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2358                         for (; (*rit) != acquire; rit++) {
2359                                 ModelAction *propagate = *rit;
2360                                 if (acquire->happens_before(propagate)) {
2361                                         propagate->synchronize_with(acquire);
2362                                         /* Re-check 'propagate' for mo_graph edges */
2363                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2364                                 }
2365                         }
2366                 }
2367                 if (complete) {
2368                         it = pending_rel_seqs->erase(it);
2369                         snapshot_free(pending);
2370                 } else {
2371                         it++;
2372                 }
2373         }
2374
2375         // If we resolved any release sequences (adding synchronization), see if we have realized a data race.
2376         checkDataRaces();
2377
2378         return updated;
2379 }
2380
2381 /**
2382  * Performs various bookkeeping operations for the current ModelAction. For
2383  * instance, adds action to the per-object, per-thread action vector and to the
2384  * action trace list of all thread actions.
2385  *
2386  * @param act is the ModelAction to add.
2387  */
2388 void ModelChecker::add_action_to_lists(ModelAction *act)
2389 {
2390         int tid = id_to_int(act->get_tid());
2391         ModelAction *uninit = NULL;
2392         int uninit_id = -1;
2393         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2394         if (list->empty() && act->is_atomic_var()) {
2395                 uninit = get_uninitialized_action(act);
2396                 uninit_id = id_to_int(uninit->get_tid());
2397                 list->push_front(uninit);
2398         }
2399         list->push_back(act);
2400
2401         action_trace->push_back(act);
2402         if (uninit)
2403                 action_trace->push_front(uninit);
2404
2405         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2406         if (tid >= (int)vec->size())
2407                 vec->resize(priv->next_thread_id);
2408         (*vec)[tid].push_back(act);
2409         if (uninit)
2410                 (*vec)[uninit_id].push_front(uninit);
2411
2412         if ((int)thrd_last_action->size() <= tid)
2413                 thrd_last_action->resize(get_num_threads());
2414         (*thrd_last_action)[tid] = act;
2415         if (uninit)
2416                 (*thrd_last_action)[uninit_id] = uninit;
2417
2418         if (act->is_fence() && act->is_release()) {
2419                 if ((int)thrd_last_fence_release->size() <= tid)
2420                         thrd_last_fence_release->resize(get_num_threads());
2421                 (*thrd_last_fence_release)[tid] = act;
2422         }
2423
2424         if (act->is_wait()) {
2425                 void *mutex_loc = (void *) act->get_value();
2426                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2427
2428                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2429                 if (tid >= (int)vec->size())
2430                         vec->resize(priv->next_thread_id);
2431                 (*vec)[tid].push_back(act);
2432         }
2433 }
2434
2435 /**
2436  * @brief Get the last action performed by a particular Thread
2437  * @param tid The thread ID of the Thread in question
2438  * @return The last action in the thread
2439  */
2440 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2441 {
2442         int threadid = id_to_int(tid);
2443         if (threadid < (int)thrd_last_action->size())
2444                 return (*thrd_last_action)[id_to_int(tid)];
2445         else
2446                 return NULL;
2447 }
2448
2449 /**
2450  * @brief Get the last fence release performed by a particular Thread
2451  * @param tid The thread ID of the Thread in question
2452  * @return The last fence release in the thread, if one exists; NULL otherwise
2453  */
2454 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2455 {
2456         int threadid = id_to_int(tid);
2457         if (threadid < (int)thrd_last_fence_release->size())
2458                 return (*thrd_last_fence_release)[id_to_int(tid)];
2459         else
2460                 return NULL;
2461 }
2462
2463 /**
2464  * Gets the last memory_order_seq_cst write (in the total global sequence)
2465  * performed on a particular object (i.e., memory location), not including the
2466  * current action.
2467  * @param curr The current ModelAction; also denotes the object location to
2468  * check
2469  * @return The last seq_cst write
2470  */
2471 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2472 {
2473         void *location = curr->get_location();
2474         action_list_t *list = get_safe_ptr_action(obj_map, location);
2475         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2476         action_list_t::reverse_iterator rit;
2477         for (rit = list->rbegin(); (*rit) != curr; rit++)
2478                 ;
2479         rit++; /* Skip past curr */
2480         for ( ; rit != list->rend(); rit++)
2481                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2482                         return *rit;
2483         return NULL;
2484 }
2485
2486 /**
2487  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2488  * performed in a particular thread, prior to a particular fence.
2489  * @param tid The ID of the thread to check
2490  * @param before_fence The fence from which to begin the search; if NULL, then
2491  * search for the most recent fence in the thread.
2492  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2493  */
2494 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2495 {
2496         /* All fences should have NULL location */
2497         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2498         action_list_t::reverse_iterator rit = list->rbegin();
2499
2500         if (before_fence) {
2501                 for (; rit != list->rend(); rit++)
2502                         if (*rit == before_fence)
2503                                 break;
2504
2505                 ASSERT(*rit == before_fence);
2506                 rit++;
2507         }
2508
2509         for (; rit != list->rend(); rit++)
2510                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2511                         return *rit;
2512         return NULL;
2513 }
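
/*
 * Example (illustration only): the store-buffering idiom with seq_cst fences.
 * The fence rules applied in r_/w_modification_order (29.3 statements 4-7),
 * which rely on this per-thread "last SC fence" lookup, forbid the outcome
 * r1 == 0 && r2 == 0. Variable names are made up.
 *
 * @code
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0), y(0);
 *   int r1, r2;
 *
 *   void t1() {
 *           x.store(1, std::memory_order_relaxed);
 *           std::atomic_thread_fence(std::memory_order_seq_cst);  // fence F1
 *           r1 = y.load(std::memory_order_relaxed);
 *   }
 *
 *   void t2() {
 *           y.store(1, std::memory_order_relaxed);
 *           std::atomic_thread_fence(std::memory_order_seq_cst);  // fence F2
 *           r2 = x.load(std::memory_order_relaxed);
 *   }
 *
 *   int main() {
 *           std::thread a(t1), b(t2);
 *           a.join();
 *           b.join();
 *           return 0;
 *   }
 * @endcode
 */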
2514
2515 /**
2516  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2517  * location). This function identifies the mutex according to the current
2518  * action, which is presumed to operate on the same mutex.
2519  * @param curr The current ModelAction; also denotes the object location to
2520  * check
2521  * @return The last unlock operation
2522  */
2523 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2524 {
2525         void *location = curr->get_location();
2526         action_list_t *list = get_safe_ptr_action(obj_map, location);
2527         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2528         action_list_t::reverse_iterator rit;
2529         for (rit = list->rbegin(); rit != list->rend(); rit++)
2530                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2531                         return *rit;
2532         return NULL;
2533 }
2534
2535 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2536 {
2537         ModelAction *parent = get_last_action(tid);
2538         if (!parent)
2539                 parent = get_thread(tid)->get_creation();
2540         return parent;
2541 }
2542
2543 /**
2544  * Returns the clock vector for a given thread.
2545  * @param tid The thread whose clock vector we want
2546  * @return Desired clock vector
2547  */
2548 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2549 {
2550         return get_parent_action(tid)->get_cv();
2551 }
2552
2553 /**
2554  * @brief Find the promise (if any) to resolve for the current action and
2555  * remove it from the pending promise vector
2556  * @param curr The current ModelAction. Should be a write.
2557  * @return The Promise to resolve, if any; otherwise NULL
2558  */
2559 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2560 {
2561         for (unsigned int i = 0; i < promises->size(); i++)
2562                 if (curr->get_node()->get_promise(i)) {
2563                         Promise *ret = (*promises)[i];
2564                         promises->erase(promises->begin() + i);
2565                         return ret;
2566                 }
2567         return NULL;
2568 }
2569
2570 /**
2571  * Resolve a Promise with a current write.
2572  * @param write The ModelAction that is fulfilling Promises
2573  * @param promise The Promise to resolve
2574  * @return True if the Promise was successfully resolved; false otherwise
2575  */
2576 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2577 {
2578         ModelVector<ModelAction *> actions_to_check;
2579
2580         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2581                 ModelAction *read = promise->get_reader(i);
2582                 read_from(read, write);
2583                 actions_to_check.push_back(read);
2584         }
2585         /* Make sure the promise's value matches the write's value */
2586         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2587         if (!mo_graph->resolvePromise(promise, write))
2588                 priv->failed_promise = true;
2589
2590         /**
2591          * @todo  It is possible to end up in an inconsistent state, where a
2592          * "resolved" promise may still be referenced if
2593          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2594          *
2595          * Note that the inconsistency only matters when dumping mo_graph to
2596          * file.
2597          *
2598          * delete promise;
2599          */
2600
2601         // Check whether reading these writes has made threads unable to
2602         // resolve promises
2603         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2604                 ModelAction *read = actions_to_check[i];
2605                 mo_check_promises(read, true);
2606         }
2607
2608         return true;
2609 }
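/*
 * A minimal usage sketch for the two functions above; the surrounding
 * control flow is an assumption for illustration only:
 *
 *   Promise *promise = pop_promise_to_resolve(curr);
 *   if (promise)
 *           resolve_promise(curr, promise);
 *
 * pop_promise_to_resolve() consults the per-index flags recorded on curr's
 * Node (see compute_promises() below) and removes the chosen Promise from
 * the pending vector before it is resolved against the write.
 */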
2610
2611 /**
2612  * Compute the set of promises that could potentially be satisfied by this
2613  * action. Note that the set computation actually appears in the Node, not in
2614  * ModelChecker.
2615  * @param curr The ModelAction that may satisfy promises
2616  */
2617 void ModelChecker::compute_promises(ModelAction *curr)
2618 {
2619         for (unsigned int i = 0; i < promises->size(); i++) {
2620                 Promise *promise = (*promises)[i];
2621                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2622                         continue;
2623
2624                 bool satisfy = true;
2625                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2626                         const ModelAction *act = promise->get_reader(j);
2627                         if (act->happens_before(curr) ||
2628                                         act->could_synchronize_with(curr)) {
2629                                 satisfy = false;
2630                                 break;
2631                         }
2632                 }
2633                 if (satisfy)
2634                         curr->get_node()->set_promise(i);
2635         }
2636 }
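/*
 * Note: the flags set via curr->get_node()->set_promise(i) above are the
 * same per-index flags that pop_promise_to_resolve() later tests with
 * curr->get_node()->get_promise(i), so both functions depend on the current
 * ordering of the 'promises' vector.
 */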
2637
2638 /** Checks promises in response to a change in a thread's ClockVector. */
2639 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2640 {
2641         for (unsigned int i = 0; i < promises->size(); i++) {
2642                 Promise *promise = (*promises)[i];
2643                 if (!promise->thread_is_available(tid))
2644                         continue;
2645                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2646                         const ModelAction *act = promise->get_reader(j);
2647                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2648                                         merge_cv->synchronized_since(act)) {
2649                                 if (promise->eliminate_thread(tid)) {
2650                                         /* Promise has failed */
2651                                         priv->failed_promise = true;
2652                                         return;
2653                                 }
2654                         }
2655                 }
2656         }
2657 }
2658
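/**
 * @brief Checks whether any pending promise has already failed (e.g., after
 * a thread has been disabled) and records the failure
 */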
2659 void ModelChecker::check_promises_thread_disabled()
2660 {
2661         for (unsigned int i = 0; i < promises->size(); i++) {
2662                 Promise *promise = (*promises)[i];
2663                 if (promise->has_failed()) {
2664                         priv->failed_promise = true;
2665                         return;
2666                 }
2667         }
2668 }
2669
2670 /**
2671  * @brief Checks promise thread availability in response to an addition to
2672  * the modification order.
2673  *
2674  * We test whether threads are still available for satisfying promises after an
2675  * addition to our modification order constraints. Those that are unavailable
2676  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2677  * that promise has failed.
2678  *
2679  * @param act The ModelAction which updated the modification order
2680  * @param is_read_check Should be true if act is a read and we must check for
2681  * updates to the store from which it read (there is a distinction here for
2682  * RMWs, which are both a load and a store)
2683  */
2684 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2685 {
2686         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2687
2688         for (unsigned int i = 0; i < promises->size(); i++) {
2689                 Promise *promise = (*promises)[i];
2690
2691                 // Is this promise on the same location?
2692                 if (!promise->same_location(write))
2693                         continue;
2694
2695                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2696                         const ModelAction *pread = promise->get_reader(j);
2697                         if (!pread->happens_before(act))
2698                                 continue;
2699                         if (mo_graph->checkPromise(write, promise)) {
2700                                 priv->failed_promise = true;
2701                                 return;
2702                         }
2703                         break;
2704                 }
2705
2706                 // Don't do any lookups twice for the same thread
2707                 if (!promise->thread_is_available(act->get_tid()))
2708                         continue;
2709
2710                 if (mo_graph->checkReachable(promise, write)) {
2711                         if (mo_graph->checkPromise(write, promise)) {
2712                                 priv->failed_promise = true;
2713                                 return;
2714                         }
2715                 }
2716         }
2717 }
2718
2719 /**
2720  * Compute the set of writes that may break the current pending release
2721  * sequence. This information is extracted from previous release sequence
2722  * calculations.
2723  *
2724  * @param curr The current ModelAction. Must be a release sequence fixup
2725  * action.
2726  */
2727 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2728 {
2729         if (pending_rel_seqs->empty())
2730                 return;
2731
2732         struct release_seq *pending = pending_rel_seqs->back();
2733         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2734                 const ModelAction *write = pending->writes[i];
2735                 curr->get_node()->add_relseq_break(write);
2736         }
2737
2738         /* NULL means don't break the sequence; just synchronize */
2739         curr->get_node()->add_relseq_break(NULL);
2740 }
2741
2742 /**
2743  * Build up an initial set of all past writes that this 'read' action may read
2744  * from, as well as any previously-observed future values that must still be valid.
2745  *
2746  * @param curr is the current ModelAction that we are exploring; it must be a
2747  * 'read' operation.
2748  */
2749 void ModelChecker::build_may_read_from(ModelAction *curr)
2750 {
2751         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2752         unsigned int i;
2753         ASSERT(curr->is_read());
2754
2755         ModelAction *last_sc_write = NULL;
2756
2757         if (curr->is_seqcst())
2758                 last_sc_write = get_last_seq_cst_write(curr);
2759
2760         /* Iterate over all threads */
2761         for (i = 0; i < thrd_lists->size(); i++) {
2762                 /* Iterate over actions in thread, starting from most recent */
2763                 action_list_t *list = &(*thrd_lists)[i];
2764                 action_list_t::reverse_iterator rit;
2765                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2766                         ModelAction *act = *rit;
2767
2768                         /* Only consider 'write' actions */
2769                         if (!act->is_write() || act == curr)
2770                                 continue;
2771
2772                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2773                         bool allow_read = true;
2774
2775                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2776                                 allow_read = false;
2777                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2778                                 allow_read = false;
2779
2780                         if (allow_read) {
2781                                 /* Only add feasible reads */
2782                                 mo_graph->startChanges();
2783                                 r_modification_order(curr, act);
2784                                 if (!is_infeasible())
2785                                         curr->get_node()->add_read_from_past(act);
2786                                 mo_graph->rollbackChanges();
2787                         }
2788
2789                         /* Include at most one act per thread that "happens before" curr */
2790                         if (act->happens_before(curr))
2791                                 break;
2792                 }
2793         }
2794
2795         /* Inherit existing, promised future values */
2796         for (i = 0; i < promises->size(); i++) {
2797                 const Promise *promise = (*promises)[i];
2798                 const ModelAction *promise_read = promise->get_reader(0);
2799                 if (promise_read->same_var(curr)) {
2800                         /* Only add feasible future-values */
2801                         mo_graph->startChanges();
2802                         r_modification_order(curr, promise);
2803                         if (!is_infeasible())
2804                                 curr->get_node()->add_read_from_promise(promise_read);
2805                         mo_graph->rollbackChanges();
2806                 }
2807         }
2808
2809         /* We may find no valid may-read-from only if the execution is doomed */
2810         if (!curr->get_node()->read_from_size()) {
2811                 priv->no_valid_reads = true;
2812                 set_assert();
2813         }
2814
2815         if (DBG_ENABLED()) {
2816                 model_print("Reached read action:\n");
2817                 curr->print();
2818                 model_print("Printing read_from_past\n");
2819                 curr->get_node()->print_read_from_past();
2820                 model_print("End printing read_from_past\n");
2821         }
2822 }
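/*
 * A worked sketch of the seq_cst filtering performed above, under an assumed
 * interleaving in which both stores precede the load in the trace:
 *
 *   T1: x.store(1, memory_order_seq_cst);    // W1
 *   T1: x.store(2, memory_order_seq_cst);    // W2 == last_sc_write
 *   T2: r = x.load(memory_order_seq_cst);    // curr
 *
 * W1 is seq_cst and is not the last seq_cst write, so it is excluded from
 * curr's may-read-from set; W2 (and any non-seq_cst store that does not
 * happen before W2) remains a candidate.
 */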
2823
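/**
 * @brief Check whether a read whose thread is on the sleep set may read from
 * a particular write
 *
 * Walks backward through the chain of RMWs feeding @a write: the read is
 * permitted if the chain reaches an uninitialized value or if some write in
 * the chain is a release performed while the reading thread was in the sleep
 * set; otherwise the read is rejected once a non-RMW write is reached.
 *
 * @param curr The read action, whose thread has the sleep flag set
 * @param write The candidate write
 * @return True if curr may read from write
 */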
2824 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2825 {
2826         for ( ; write != NULL; write = write->get_reads_from()) {
2827                 /* UNINIT actions don't have a Node, and they never sleep */
2828                 if (write->is_uninitialized())
2829                         return true;
2830                 Node *prevnode = write->get_node()->get_parent();
2831
2832                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2833                 if (write->is_release() && thread_sleep)
2834                         return true;
2835                 if (!write->is_rmw())
2836                         return false;
2837         }
2838         return true;
2839 }
2840
2841 /**
2842  * @brief Get an action representing an uninitialized atomic
2843  *
2844  * This function may create a new one or try to retrieve one from the NodeStack
2845  *
2846  * @param curr The current action, which prompts the creation of an UNINIT action
2847  * @return A pointer to the UNINIT ModelAction
2848  */
2849 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2850 {
2851         Node *node = curr->get_node();
2852         ModelAction *act = node->get_uninit_action();
2853         if (!act) {
2854                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2855                 node->set_uninit_action(act);
2856         }
2857         act->create_cv(NULL);
2858         return act;
2859 }
2860
2861 static void print_list(action_list_t *list)
2862 {
2863         action_list_t::iterator it;
2864
2865         model_print("---------------------------------------------------------------------\n");
2866
2867         unsigned int hash = 0;
2868
2869         for (it = list->begin(); it != list->end(); it++) {
2870                 const ModelAction *act = *it;
2871                 if (act->get_seq_number() > 0)
2872                         act->print();
2873                 hash = hash^(hash<<3)^((*it)->hash());
2874         }
2875         model_print("HASH %u\n", hash);
2876         model_print("---------------------------------------------------------------------\n");
2877 }
2878
2879 #if SUPPORT_MOD_ORDER_DUMP
2880 void ModelChecker::dumpGraph(char *filename) const
2881 {
2882         char buffer[200];
2883         sprintf(buffer, "%s.dot", filename);
2884         FILE *file = fopen(buffer, "w");
2885         fprintf(file, "digraph %s {\n", filename);
2886         mo_graph->dumpNodes(file);
2887         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2888
2889         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2890                 ModelAction *act = *it;
2891                 if (act->is_read()) {
2892                         mo_graph->dot_print_node(file, act);
2893                         if (act->get_reads_from())
2894                                 mo_graph->dot_print_edge(file,
2895                                                 act->get_reads_from(),
2896                                                 act,
2897                                                 "label=\"rf\", color=red, weight=2");
2898                         else
2899                                 mo_graph->dot_print_edge(file,
2900                                                 act->get_reads_from_promise(),
2901                                                 act,
2902                                                 "label=\"rf\", color=red");
2903                 }
2904                 if (thread_array[id_to_int(act->get_tid())]) {
2905                         mo_graph->dot_print_edge(file,
2906                                         thread_array[id_to_int(act->get_tid())],
2907                                         act,
2908                                         "label=\"sb\", color=blue, weight=400");
2909                 }
2910
2911                 thread_array[id_to_int(act->get_tid())] = act;
2912         }
2913         fprintf(file, "}\n");
2914         model_free(thread_array);
2915         fclose(file);
2916 }
2917 #endif
2918
2919 /** @brief Prints an execution trace summary. */
2920 void ModelChecker::print_summary() const
2921 {
2922 #if SUPPORT_MOD_ORDER_DUMP
2923         char buffername[100];
2924         sprintf(buffername, "exec%04u", stats.num_total);
2925         mo_graph->dumpGraphToFile(buffername);
2926         sprintf(buffername, "graph%04u", stats.num_total);
2927         dumpGraph(buffername);
2928 #endif
2929
2930         model_print("Execution %d:", stats.num_total);
2931         if (isfeasibleprefix()) {
2932                 if (scheduler->all_threads_sleeping())
2933                         model_print(" SLEEP-SET REDUNDANT");
2934                 model_print("\n");
2935         } else
2936                 print_infeasibility(" INFEASIBLE");
2937         print_list(action_trace);
2938         model_print("\n");
2939         if (!promises->empty()) {
2940                 model_print("Pending promises:\n");
2941                 for (unsigned int i = 0; i < promises->size(); i++) {
2942                         model_print(" [P%u] ", i);
2943                         (*promises)[i]->print();
2944                 }
2945                 model_print("\n");
2946         }
2947 }
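/*
 * Usage note: when SUPPORT_MOD_ORDER_DUMP is enabled, print_summary() emits
 * per-execution DOT files (the "graph%04u" names above become
 * "graph%04u.dot" via dumpGraph()). They can be rendered with Graphviz,
 * e.g.:
 *
 *   dot -Tpng graph0000.dot -o graph0000.png
 *
 * The exact numbering depends on stats.num_total for the execution.
 */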
2948
2949 /**
2950  * Add a Thread to the system for the first time. Should only be called once
2951  * per thread.
2952  * @param t The Thread to add
2953  */
2954 void ModelChecker::add_thread(Thread *t)
2955 {
2956         thread_map->put(id_to_int(t->get_id()), t);
2957         scheduler->add_thread(t);
2958 }
2959
2960 /**
2961  * @brief Get a Thread reference by its ID
2962  * @param tid The Thread's ID
2963  * @return A Thread reference
2964  */
2965 Thread * ModelChecker::get_thread(thread_id_t tid) const
2966 {
2967         return thread_map->get(id_to_int(tid));
2968 }
2969
2970 /**
2971  * @brief Get a reference to the Thread in which a ModelAction was executed
2972  * @param act The ModelAction
2973  * @return A Thread reference
2974  */
2975 Thread * ModelChecker::get_thread(const ModelAction *act) const
2976 {
2977         return get_thread(act->get_tid());
2978 }
2979
2980 /**
2981  * @brief Get a Promise's "promise number"
2982  *
2983  * A "promise number" is an index number that is unique to a promise, valid
2984  * only for a specific snapshot of an execution trace. Promises may come and go
2985  * as they are generated and resolved, so an index only retains meaning for the
2986  * current snapshot.
2987  *
2988  * @param promise The Promise to check
2989  * @return The promise index, if the promise still is valid; otherwise -1
2990  */
2991 int ModelChecker::get_promise_number(const Promise *promise) const
2992 {
2993         for (unsigned int i = 0; i < promises->size(); i++)
2994                 if ((*promises)[i] == promise)
2995                         return i;
2996         /* Not found */
2997         return -1;
2998 }
2999
3000 /**
3001  * @brief Check if a Thread is currently enabled
3002  * @param t The Thread to check
3003  * @return True if the Thread is currently enabled
3004  */
3005 bool ModelChecker::is_enabled(Thread *t) const
3006 {
3007         return scheduler->is_enabled(t);
3008 }
3009
3010 /**
3011  * @brief Check if a Thread is currently enabled
3012  * @param tid The ID of the Thread to check
3013  * @return True if the Thread is currently enabled
3014  */
3015 bool ModelChecker::is_enabled(thread_id_t tid) const
3016 {
3017         return scheduler->is_enabled(tid);
3018 }
3019
3020 /**
3021  * Switch from a model-checker context to a user-thread context. This is the
3022  * complement of ModelChecker::switch_to_master and must be called from the
3023  * model-checker context.
3024  *
3025  * @param thread The user-thread to switch to
3026  */
3027 void ModelChecker::switch_from_master(Thread *thread)
3028 {
3029         scheduler->set_current_thread(thread);
3030         Thread::swap(&system_context, thread);
3031 }
3032
3033 /**
3034  * Switch from a user-context to the "master thread" context (a.k.a. system
3035  * context). This switch is made with the intention of exploring a particular
3036  * model-checking action (described by a ModelAction object). Must be called
3037  * from a user-thread context.
3038  *
3039  * @param act The current action that will be explored. May be NULL only if
3040  * the trace is exiting via an assertion (see ModelChecker::set_assert and
3041  * ModelChecker::has_asserted).
3042  * @return The value returned by the current action
3043  */
3044 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3045 {
3046         DBG();
3047         Thread *old = thread_current();
3048         scheduler->set_current_thread(NULL);
3049         ASSERT(!old->get_pending());
3050         old->set_pending(act);
3051         if (Thread::swap(old, &system_context) < 0) {
3052                 perror("swap threads");
3053                 exit(EXIT_FAILURE);
3054         }
3055         return old->get_return_value();
3056 }
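/*
 * A minimal call-pattern sketch, assuming an interception-layer wrapper of
 * this general shape (the wrapper name and ModelAction constructor arguments
 * are illustrative assumptions):
 *
 *   uint64_t model_read_action(void *obj, memory_order ord)
 *   {
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 *
 * The ModelAction is stashed as the calling Thread's pending action and is
 * later consumed in the model-checker context by ModelChecker::run() via
 * take_step().
 */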
3057
3058 /**
3059  * Takes the next step in the execution, if possible.
3060  * @param curr The current step to take
3061  * @return The next Thread to run, if any; NULL if this execution
3062  * should terminate
3063  */
3064 Thread * ModelChecker::take_step(ModelAction *curr)
3065 {
3066         Thread *curr_thrd = get_thread(curr);
3067         ASSERT(curr_thrd->get_state() == THREAD_READY);
3068
3069         curr = check_current_action(curr);
3070
3071         /* Infeasible -> don't take any more steps */
3072         if (is_infeasible())
3073                 return NULL;
3074         else if (isfeasibleprefix() && have_bug_reports()) {
3075                 set_assert();
3076                 return NULL;
3077         }
3078
3079         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3080                 return NULL;
3081
3082         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3083                 scheduler->remove_thread(curr_thrd);
3084
3085         Thread *next_thrd = NULL;
3086         if (curr)
3087                 next_thrd = action_select_next_thread(curr);
3088         if (!next_thrd)
3089                 next_thrd = get_next_thread();
3090
3091         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3092                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3093
3094         return next_thrd;
3095 }
3096
3097 /** Wrapper to run the user's main function, with appropriate arguments */
3098 void user_main_wrapper(void *)
3099 {
3100         user_main(model->params.argc, model->params.argv);
3101 }
3102
3103 /** @brief Run ModelChecker for the user program */
3104 void ModelChecker::run()
3105 {
3106         do {
3107                 thrd_t user_thread;
3108                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3109                 add_thread(t);
3110
3111                 do {
3112                         /*
3113                          * Stash next pending action(s) for thread(s). We
3114                          * should only need to stash one thread's action--the
3115                          * thread which just took a step--plus the first step
3116                          * for any newly-created thread.
3117                          */
3118                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3119                                 thread_id_t tid = int_to_id(i);
3120                                 Thread *thr = get_thread(tid);
3121                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3122                                         switch_from_master(thr);
3123                                         if (is_circular_wait(thr))
3124                                                 assert_bug("Deadlock detected");
3125                                 }
3126                         }
3127
3128                         /* Catch assertions from prior take_step or from
3129                          * between-ModelAction bugs (e.g., data races) */
3130                         if (has_asserted())
3131                                 break;
3132
3133                         /* Consume the next action for a Thread */
3134                         ModelAction *curr = t->get_pending();
3135                         t->set_pending(NULL);
3136                         t = take_step(curr);
3137                 } while (t && !t->is_model_thread());
3138
3139                 /*
3140                  * Launch end-of-execution release sequence fixups only when
3141                  * the execution is otherwise feasible AND:
3142                  *
3143                  * (1) there are pending release sequences
3144                  * (2) there are pending assertions that could be invalidated by
3145                  * a change in clock vectors (i.e., data races)
3146                  * (3) there are no pending promises
3147                  */
3148                 while (!pending_rel_seqs->empty() &&
3149                                 is_feasible_prefix_ignore_relseq() &&
3150                                 !unrealizedraces.empty()) {
3151                         model_print("*** WARNING: release sequence fixup action "
3152                                         "(%zu pending release sequence(s)) ***\n",
3153                                         pending_rel_seqs->size());
3154                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3155                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3156                                         model_thread);
3157                         take_step(fixup);
3158                 }
3159         } while (next_execution());
3160
3161         model_print("******* Model-checking complete: *******\n");
3162         print_stats();
3163 }