1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
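/*
 * Usage sketch (illustrative, mirroring the calls in get_last_conflict() and
 * process_mutex() below; 'act' stands for an arbitrary ModelAction): the two
 * helpers above lazily create the per-location list/vector on first access,
 * so callers never have to test for a missing entry themselves.
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);   // safe even for the first action at this address
 */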
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: the current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; NULL if no threads
221  * remain to be executed
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
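/*
 * Illustrative sketch of the "do not split atomic actions" rule above
 * (assumes a std::atomic<int> x in the user program): a read-modify-write
 * such as
 *
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * is modeled as an ATOMIC_RMWR followed by an ATOMIC_RMW in the same thread.
 * Because the RMWR takes the is_rmwr() branch, get_next_thread() keeps
 * scheduling that same thread, so no other thread can run between the read
 * and write halves of the RMW.
 */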
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be. This method marks each such thread's pending action with
300  * the sleep flag, so later processing recognizes it as coming from a sleeping thread.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* All acquire/release fences and fence-acquire/store-release */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can awake load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
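/*
 * Illustrative sketch of the wake-up rule (assumes a std::atomic<int> x shared
 * by two user threads): if a thread sits in the sleep set with a pending
 *
 *   r = x.load(std::memory_order_acquire);
 *
 * and the current action of another thread is
 *
 *   x.store(1, std::memory_order_release);
 *
 * then the two actions could synchronize, should_wake_up() returns true, and
 * wake_up_sleeping_actions() removes the sleeping thread from the sleep set so
 * it can observe the newly available write.
 */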
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
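/*
 * Illustrative sketch of an execution this detects (assumes the <threads.h>
 * and <mutex> wrappers provided to user programs; the function names are
 * hypothetical):
 *
 *   std::mutex m1, m2;
 *   void a(void *) { m1.lock(); m2.lock(); m2.unlock(); m1.unlock(); }
 *   void b(void *) { m2.lock(); m1.lock(); m1.unlock(); m2.unlock(); }
 *
 * In the interleaving where a() acquires m1 and b() acquires m2, both threads
 * block on their second lock(): no thread is enabled, yet both still have a
 * pending action, so is_deadlocked() returns true at the end of the execution.
 */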
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
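/*
 * Illustrative sketch of a user-side check (the helper and its argument are
 * hypothetical): user code reports a property violation through the global
 * 'model' instance, and if the current trace is already a feasible prefix the
 * execution is halted immediately.
 *
 *   void check_balance(int balance)
 *   {
 *           if (balance < 0)
 *                   model->assert_user_bug("negative balance");
 *   }
 */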
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else
487                 stats.num_redundant++;
488 }
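/*
 * Because the branches above are mutually exclusive and exhaustive, the
 * counters always satisfy
 *
 *   stats.num_total == stats.num_infeasible + stats.num_buggy_executions
 *                      + stats.num_complete + stats.num_redundant
 *
 * which serves as a sanity check on the totals reported by print_stats().
 */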
489
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
492 {
493         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494         model_print("Number of redundant executions: %d\n", stats.num_redundant);
495         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497         model_print("Total executions: %d\n", stats.num_total);
498         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
499 }
500
501 /**
502  * @brief End-of-execution print
503  * @param printbugs Should any existing bugs be printed?
504  */
505 void ModelChecker::print_execution(bool printbugs) const
506 {
507         print_program_output();
508
509         if (DBG_ENABLED() || params.verbose) {
510                 model_print("Earliest divergence point since last feasible execution:\n");
511                 if (earliest_diverge)
512                         earliest_diverge->print();
513                 else
514                         model_print("(Not set)\n");
515
516                 model_print("\n");
517                 print_stats();
518         }
519
520         /* Don't print invalid bugs */
521         if (printbugs)
522                 print_bugs();
523
524         model_print("\n");
525         print_summary();
526 }
527
528 /**
529  * Queries the model-checker for more executions to explore and, if one
530  * exists, resets the model-checker state to execute a new execution.
531  *
532  * @return If there are more executions to explore, return true. Otherwise,
533  * return false.
534  */
535 bool ModelChecker::next_execution()
536 {
537         DBG();
538         /* Is this execution a feasible execution that's worth bug-checking? */
539         bool complete = isfeasibleprefix() && (is_complete_execution() ||
540                         have_bug_reports());
541
542         /* End-of-execution bug checks */
543         if (complete) {
544                 if (is_deadlocked())
545                         assert_bug("Deadlock detected");
546
547                 checkDataRaces();
548         }
549
550         record_stats();
551
552         /* Output */
553         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
554                 print_execution(complete);
555         else
556                 clear_program_output();
557
558         if (complete)
559                 earliest_diverge = NULL;
560
561         if ((diverge = get_next_backtrack()) == NULL)
562                 return false;
563
564         if (DBG_ENABLED()) {
565                 model_print("Next execution will diverge at:\n");
566                 diverge->print();
567         }
568
569         reset_to_initial_state();
570         return true;
571 }
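/*
 * Illustrative sketch of how a top-level driver is expected to use
 * next_execution(); run_user_program() is a hypothetical helper, and the real
 * driver (outside this file) differs in its details:
 *
 *   do {
 *           run_user_program();   // execute one complete interleaving
 *   } while (model->next_execution());
 */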
572
573 /**
574  * @brief Find the last fence-related backtracking conflict for a ModelAction
575  *
576  * This function performs the search for the most recent conflicting action
577  * against which we should perform backtracking, as affected by fence
578  * operations. This includes pairs of potentially-synchronizing actions which
579  * occur due to fence-acquire or fence-release, and hence should be explored in
580  * the opposite execution order.
581  *
582  * @param act The current action
583  * @return The most recent action which conflicts with act due to fences
584  */
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
586 {
587         /* Only perform release/acquire fence backtracking for stores */
588         if (!act->is_write())
589                 return NULL;
590
591         /* Find a fence-release (or, act is a release) */
592         ModelAction *last_release;
593         if (act->is_release())
594                 last_release = act;
595         else
596                 last_release = get_last_fence_release(act->get_tid());
597         if (!last_release)
598                 return NULL;
599
600         /* Skip past the release */
601         action_list_t *list = action_trace;
602         action_list_t::reverse_iterator rit;
603         for (rit = list->rbegin(); rit != list->rend(); rit++)
604                 if (*rit == last_release)
605                         break;
606         ASSERT(rit != list->rend());
607
608         /* Find a prior:
609          *   load-acquire
610          * or
611          *   load --sb-> fence-acquire */
612         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
613         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
614         bool found_acquire_fences = false;
615         for ( ; rit != list->rend(); rit++) {
616                 ModelAction *prev = *rit;
617                 if (act->same_thread(prev))
618                         continue;
619
620                 int tid = id_to_int(prev->get_tid());
621
622                 if (prev->is_read() && act->same_var(prev)) {
623                         if (prev->is_acquire()) {
624                                 /* Found most recent load-acquire, don't need
625                                  * to search for more fences */
626                                 if (!found_acquire_fences)
627                                         return NULL;
628                         } else {
629                                 prior_loads[tid] = prev;
630                         }
631                 }
632                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633                         found_acquire_fences = true;
634                         acquire_fences[tid] = prev;
635                 }
636         }
637
638         ModelAction *latest_backtrack = NULL;
639         for (unsigned int i = 0; i < acquire_fences.size(); i++)
640                 if (acquire_fences[i] && prior_loads[i])
641                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642                                 latest_backtrack = acquire_fences[i];
643         return latest_backtrack;
644 }
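/*
 * Illustrative sketch of the pattern searched for above (assumes a relaxed
 * std::atomic<int> x shared by two user threads):
 *
 *   // Thread 1:
 *   std::atomic_thread_fence(std::memory_order_release);
 *   x.store(1, std::memory_order_relaxed);
 *
 *   // Thread 2:
 *   r = x.load(std::memory_order_relaxed);
 *   std::atomic_thread_fence(std::memory_order_acquire);
 *
 * Thread 1's fence-release --sb-> store and Thread 2's load --sb->
 * fence-acquire form a potentially-synchronizing pair. If Thread 2's actions
 * appear earlier in the current trace, then when the store is the current
 * action this function returns Thread 2's fence-acquire, so the pair is also
 * explored in the opposite execution order.
 */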
645
646 /**
647  * @brief Find the last backtracking conflict for a ModelAction
648  *
649  * This function performs the search for the most recent conflicting action
650  * against which we should perform backtracking. This primarily includes pairs of
651  * synchronizing actions which should be explored in the opposite execution
652  * order.
653  *
654  * @param act The current action
655  * @return The most recent action which conflicts with act
656  */
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
658 {
659         switch (act->get_type()) {
660         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
661         case ATOMIC_READ:
662         case ATOMIC_WRITE:
663         case ATOMIC_RMW: {
664                 ModelAction *ret = NULL;
665
666                 /* linear search: from most recent to oldest */
667                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668                 action_list_t::reverse_iterator rit;
669                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670                         ModelAction *prev = *rit;
671                         if (prev->could_synchronize_with(act)) {
672                                 ret = prev;
673                                 break;
674                         }
675                 }
676
677                 ModelAction *ret2 = get_last_fence_conflict(act);
678                 if (!ret2)
679                         return ret;
680                 if (!ret)
681                         return ret2;
682                 if (*ret < *ret2)
683                         return ret2;
684                 return ret;
685         }
686         case ATOMIC_LOCK:
687         case ATOMIC_TRYLOCK: {
688                 /* linear search: from most recent to oldest */
689                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690                 action_list_t::reverse_iterator rit;
691                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692                         ModelAction *prev = *rit;
693                         if (act->is_conflicting_lock(prev))
694                                 return prev;
695                 }
696                 break;
697         }
698         case ATOMIC_UNLOCK: {
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (!act->same_thread(prev) && prev->is_failed_trylock())
705                                 return prev;
706                 }
707                 break;
708         }
709         case ATOMIC_WAIT: {
710                 /* linear search: from most recent to oldest */
711                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712                 action_list_t::reverse_iterator rit;
713                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714                         ModelAction *prev = *rit;
715                         if (!act->same_thread(prev) && prev->is_failed_trylock())
716                                 return prev;
717                         if (!act->same_thread(prev) && prev->is_notify())
718                                 return prev;
719                 }
720                 break;
721         }
722
723         case ATOMIC_NOTIFY_ALL:
724         case ATOMIC_NOTIFY_ONE: {
725                 /* linear search: from most recent to oldest */
726                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727                 action_list_t::reverse_iterator rit;
728                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729                         ModelAction *prev = *rit;
730                         if (!act->same_thread(prev) && prev->is_wait())
731                                 return prev;
732                 }
733                 break;
734         }
735         default:
736                 break;
737         }
738         return NULL;
739 }
740
741 /** This method finds backtracking points against which we should try to
742  * reorder the parameter ModelAction.
743  *
744  * @param act The ModelAction to find backtracking points for.
745  */
746 void ModelChecker::set_backtracking(ModelAction *act)
747 {
748         Thread *t = get_thread(act);
749         ModelAction *prev = get_last_conflict(act);
750         if (prev == NULL)
751                 return;
752
753         Node *node = prev->get_node()->get_parent();
754
755         int low_tid, high_tid;
756         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
757                 low_tid = id_to_int(act->get_tid());
758                 high_tid = low_tid + 1;
759         } else {
760                 low_tid = 0;
761                 high_tid = get_num_threads();
762         }
763
764         for (int i = low_tid; i < high_tid; i++) {
765                 thread_id_t tid = int_to_id(i);
766
767                 /* Make sure this thread can be enabled here. */
768                 if (i >= node->get_num_threads())
769                         break;
770
771                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
772                 if (node->enabled_status(tid) != THREAD_ENABLED)
773                         continue;
774
775                 /* Check if this has been explored already */
776                 if (node->has_been_explored(tid))
777                         continue;
778
779                 /* See if fairness allows */
780                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
781                         bool unfair = false;
782                         for (int t = 0; t < node->get_num_threads(); t++) {
783                                 thread_id_t tother = int_to_id(t);
784                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
785                                         unfair = true;
786                                         break;
787                                 }
788                         }
789                         if (unfair)
790                                 continue;
791                 }
792                 /* Cache the latest backtracking point */
793                 set_latest_backtrack(prev);
794
795                 /* If this is a new backtracking point, mark the tree */
796                 if (!node->set_backtrack(tid))
797                         continue;
798                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
799                                         id_to_int(prev->get_tid()),
800                                         id_to_int(t->get_id()));
801                 if (DBG_ENABLED()) {
802                         prev->print();
803                         act->print();
804                 }
805         }
806 }
807
808 /**
809  * @brief Cache a backtracking point as the "most recent", if eligible
810  *
811  * Note that this does not prepare the NodeStack for this backtracking
812  * operation; it only caches the action on a per-execution basis.
813  *
814  * @param act The operation at which we should explore a different next action
815  * (i.e., backtracking point)
816  * @return True, if this action is now the most recent backtracking point;
817  * false otherwise
818  */
819 bool ModelChecker::set_latest_backtrack(ModelAction *act)
820 {
821         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
822                 priv->next_backtrack = act;
823                 return true;
824         }
825         return false;
826 }
827
828 /**
829  * Returns last backtracking point. The model checker will explore a different
830  * path for this point in the next execution.
831  * @return The ModelAction at which the next execution should diverge.
832  */
833 ModelAction * ModelChecker::get_next_backtrack()
834 {
835         ModelAction *next = priv->next_backtrack;
836         priv->next_backtrack = NULL;
837         return next;
838 }
839
840 /**
841  * Processes a read model action.
842  * @param curr is the read model action to process.
843  * @return True if processing this read updates the mo_graph.
844  */
845 bool ModelChecker::process_read(ModelAction *curr)
846 {
847         Node *node = curr->get_node();
848         uint64_t value = VALUE_NONE;
849         bool updated = false;
850         while (true) {
851                 switch (node->get_read_from_status()) {
852                 case READ_FROM_PAST: {
853                         const ModelAction *rf = node->get_read_from_past();
854                         ASSERT(rf);
855
856                         mo_graph->startChanges();
857                         value = rf->get_value();
858                         check_recency(curr, rf);
859                         bool r_status = r_modification_order(curr, rf);
860
861                         if (is_infeasible() && node->increment_read_from()) {
862                                 mo_graph->rollbackChanges();
863                                 priv->too_many_reads = false;
864                                 continue;
865                         }
866
867                         read_from(curr, rf);
868                         mo_graph->commitChanges();
869                         mo_check_promises(curr, true);
870
871                         updated |= r_status;
872                         break;
873                 }
874                 case READ_FROM_PROMISE: {
875                         Promise *promise = curr->get_node()->get_read_from_promise();
876                         value = promise->get_value();
877                         curr->set_read_from_promise(promise);
878                         mo_graph->startChanges();
879                         updated = r_modification_order(curr, promise);
880                         mo_graph->commitChanges();
881                         break;
882                 }
883                 case READ_FROM_FUTURE: {
884                         /* Read from future value */
885                         struct future_value fv = node->get_future_value();
886                         Promise *promise = new Promise(curr, fv);
887                         value = fv.value;
888                         curr->set_read_from_promise(promise);
889                         promises->push_back(promise);
890                         mo_graph->startChanges();
891                         updated = r_modification_order(curr, promise);
892                         mo_graph->commitChanges();
893                         break;
894                 }
895                 default:
896                         ASSERT(false);
897                 }
898                 get_thread(curr)->set_return_value(value);
899                 return updated;
900         }
901 }
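/*
 * Illustrative sketch of when the READ_FROM_FUTURE case matters (assumes two
 * relaxed std::atomic<int> variables x and y, both initially 0):
 *
 *   // Thread 1:
 *   r1 = y.load(std::memory_order_relaxed);
 *   x.store(1, std::memory_order_relaxed);
 *
 *   // Thread 2:
 *   r2 = x.load(std::memory_order_relaxed);
 *   y.store(1, std::memory_order_relaxed);
 *
 * The outcome r1 == 1 && r2 == 1 is allowed by the memory model, but each load
 * must then read from a store that has not yet executed in the current trace.
 * The Node supplies such a value as a future_value, a Promise is created for
 * it, and the promise is later resolved (or declared failed) depending on
 * whether a matching store actually appears.
 */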
902
903 /**
904  * Processes a lock, trylock, unlock, wait, or notify model action.
905  * @param curr is the mutex-related model action to process.
906  *
907  * The trylock operation checks whether the lock is taken. If not, it
908  * falls through to the normal lock case. If the lock is taken, it
909  * fails and returns 0.
910  *
911  * The lock operation has already been checked to be enabled, so
912  * it just grabs the lock and synchronizes with the previous unlock.
913  *
914  * The unlock operation has to re-enable all of the threads that are
915  * waiting on the lock.
916  *
917  * @return True if synchronization was updated; false otherwise
918  */
919 bool ModelChecker::process_mutex(ModelAction *curr)
920 {
921         std::mutex *mutex = NULL;
922         struct std::mutex_state *state = NULL;
923
924         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
925                 mutex = (std::mutex *)curr->get_location();
926                 state = mutex->get_state();
927         } else if (curr->is_wait()) {
928                 mutex = (std::mutex *)curr->get_value();
929                 state = mutex->get_state();
930         }
931
932         switch (curr->get_type()) {
933         case ATOMIC_TRYLOCK: {
934                 bool success = !state->islocked;
935                 curr->set_try_lock(success);
936                 if (!success) {
937                         get_thread(curr)->set_return_value(0);
938                         break;
939                 }
940                 get_thread(curr)->set_return_value(1);
941         }
942                 //otherwise fall through to the lock case
943         case ATOMIC_LOCK: {
944                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
945                         assert_bug("Lock access before initialization");
946                 state->islocked = true;
947                 ModelAction *unlock = get_last_unlock(curr);
948                 //synchronize with the previous unlock statement
949                 if (unlock != NULL) {
950                         curr->synchronize_with(unlock);
951                         return true;
952                 }
953                 break;
954         }
955         case ATOMIC_UNLOCK: {
956                 //unlock the lock
957                 state->islocked = false;
958                 //wake up the other threads
959                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
960                 //activate all the waiting threads
961                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
962                         scheduler->wake(get_thread(*rit));
963                 }
964                 waiters->clear();
965                 break;
966         }
967         case ATOMIC_WAIT: {
968                 //unlock the lock
969                 state->islocked = false;
970                 //wake up the other threads
971                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
972                 //activate all the waiting threads
973                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
974                         scheduler->wake(get_thread(*rit));
975                 }
976                 waiters->clear();
977                 //check whether we should go to sleep or not... simulate spurious wakeups
978                 if (curr->get_node()->get_misc() == 0) {
979                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
980                         //disable us
981                         scheduler->sleep(get_thread(curr));
982                 }
983                 break;
984         }
985         case ATOMIC_NOTIFY_ALL: {
986                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
987                 //activate all the waiting threads
988                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
989                         scheduler->wake(get_thread(*rit));
990                 }
991                 waiters->clear();
992                 break;
993         }
994         case ATOMIC_NOTIFY_ONE: {
995                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
996                 int wakeupthread = curr->get_node()->get_misc();
997                 action_list_t::iterator it = waiters->begin();
998                 advance(it, wakeupthread);
999                 scheduler->wake(get_thread(*it));
1000                 waiters->erase(it);
1001                 break;
1002         }
1003
1004         default:
1005                 ASSERT(0);
1006         }
1007         return false;
1008 }
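/*
 * Illustrative sketch of user code exercising the wait/notify cases above
 * (assumes the simplified std::mutex/std::condition_variable wrappers provided
 * to user programs, where wait() takes the mutex directly):
 *
 *   // Waiting thread:          // Notifying thread:
 *   m.lock();                   m.lock();
 *   cv.wait(m);                 cv.notify_one();
 *   m.unlock();                 m.unlock();
 *
 * The ATOMIC_WAIT releases the mutex and, unless the node's misc counter
 * selects a simulated spurious wakeup, parks the thread on
 * condvar_waiters_map; ATOMIC_NOTIFY_ONE then wakes exactly one parked thread,
 * chosen by the notifier's misc counter so that every choice is eventually
 * explored.
 */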
1009
1010 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1011 {
1012         /* Do more ambitious checks now that mo is more complete */
1013         if (mo_may_allow(writer, reader)) {
1014                 Node *node = reader->get_node();
1015
1016                 /* Find an ancestor thread which exists at the time of the reader */
1017                 Thread *write_thread = get_thread(writer);
1018                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1019                         write_thread = write_thread->get_parent();
1020
1021                 struct future_value fv = {
1022                         writer->get_value(),
1023                         writer->get_seq_number() + params.maxfuturedelay,
1024                         write_thread->get_id(),
1025                 };
1026                 if (node->add_future_value(fv))
1027                         set_latest_backtrack(reader);
1028         }
1029 }
1030
1031 /**
1032  * Process a write ModelAction
1033  * @param curr The ModelAction to process
1034  * @return True if the mo_graph was updated or promises were resolved
1035  */
1036 bool ModelChecker::process_write(ModelAction *curr)
1037 {
1038         bool updated_mod_order = w_modification_order(curr);
1039         bool updated_promises = resolve_promises(curr);
1040
1041         if (promises->size() == 0) {
1042                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1043                         struct PendingFutureValue pfv = (*futurevalues)[i];
1044                         add_future_value(pfv.writer, pfv.act);
1045                 }
1046                 futurevalues->clear();
1047         }
1048
1049         mo_graph->commitChanges();
1050         mo_check_promises(curr, false);
1051
1052         get_thread(curr)->set_return_value(VALUE_NONE);
1053         return updated_mod_order || updated_promises;
1054 }
1055
1056 /**
1057  * Process a fence ModelAction
1058  * @param curr The ModelAction to process
1059  * @return True if synchronization was updated
1060  */
1061 bool ModelChecker::process_fence(ModelAction *curr)
1062 {
1063         /*
1064          * fence-relaxed: no-op
1065          * fence-release: only log the occurrence (not in this function), for
1066          *   use in later synchronization
1067          * fence-acquire (this function): search for hypothetical release
1068          *   sequences
1069          */
1070         bool updated = false;
1071         if (curr->is_acquire()) {
1072                 action_list_t *list = action_trace;
1073                 action_list_t::reverse_iterator rit;
1074                 /* Find X : is_read(X) && X --sb-> curr */
1075                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1076                         ModelAction *act = *rit;
1077                         if (act == curr)
1078                                 continue;
1079                         if (act->get_tid() != curr->get_tid())
1080                                 continue;
1081                         /* Stop at the beginning of the thread */
1082                         if (act->is_thread_start())
1083                                 break;
1084                         /* Stop once we reach a prior fence-acquire */
1085                         if (act->is_fence() && act->is_acquire())
1086                                 break;
1087                         if (!act->is_read())
1088                                 continue;
1089                         /* read-acquire will find its own release sequences */
1090                         if (act->is_acquire())
1091                                 continue;
1092
1093                         /* Establish hypothetical release sequences */
1094                         rel_heads_list_t release_heads;
1095                         get_release_seq_heads(curr, act, &release_heads);
1096                         for (unsigned int i = 0; i < release_heads.size(); i++)
1097                                 if (!curr->synchronize_with(release_heads[i]))
1098                                         set_bad_synchronization();
1099                         if (release_heads.size() != 0)
1100                                 updated = true;
1101                 }
1102         }
1103         return updated;
1104 }
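/*
 * Illustrative sketch of the fence-acquire case handled above (assumes an
 * ordinary int data and a relaxed std::atomic<int> flag, both initially 0):
 *
 *   // Thread 1 (producer):
 *   data = 42;
 *   std::atomic_thread_fence(std::memory_order_release);
 *   flag.store(1, std::memory_order_relaxed);
 *
 *   // Thread 2 (consumer):
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;
 *   std::atomic_thread_fence(std::memory_order_acquire);
 *   assert(data == 42);
 *
 * When Thread 2's fence-acquire is processed, the loop above walks back over
 * the prior relaxed load of flag, collects the release heads of the sequence
 * that load read from, and synchronizes the fence with them, so the write to
 * data happens-before the assertion.
 */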
1105
1106 /**
1107  * @brief Process the current action for thread-related activity
1108  *
1109  * Performs current-action processing for a THREAD_* ModelAction. Processing
1110  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1111  * synchronization, etc.  This function is a no-op for non-THREAD actions
1112  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1113  *
1114  * @param curr The current action
1115  * @return True if synchronization was updated or a thread completed
1116  */
1117 bool ModelChecker::process_thread_action(ModelAction *curr)
1118 {
1119         bool updated = false;
1120
1121         switch (curr->get_type()) {
1122         case THREAD_CREATE: {
1123                 thrd_t *thrd = (thrd_t *)curr->get_location();
1124                 struct thread_params *params = (struct thread_params *)curr->get_value();
1125                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1126                 add_thread(th);
1127                 th->set_creation(curr);
1128                 /* Promises can be satisfied by children */
1129                 for (unsigned int i = 0; i < promises->size(); i++) {
1130                         Promise *promise = (*promises)[i];
1131                         if (promise->thread_is_available(curr->get_tid()))
1132                                 promise->add_thread(th->get_id());
1133                 }
1134                 break;
1135         }
1136         case THREAD_JOIN: {
1137                 Thread *blocking = curr->get_thread_operand();
1138                 ModelAction *act = get_last_action(blocking->get_id());
1139                 curr->synchronize_with(act);
1140                 updated = true; /* trigger rel-seq checks */
1141                 break;
1142         }
1143         case THREAD_FINISH: {
1144                 Thread *th = get_thread(curr);
1145                 while (!th->wait_list_empty()) {
1146                         ModelAction *act = th->pop_wait_list();
1147                         scheduler->wake(get_thread(act));
1148                 }
1149                 th->complete();
1150                 /* Completed thread can't satisfy promises */
1151                 for (unsigned int i = 0; i < promises->size(); i++) {
1152                         Promise *promise = (*promises)[i];
1153                         if (promise->thread_is_available(th->get_id()))
1154                                 if (promise->eliminate_thread(th->get_id()))
1155                                         priv->failed_promise = true;
1156                 }
1157                 updated = true; /* trigger rel-seq checks */
1158                 break;
1159         }
1160         case THREAD_START: {
1161                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1162                 break;
1163         }
1164         default:
1165                 break;
1166         }
1167
1168         return updated;
1169 }
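/*
 * Illustrative sketch of the user-level operations behind these cases
 * (assumes the <threads.h> wrapper supplied to user programs; signatures are
 * shown loosely, and worker/arg are hypothetical):
 *
 *   thrd_t t;
 *   thrd_create(&t, worker, arg);   // THREAD_CREATE here, THREAD_START in child
 *   thrd_join(t);                   // THREAD_JOIN; synchronizes with the
 *                                   // child's last action at THREAD_FINISH
 */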
1170
1171 /**
1172  * @brief Process the current action for release sequence fixup activity
1173  *
1174  * Performs model-checker release sequence fixups for the current action,
1175  * forcing a single pending release sequence to break (with a given, potential
1176  * "loose" write) or to complete (i.e., synchronize). If a pending release
1177  * sequence forms a complete release sequence, then we must perform the fixup
1178  * synchronization, mo_graph additions, etc.
1179  *
1180  * @param curr The current action; must be a release sequence fixup action
1181  * @param work_queue The work queue to which to add work items as they are
1182  * generated
1183  */
1184 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1185 {
1186         const ModelAction *write = curr->get_node()->get_relseq_break();
1187         struct release_seq *sequence = pending_rel_seqs->back();
1188         pending_rel_seqs->pop_back();
1189         ASSERT(sequence);
1190         ModelAction *acquire = sequence->acquire;
1191         const ModelAction *rf = sequence->rf;
1192         const ModelAction *release = sequence->release;
1193         ASSERT(acquire);
1194         ASSERT(release);
1195         ASSERT(rf);
1196         ASSERT(release->same_thread(rf));
1197
1198         if (write == NULL) {
1199                 /**
1200                  * @todo Forcing a synchronization requires that we set
1201                  * modification order constraints. For instance, we can't allow
1202                  * a fixup sequence in which two separate read-acquire
1203                  * operations read from the same sequence, where the first one
1204                  * synchronizes and the other doesn't. Essentially, we can't
1205                  * allow any writes to insert themselves between 'release' and
1206                  * 'rf'
1207                  */
1208
1209                 /* Must synchronize */
1210                 if (!acquire->synchronize_with(release)) {
1211                         set_bad_synchronization();
1212                         return;
1213                 }
1214                 /* Re-check all pending release sequences */
1215                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1216                 /* Re-check act for mo_graph edges */
1217                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1218
1219                 /* propagate synchronization to later actions */
1220                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1221                 for (; (*rit) != acquire; rit++) {
1222                         ModelAction *propagate = *rit;
1223                         if (acquire->happens_before(propagate)) {
1224                                 propagate->synchronize_with(acquire);
1225                                 /* Re-check 'propagate' for mo_graph edges */
1226                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1227                         }
1228                 }
1229         } else {
1230                 /* Break release sequence with new edges:
1231                  *   release --mo--> write --mo--> rf */
1232                 mo_graph->addEdge(release, write);
1233                 mo_graph->addEdge(write, rf);
1234         }
1235
1236         /* See if we have realized a data race */
1237         checkDataRaces();
1238 }
1239
1240 /**
1241  * Initialize the current action by performing one or more of the following
1242  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1243  * in the NodeStack, manipulating backtracking sets, allocating and
1244  * initializing clock vectors, and computing the promises to fulfill.
1245  *
1246  * @param curr The current action, as passed from the user context; may be
1247  * freed/invalidated after the execution of this function, with a different
1248  * action "returned" its place (pass-by-reference)
1249  * action "returned" in its place (pass-by-reference)
1250  */
1251 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1252 {
1253         ModelAction *newcurr;
1254
1255         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1256                 newcurr = process_rmw(*curr);
1257                 delete *curr;
1258
1259                 if (newcurr->is_rmw())
1260                         compute_promises(newcurr);
1261
1262                 *curr = newcurr;
1263                 return false;
1264         }
1265
1266         (*curr)->set_seq_number(get_next_seq_num());
1267
1268         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1269         if (newcurr) {
1270                 /* First restore type and order in case of RMW operation */
1271                 if ((*curr)->is_rmwr())
1272                         newcurr->copy_typeandorder(*curr);
1273
1274                 ASSERT((*curr)->get_location() == newcurr->get_location());
1275                 newcurr->copy_from_new(*curr);
1276
1277                 /* Discard duplicate ModelAction; use action from NodeStack */
1278                 delete *curr;
1279
1280                 /* Always compute new clock vector */
1281                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1282
1283                 *curr = newcurr;
1284                 return false; /* Action was explored previously */
1285         } else {
1286                 newcurr = *curr;
1287
1288                 /* Always compute new clock vector */
1289                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1290
1291                 /* Assign most recent release fence */
1292                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1293
1294                 /*
1295                  * Perform one-time actions when pushing new ModelAction onto
1296                  * NodeStack
1297                  */
1298                 if (newcurr->is_write())
1299                         compute_promises(newcurr);
1300                 else if (newcurr->is_relseq_fixup())
1301                         compute_relseq_breakwrites(newcurr);
1302                 else if (newcurr->is_wait())
1303                         newcurr->get_node()->set_misc_max(2);
1304                 else if (newcurr->is_notify_one()) {
1305                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1306                 }
1307                 return true; /* This was a new ModelAction */
1308         }
1309 }
1310
1311 /**
1312  * @brief Establish reads-from relation between two actions
1313  *
1314  * Perform basic operations involved with establishing a concrete rf relation,
1315  * including setting the ModelAction data and checking for release sequences.
1316  *
1317  * @param act The action that is reading (must be a read)
1318  * @param rf The action from which we are reading (must be a write)
1319  *
1320  * @return True if this read established synchronization
1321  */
1322 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1323 {
1324         act->set_read_from(rf);
1325         if (rf != NULL && act->is_acquire()) {
1326                 rel_heads_list_t release_heads;
1327                 get_release_seq_heads(act, act, &release_heads);
1328                 int num_heads = release_heads.size();
1329                 for (unsigned int i = 0; i < release_heads.size(); i++)
1330                         if (!act->synchronize_with(release_heads[i])) {
1331                                 set_bad_synchronization();
1332                                 num_heads--;
1333                         }
1334                 return num_heads > 0;
1335         }
1336         return false;
1337 }
1338
1339 /**
1340  * Check promises and eliminate potentially-satisfying threads when a thread is
1341  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1342  * no longer satisfy a promise generated from that thread.
1343  *
1344  * @param blocker The thread on which a thread is waiting
1345  * @param waiting The waiting thread
1346  */
1347 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1348 {
1349         for (unsigned int i = 0; i < promises->size(); i++) {
1350                 Promise *promise = (*promises)[i];
1351                 ModelAction *reader = promise->get_action();
1352                 if (reader->get_tid() != blocker->get_id())
1353                         continue;
1354                 if (!promise->thread_is_available(waiting->get_id()))
1355                         continue;
1356                 if (promise->eliminate_thread(waiting->get_id())) {
1357                         /* Promise has failed */
1358                         priv->failed_promise = true;
1359                 }
1360         }
1361 }
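
/*
 * A hypothetical scenario this handles: Thread 1 has an outstanding promise
 * (a speculative load still waiting for its satisfying store) and Thread 2
 * then blocks joining on Thread 1. Thread 2 can no longer provide that store,
 * so it is eliminated from the promise's set of available threads; if it was
 * the last candidate, the promise (and this execution) fails.
 */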
1362
1363 /**
1364  * @brief Check whether a model action is enabled.
1365  *
1366  * Checks whether a lock or join operation would be successful (i.e., is the
1367  * lock already locked, or is the joined thread already complete). If not, put
1368  * the action in a waiter list.
1369  *
1370  * @param curr The ModelAction to check.
1371  * @return True if the action is enabled; false otherwise.
1372  */
1373 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1374         if (curr->is_lock()) {
1375                 std::mutex *lock = (std::mutex *)curr->get_location();
1376                 struct std::mutex_state *state = lock->get_state();
1377                 if (state->islocked) {
1378                         //Stick the action in the appropriate waiting queue
1379                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1380                         return false;
1381                 }
1382         } else if (curr->get_type() == THREAD_JOIN) {
1383                 Thread *blocking = (Thread *)curr->get_location();
1384                 if (!blocking->is_complete()) {
1385                         blocking->push_wait_list(curr);
1386                         thread_blocking_check_promises(blocking, get_thread(curr));
1387                         return false;
1388                 }
1389         }
1390
1391         return true;
1392 }
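
/*
 * Rough illustration, assuming the model checker's interposed std::mutex: if
 * Thread 2 issues lock(&m) while Thread 1 still holds m, the lock action is
 * parked on lock_waiters_map[&m] and the thread is put to sleep; it is only
 * re-enabled when the corresponding unlock is processed.
 */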
1393
1394 /**
1395  * This is the heart of the model checker routine. It performs model-checking
1396  * actions corresponding to a given "current action." Among other processes, it
1397  * calculates reads-from relationships, updates synchronization clock vectors,
1398  * forms a memory_order constraints graph, and handles replay/backtrack
1399  * execution when running permutations of previously-observed executions.
1400  *
1401  * @param curr The current action to process
1402  * @return The ModelAction that is actually executed; may be different from
1403  * curr; may be NULL, if the current action is not enabled to run
1404  */
1405 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1406 {
1407         ASSERT(curr);
1408         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1409
1410         if (!check_action_enabled(curr)) {
1411                 /* Make the execution look like we chose to run this action
1412                  * much later, when a lock/join can succeed */
1413                 get_thread(curr)->set_pending(curr);
1414                 scheduler->sleep(get_thread(curr));
1415                 return NULL;
1416         }
1417
1418         bool newly_explored = initialize_curr_action(&curr);
1419
1420         DBG();
1421         if (DBG_ENABLED())
1422                 curr->print();
1423
1424         wake_up_sleeping_actions(curr);
1425
1426         /* Add the action to lists before any other model-checking tasks */
1427         if (!second_part_of_rmw)
1428                 add_action_to_lists(curr);
1429
1430         /* Build may_read_from set for newly-created actions */
1431         if (newly_explored && curr->is_read())
1432                 build_may_read_from(curr);
1433
1434         /* Initialize work_queue with the "current action" work */
1435         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
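        /* Drain the queue to a fixed point: handling one entry may enqueue
         * release-sequence or mo-edge re-checks for other actions */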
1436         while (!work_queue.empty() && !has_asserted()) {
1437                 WorkQueueEntry work = work_queue.front();
1438                 work_queue.pop_front();
1439
1440                 switch (work.type) {
1441                 case WORK_CHECK_CURR_ACTION: {
1442                         ModelAction *act = work.action;
1443                         bool update = false; /* update this location's release seq's */
1444                         bool update_all = false; /* update all release seq's */
1445
1446                         if (process_thread_action(curr))
1447                                 update_all = true;
1448
1449                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1450                                 update = true;
1451
1452                         if (act->is_write() && process_write(act))
1453                                 update = true;
1454
1455                         if (act->is_fence() && process_fence(act))
1456                                 update_all = true;
1457
1458                         if (act->is_mutex_op() && process_mutex(act))
1459                                 update_all = true;
1460
1461                         if (act->is_relseq_fixup())
1462                                 process_relseq_fixup(curr, &work_queue);
1463
1464                         if (update_all)
1465                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1466                         else if (update)
1467                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1468                         break;
1469                 }
1470                 case WORK_CHECK_RELEASE_SEQ:
1471                         resolve_release_sequences(work.location, &work_queue);
1472                         break;
1473                 case WORK_CHECK_MO_EDGES: {
1474                         /** @todo Complete verification of work_queue */
1475                         ModelAction *act = work.action;
1476                         bool updated = false;
1477
1478                         if (act->is_read()) {
1479                                 const ModelAction *rf = act->get_reads_from();
1480                                 const Promise *promise = act->get_reads_from_promise();
1481                                 if (rf) {
1482                                         if (r_modification_order(act, rf))
1483                                                 updated = true;
1484                                 } else if (promise) {
1485                                         if (r_modification_order(act, promise))
1486                                                 updated = true;
1487                                 }
1488                         }
1489                         if (act->is_write()) {
1490                                 if (w_modification_order(act))
1491                                         updated = true;
1492                         }
1493                         mo_graph->commitChanges();
1494
1495                         if (updated)
1496                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1497                         break;
1498                 }
1499                 default:
1500                         ASSERT(false);
1501                         break;
1502                 }
1503         }
1504
1505         check_curr_backtracking(curr);
1506         set_backtracking(curr);
1507         return curr;
1508 }
1509
1510 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1511 {
1512         Node *currnode = curr->get_node();
1513         Node *parnode = currnode->get_parent();
1514
1515         if ((parnode && !parnode->backtrack_empty()) ||
1516                          !currnode->misc_empty() ||
1517                          !currnode->read_from_empty() ||
1518                          !currnode->promise_empty() ||
1519                          !currnode->relseq_break_empty()) {
1520                 set_latest_backtrack(curr);
1521         }
1522 }
1523
1524 bool ModelChecker::promises_expired() const
1525 {
1526         for (unsigned int i = 0; i < promises->size(); i++) {
1527                 Promise *promise = (*promises)[i];
1528                 if (promise->get_expiration() < priv->used_sequence_numbers)
1529                         return true;
1530         }
1531         return false;
1532 }
1533
1534 /**
1535  * This is the strongest feasibility check available.
1536  * @return whether the current trace (partial or complete) must be a prefix of
1537  * a feasible trace.
1538  */
1539 bool ModelChecker::isfeasibleprefix() const
1540 {
1541         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1542 }
1543
1544 /**
1545  * Print diagnostic information about an infeasible execution
1546  * @param prefix A string to prefix the output with; if NULL, then a default
1547  * message prefix will be provided
1548  */
1549 void ModelChecker::print_infeasibility(const char *prefix) const
1550 {
1551         char buf[100];
1552         char *ptr = buf;
1553         if (mo_graph->checkForCycles())
1554                 ptr += sprintf(ptr, "[mo cycle]");
1555         if (priv->failed_promise)
1556                 ptr += sprintf(ptr, "[failed promise]");
1557         if (priv->too_many_reads)
1558                 ptr += sprintf(ptr, "[too many reads]");
1559         if (priv->no_valid_reads)
1560                 ptr += sprintf(ptr, "[no valid reads-from]");
1561         if (priv->bad_synchronization)
1562                 ptr += sprintf(ptr, "[bad sw ordering]");
1563         if (promises_expired())
1564                 ptr += sprintf(ptr, "[promise expired]");
1565         if (promises->size() != 0)
1566                 ptr += sprintf(ptr, "[unresolved promise]");
1567         if (ptr != buf)
1568                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1569 }
1570
1571 /**
1572  * Returns whether the current completed trace is feasible, except for pending
1573  * release sequences.
1574  */
1575 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1576 {
1577         return !is_infeasible() && promises->size() == 0;
1578 }
1579
1580 /**
1581  * Check if the current partial trace is infeasible. Does not check any
1582  * end-of-execution flags, which might rule out the execution. Thus, this is
1583  * useful only for ruling an execution as infeasible.
1584  * @return whether the current partial trace is infeasible.
1585  */
1586 bool ModelChecker::is_infeasible() const
1587 {
1588         return mo_graph->checkForCycles() ||
1589                 priv->no_valid_reads ||
1590                 priv->failed_promise ||
1591                 priv->too_many_reads ||
1592                 priv->bad_synchronization ||
1593                 promises_expired();
1594 }
1595
1596 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1597 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1598         ModelAction *lastread = get_last_action(act->get_tid());
1599         lastread->process_rmw(act);
1600         if (act->is_rmw()) {
1601                 if (lastread->get_reads_from())
1602                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1603                 else
1604                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1605                 mo_graph->commitChanges();
1606         }
1607         return lastread;
1608 }
1609
1610 /**
1611  * Checks whether a thread has read from the same write too many times
1612  * without seeing the effects of a later write.
1613  *
1614  * Basic idea:
1615  * 1) there must be a different write that we could read from that would satisfy the modification order,
1616  * 2) we must have read from the same write in excess of maxreads times, and
1617  * 3) that other write must have been in the reads_from set each of those maxreads times.
1618  *
1619  * If so, we decide that the execution is no longer feasible.
1620  */
1621 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1622 {
1623         if (params.maxreads != 0) {
1624                 if (curr->get_node()->get_read_from_past_size() <= 1)
1625                         return;
1626                 //Must make sure that the execution is currently feasible... the
1627                 //infeasibility could be accidentally cleared by rolling back
1628                 if (is_infeasible())
1629                         return;
1630                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1631                 int tid = id_to_int(curr->get_tid());
1632
1633                 /* This thread has no actions at this location yet; skip checks */
1634                 if ((int)thrd_lists->size() <= tid)
1635                         return;
1636                 action_list_t *list = &(*thrd_lists)[tid];
1637
1638                 action_list_t::reverse_iterator rit = list->rbegin();
1639                 /* Advance to curr */
1640                 for (; (*rit) != curr; rit++)
1641                         ;
1642                 /* go past curr now */
1643                 rit++;
1644
1645                 action_list_t::reverse_iterator ritcopy = rit;
1646                 //See if we have enough reads from the same write
1647                 int count = 0;
1648                 for (; count < params.maxreads; rit++, count++) {
1649                         if (rit == list->rend())
1650                                 return;
1651                         ModelAction *act = *rit;
1652                         if (!act->is_read())
1653                                 return;
1654
1655                         if (act->get_reads_from() != rf)
1656                                 return;
1657                         if (act->get_node()->get_read_from_past_size() <= 1)
1658                                 return;
1659                 }
1660                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1661                         /* Get write */
1662                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1663
1664                         /* Need a different write */
1665                         if (write == rf)
1666                                 continue;
1667
1668                         /* Test to see whether this is a feasible write to read from */
1669                         /** NOTE: all members of read-from set should be
1670                          *  feasible, so we no longer check it here **/
1671
1672                         rit = ritcopy;
1673
1674                         bool feasiblewrite = true;
1675                         //now we need to see if this write works for everyone
1676
1677                         for (int loop = count; loop > 0; loop--, rit++) {
1678                                 ModelAction *act = *rit;
1679                                 bool foundvalue = false;
1680                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1681                                         if (act->get_node()->get_read_from_past(j) == write) {
1682                                                 foundvalue = true;
1683                                                 break;
1684                                         }
1685                                 }
1686                                 if (!foundvalue) {
1687                                         feasiblewrite = false;
1688                                         break;
1689                                 }
1690                         }
1691                         if (feasiblewrite) {
1692                                 priv->too_many_reads = true;
1693                                 return;
1694                         }
1695                 }
1696         }
1697 }
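
/*
 * A minimal sketch of the pattern this bounds, assuming params.maxreads is
 * nonzero ('flag' is an illustrative variable, not from this file):
 *
 *   // Thread 1                                  // Thread 2
 *   while (!flag.load(memory_order_relaxed))     flag.store(true, memory_order_relaxed);
 *           ;
 *
 * Once the spinning load has read from the same store maxreads times while
 * the store of 'true' remained available in each read's read-from set, the
 * execution is flagged too_many_reads instead of being explored forever.
 */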
1698
1699 /**
1700  * Updates the mo_graph with the constraints imposed from the current
1701  * read.
1702  *
1703  * Basic idea is the following: Go through each other thread and find
1704  * the last action that happened before our read.  Two cases:
1705  *
1706  * (1) The action is a write => that write must either occur before
1707  * the write we read from or be the write we read from.
1708  *
1709  * (2) The action is a read => the write that that action read from
1710  * must occur before the write we read from or be the same write.
1711  *
1712  * @param curr The current action. Must be a read.
1713  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1714  * @return True if modification order edges were added; false otherwise
1715  */
1716 template <typename rf_type>
1717 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1718 {
1719         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1720         unsigned int i;
1721         bool added = false;
1722         ASSERT(curr->is_read());
1723
1724         /* Last SC fence in the current thread */
1725         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1726
1727         /* Iterate over all threads */
1728         for (i = 0; i < thrd_lists->size(); i++) {
1729                 /* Last SC fence in thread i */
1730                 ModelAction *last_sc_fence_thread_local = NULL;
1731                 if (int_to_id((int)i) != curr->get_tid())
1732                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1733
1734                 /* Last SC fence in thread i, before last SC fence in current thread */
1735                 ModelAction *last_sc_fence_thread_before = NULL;
1736                 if (last_sc_fence_local)
1737                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1738
1739                 /* Iterate over actions in thread, starting from most recent */
1740                 action_list_t *list = &(*thrd_lists)[i];
1741                 action_list_t::reverse_iterator rit;
1742                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1743                         ModelAction *act = *rit;
1744
1745                         if (act->is_write() && !act->equals(rf) && act != curr) {
1746                                 /* C++, Section 29.3 statement 5 */
1747                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1748                                                 *act < *last_sc_fence_thread_local) {
1749                                         added = mo_graph->addEdge(act, rf) || added;
1750                                         break;
1751                                 }
1752                                 /* C++, Section 29.3 statement 4 */
1753                                 else if (act->is_seqcst() && last_sc_fence_local &&
1754                                                 *act < *last_sc_fence_local) {
1755                                         added = mo_graph->addEdge(act, rf) || added;
1756                                         break;
1757                                 }
1758                                 /* C++, Section 29.3 statement 6 */
1759                                 else if (last_sc_fence_thread_before &&
1760                                                 *act < *last_sc_fence_thread_before) {
1761                                         added = mo_graph->addEdge(act, rf) || added;
1762                                         break;
1763                                 }
1764                         }
1765
1766                         /*
1767                          * Include at most one act per-thread that "happens
1768                          * before" curr. Don't consider reflexively.
1769                          */
1770                         if (act->happens_before(curr) && act != curr) {
1771                                 if (act->is_write()) {
1772                                         if (!act->equals(rf)) {
1773                                                 added = mo_graph->addEdge(act, rf) || added;
1774                                         }
1775                                 } else {
1776                                         const ModelAction *prevrf = act->get_reads_from();
1777                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1778                                         if (prevrf) {
1779                                                 if (!prevrf->equals(rf))
1780                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1781                                         } else if (!prevrf_promise->equals(rf)) {
1782                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1783                                         }
1784                                 }
1785                                 break;
1786                         }
1787                 }
1788         }
1789
1790         /*
1791          * All compatible, thread-exclusive promises must be ordered after any
1792          * concrete loads from the same thread
1793          */
1794         for (unsigned int i = 0; i < promises->size(); i++)
1795                 if ((*promises)[i]->is_compatible_exclusive(curr))
1796                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1797
1798         return added;
1799 }
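
/*
 * A concrete instance of case (2) above, with all names illustrative: if a
 * prior load B happens before the current load A, B read from write W1, and
 * A reads from write W2 != W1, then the edge W1 --mo--> W2 is added. This is
 * what prevents two happens-before-ordered loads of the same variable from
 * observing writes in opposite modification order.
 */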
1800
1801 /**
1802  * Updates the mo_graph with the constraints imposed from the current write.
1803  *
1804  * Basic idea is the following: Go through each other thread and find
1805  * the latest action that happened before our write.  Two cases:
1806  *
1807  * (1) The action is a write => that write must occur before
1808  * the current write
1809  *
1810  * (2) The action is a read => the write that that action read from
1811  * must occur before the current write.
1812  *
1813  * This method also handles two other issues:
1814  *
1815  * (I) Sequential Consistency: Making sure that if the current write is
1816  * seq_cst, that it occurs after the previous seq_cst write.
1817  *
1818  * (II) Sending the write back to non-synchronizing reads.
1819  *
1820  * @param curr The current action. Must be a write.
1821  * @return True if modification order edges were added; false otherwise
1822  */
1823 bool ModelChecker::w_modification_order(ModelAction *curr)
1824 {
1825         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1826         unsigned int i;
1827         bool added = false;
1828         ASSERT(curr->is_write());
1829
1830         if (curr->is_seqcst()) {
1831                 /* We have to at least see the last sequentially consistent write,
1832                          so we are initialized. */
1833                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1834                 if (last_seq_cst != NULL) {
1835                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1836                 }
1837         }
1838
1839         /* Last SC fence in the current thread */
1840         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1841
1842         /* Iterate over all threads */
1843         for (i = 0; i < thrd_lists->size(); i++) {
1844                 /* Last SC fence in thread i, before last SC fence in current thread */
1845                 ModelAction *last_sc_fence_thread_before = NULL;
1846                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1847                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1848
1849                 /* Iterate over actions in thread, starting from most recent */
1850                 action_list_t *list = &(*thrd_lists)[i];
1851                 action_list_t::reverse_iterator rit;
1852                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1853                         ModelAction *act = *rit;
1854                         if (act == curr) {
1855                                 /*
1856                                  * 1) If RMW and it actually read from something, then we
1857                                  * already have all relevant edges, so just skip to next
1858                                  * thread.
1859                                  *
1860                                  * 2) If RMW and it didn't read from anything, we should take
1861                                  * whatever edge we can get to speed up convergence.
1862                                  *
1863                                  * 3) If normal write, we need to look at earlier actions, so
1864                                  * continue processing list.
1865                                  */
1866                                 if (curr->is_rmw()) {
1867                                         if (curr->get_reads_from() != NULL)
1868                                                 break;
1869                                         else
1870                                                 continue;
1871                                 } else
1872                                         continue;
1873                         }
1874
1875                         /* C++, Section 29.3 statement 7 */
1876                         if (last_sc_fence_thread_before && act->is_write() &&
1877                                         *act < *last_sc_fence_thread_before) {
1878                                 added = mo_graph->addEdge(act, curr) || added;
1879                                 break;
1880                         }
1881
1882                         /*
1883                          * Include at most one act per-thread that "happens
1884                          * before" curr
1885                          */
1886                         if (act->happens_before(curr)) {
1887                                 /*
1888                                  * Note: if act is RMW, just add edge:
1889                                  *   act --mo--> curr
1890                                  * The following edge should be handled elsewhere:
1891                                  *   readfrom(act) --mo--> act
1892                                  */
1893                                 if (act->is_write())
1894                                         added = mo_graph->addEdge(act, curr) || added;
1895                                 else if (act->is_read()) {
1896                                         //if previous read accessed a null, just keep going
1897                                         if (act->get_reads_from() == NULL)
1898                                                 continue;
1899                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1900                                 }
1901                                 break;
1902                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1903                                                      !act->same_thread(curr)) {
1904                                 /* We have an action that:
1905                                    (1) did not happen before us
1906                                    (2) is a read and we are a write
1907                                    (3) cannot synchronize with us
1908                                    (4) is in a different thread
1909                                    =>
1910                                    that read could potentially read from our write.  Note that
1911                                    these checks are overly conservative at this point; we'll
1912                                    do more checks before actually removing the
1913                                    pendingfuturevalue.
1914
1915                                  */
1916                                 if (thin_air_constraint_may_allow(curr, act)) {
1917                                         if (!is_infeasible())
1918                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1919                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1920                                                 add_future_value(curr, act);
1921                                 }
1922                         }
1923                 }
1924         }
1925
1926         /*
1927          * All compatible, thread-exclusive promises must be ordered after any
1928          * concrete stores to the same thread, or else they can be merged with
1929          * this store later
1930          */
1931         for (unsigned int i = 0; i < promises->size(); i++)
1932                 if ((*promises)[i]->is_compatible_exclusive(curr))
1933                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1934
1935         return added;
1936 }
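
/*
 * A concrete instance of case (2) above, with all names illustrative: if a
 * load R happens before the current store W2 and R read from store W1, then
 * the edge W1 --mo--> W2 is added, so W2 cannot be ordered before a store
 * that an earlier (happens-before) load already observed.
 */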
1937
1938 /** Arbitrary reads from the future are not allowed.  Section 29.3
1939  * part 9 places some constraints.  This method checks one consequence of that
1940  * constraint.  Others require compiler support. */
1941 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1942 {
1943         if (!writer->is_rmw())
1944                 return true;
1945
1946         if (!reader->is_rmw())
1947                 return true;
1948
1949         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1950                 if (search == reader)
1951                         return false;
1952                 if (search->get_tid() == reader->get_tid() &&
1953                                 search->happens_before(reader))
1954                         break;
1955         }
1956
1957         return true;
1958 }
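
/*
 * The shape being rejected, sketched with illustrative names: for RMWs A and
 * B on the same variable, A would read from B while B (transitively, through
 * its reads-from chain) reads from A. Such a cycle would let a value appear
 * "out of thin air", so the candidate future value is not sent back.
 */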
1959
1960 /**
1961  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1962  * some constraints. This method checks the following constraint (others
1963  * require compiler support):
1964  *
1965  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1966  */
1967 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1968 {
1969         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1970         unsigned int i;
1971         /* Iterate over all threads */
1972         for (i = 0; i < thrd_lists->size(); i++) {
1973                 const ModelAction *write_after_read = NULL;
1974
1975                 /* Iterate over actions in thread, starting from most recent */
1976                 action_list_t *list = &(*thrd_lists)[i];
1977                 action_list_t::reverse_iterator rit;
1978                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1979                         ModelAction *act = *rit;
1980
1981                         /* Don't disallow due to act == reader */
1982                         if (!reader->happens_before(act) || reader == act)
1983                                 break;
1984                         else if (act->is_write())
1985                                 write_after_read = act;
1986                         else if (act->is_read() && act->get_reads_from() != NULL)
1987                                 write_after_read = act->get_reads_from();
1988                 }
1989
1990                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1991                         return false;
1992         }
1993         return true;
1994 }
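
/*
 * An illustrative instance of the rule above: if the reader already happens
 * before some store Y, and Y --mo--> writer, then letting the reader take its
 * value from 'writer' would have it read a store that is modification-ordered
 * after a store it precedes; mo_may_allow() returns false for that pair.
 */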
1995
1996 /**
1997  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1998  * The ModelAction under consideration is expected to be taking part in
1999  * release/acquire synchronization as an object of the "reads from" relation.
2000  * Note that this can only provide release sequence support for RMW chains
2001  * which do not read from the future, as those actions cannot be traced until
2002  * their "promise" is fulfilled. Similarly, we may not even establish the
2003  * presence of a release sequence with certainty, as some modification order
2004  * constraints may be decided further in the future. Thus, this function
2005  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2006  * and a boolean representing certainty.
2007  *
2008  * @param rf The action that might be part of a release sequence. Must be a
2009  * write.
2010  * @param release_heads A pass-by-reference style return parameter. After
2011  * execution of this function, release_heads will contain the heads of all the
2012  * relevant release sequences, if any exist with certainty
2013  * @param pending A pass-by-reference style return parameter which is only used
2014  * when returning false (i.e., uncertain). Returns most information regarding
2015  * an uncertain release sequence, including any write operations that might
2016  * break the sequence.
2017  * @return true, if the ModelChecker is certain that release_heads is complete;
2018  * false otherwise
2019  */
2020 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2021                 rel_heads_list_t *release_heads,
2022                 struct release_seq *pending) const
2023 {
2024         /* Only check for release sequences if there are no cycles */
2025         if (mo_graph->checkForCycles())
2026                 return false;
2027
2028         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2029                 ASSERT(rf->is_write());
2030
2031                 if (rf->is_release())
2032                         release_heads->push_back(rf);
2033                 else if (rf->get_last_fence_release())
2034                         release_heads->push_back(rf->get_last_fence_release());
2035                 if (!rf->is_rmw())
2036                         break; /* End of RMW chain */
2037
2038                 /** @todo Need to be smarter here...  In the linux lock
2039                  * example, this will run to the beginning of the program for
2040                  * every acquire. */
2041                 /** @todo The way to be smarter here is to keep going until 1
2042                  * thread has a release preceded by an acquire and you've seen
2043  * both. */
2044
2045                 /* acq_rel RMW is a sufficient stopping condition */
2046                 if (rf->is_acquire() && rf->is_release())
2047                         return true; /* complete */
2048         }
2049         if (!rf) {
2050                 /* read from future: need to settle this later */
2051                 pending->rf = NULL;
2052                 return false; /* incomplete */
2053         }
2054
2055         if (rf->is_release())
2056                 return true; /* complete */
2057
2058         /* else relaxed write
2059          * - check for fence-release in the same thread (29.8, stmt. 3)
2060          * - check modification order for contiguous subsequence
2061          *   -> rf must be same thread as release */
2062
2063         const ModelAction *fence_release = rf->get_last_fence_release();
2064         /* Synchronize with a fence-release unconditionally; we don't need to
2065          * find any more "contiguous subsequence..." for it */
2066         if (fence_release)
2067                 release_heads->push_back(fence_release);
2068
2069         int tid = id_to_int(rf->get_tid());
2070         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2071         action_list_t *list = &(*thrd_lists)[tid];
2072         action_list_t::const_reverse_iterator rit;
2073
2074         /* Find rf in the thread list */
2075         rit = std::find(list->rbegin(), list->rend(), rf);
2076         ASSERT(rit != list->rend());
2077
2078         /* Find the last {write,fence}-release */
2079         for (; rit != list->rend(); rit++) {
2080                 if (fence_release && *(*rit) < *fence_release)
2081                         break;
2082                 if ((*rit)->is_release())
2083                         break;
2084         }
2085         if (rit == list->rend()) {
2086                 /* No write-release in this thread */
2087                 return true; /* complete */
2088         } else if (fence_release && *(*rit) < *fence_release) {
2089                 /* The fence-release is more recent (and so, "stronger") than
2090                  * the most recent write-release */
2091                 return true; /* complete */
2092         } /* else, need to establish contiguous release sequence */
2093         ModelAction *release = *rit;
2094
2095         ASSERT(rf->same_thread(release));
2096
2097         pending->writes.clear();
2098
2099         bool certain = true;
2100         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2101                 if (id_to_int(rf->get_tid()) == (int)i)
2102                         continue;
2103                 list = &(*thrd_lists)[i];
2104
2105                 /* Can we ensure no future writes from this thread may break
2106                  * the release seq? */
2107                 bool future_ordered = false;
2108
2109                 ModelAction *last = get_last_action(int_to_id(i));
2110                 Thread *th = get_thread(int_to_id(i));
2111                 if ((last && rf->happens_before(last)) ||
2112                                 !is_enabled(th) ||
2113                                 th->is_complete())
2114                         future_ordered = true;
2115
2116                 ASSERT(!th->is_model_thread() || future_ordered);
2117
2118                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2119                         const ModelAction *act = *rit;
2120                         /* Reach synchronization -> this thread is complete */
2121                         if (act->happens_before(release))
2122                                 break;
2123                         if (rf->happens_before(act)) {
2124                                 future_ordered = true;
2125                                 continue;
2126                         }
2127
2128                         /* Only non-RMW writes can break release sequences */
2129                         if (!act->is_write() || act->is_rmw())
2130                                 continue;
2131
2132                         /* Check modification order */
2133                         if (mo_graph->checkReachable(rf, act)) {
2134                                 /* rf --mo--> act */
2135                                 future_ordered = true;
2136                                 continue;
2137                         }
2138                         if (mo_graph->checkReachable(act, release))
2139                                 /* act --mo--> release */
2140                                 break;
2141                         if (mo_graph->checkReachable(release, act) &&
2142                                       mo_graph->checkReachable(act, rf)) {
2143                                 /* release --mo-> act --mo--> rf */
2144                                 return true; /* complete */
2145                         }
2146                         /* act may break release sequence */
2147                         pending->writes.push_back(act);
2148                         certain = false;
2149                 }
2150                 if (!future_ordered)
2151                         certain = false; /* This thread is uncertain */
2152         }
2153
2154         if (certain) {
2155                 release_heads->push_back(release);
2156                 pending->writes.clear();
2157         } else {
2158                 pending->release = release;
2159                 pending->rf = rf;
2160         }
2161         return certain;
2162 }
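
/*
 * A minimal sketch of a release sequence this can certify, with illustrative
 * names: a release store REL to x followed in modification order only by RMWs
 * on x (from any thread). An acquire load reading from the last RMW in that
 * chain synchronizes with REL, so REL is reported in release_heads. A plain
 * (non-RMW) store to x from another thread that cannot yet be ordered either
 * before REL or after the read is what lands in pending->writes and makes the
 * answer uncertain.
 */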
2163
2164 /**
2165  * An interface for getting the release sequence head(s) with which a
2166  * given ModelAction must synchronize. This function only returns a non-empty
2167  * result when it can locate a release sequence head with certainty. Otherwise,
2168  * it may mark the internal state of the ModelChecker so that it will handle
2169  * the release sequence at a later time, causing @a acquire to update its
2170  * synchronization at some later point in execution.
2171  *
2172  * @param acquire The 'acquire' action that may synchronize with a release
2173  * sequence
2174  * @param read The read action that may read from a release sequence; this may
2175  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2176  * when 'acquire' is a fence-acquire)
2177  * @param release_heads A pass-by-reference return parameter. Will be filled
2178  * with the head(s) of the release sequence(s), if they exist with certainty.
2179  * @see ModelChecker::release_seq_heads
2180  */
2181 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2182                 ModelAction *read, rel_heads_list_t *release_heads)
2183 {
2184         const ModelAction *rf = read->get_reads_from();
2185         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2186         sequence->acquire = acquire;
2187         sequence->read = read;
2188
2189         if (!release_seq_heads(rf, release_heads, sequence)) {
2190                 /* add act to 'lazy checking' list */
2191                 pending_rel_seqs->push_back(sequence);
2192         } else {
2193                 snapshot_free(sequence);
2194         }
2195 }
2196
2197 /**
2198  * Attempt to resolve all stashed operations that might synchronize with a
2199  * release sequence for a given location. This implements the "lazy" portion of
2200  * determining whether or not a release sequence was contiguous, since not all
2201  * modification order information is present at the time an action occurs.
2202  *
2203  * @param location The location/object that should be checked for release
2204  * sequence resolutions. A NULL value means to check all locations.
2205  * @param work_queue The work queue to which to add work items as they are
2206  * generated
2207  * @return True if any updates occurred (new synchronization, new mo_graph
2208  * edges)
2209  */
2210 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2211 {
2212         bool updated = false;
2213         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2214         while (it != pending_rel_seqs->end()) {
2215                 struct release_seq *pending = *it;
2216                 ModelAction *acquire = pending->acquire;
2217                 const ModelAction *read = pending->read;
2218
2219                 /* Only resolve sequences on the given location, if provided */
2220                 if (location && read->get_location() != location) {
2221                         it++;
2222                         continue;
2223                 }
2224
2225                 const ModelAction *rf = read->get_reads_from();
2226                 rel_heads_list_t release_heads;
2227                 bool complete;
2228                 complete = release_seq_heads(rf, &release_heads, pending);
2229                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2230                         if (!acquire->has_synchronized_with(release_heads[i])) {
2231                                 if (acquire->synchronize_with(release_heads[i]))
2232                                         updated = true;
2233                                 else
2234                                         set_bad_synchronization();
2235                         }
2236                 }
2237
2238                 if (updated) {
2239                         /* Re-check all pending release sequences */
2240                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2241                         /* Re-check read-acquire for mo_graph edges */
2242                         if (acquire->is_read())
2243                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2244
2245                         /* propagate synchronization to later actions */
2246                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2247                         for (; (*rit) != acquire; rit++) {
2248                                 ModelAction *propagate = *rit;
2249                                 if (acquire->happens_before(propagate)) {
2250                                         propagate->synchronize_with(acquire);
2251                                         /* Re-check 'propagate' for mo_graph edges */
2252                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2253                                 }
2254                         }
2255                 }
2256                 if (complete) {
2257                         it = pending_rel_seqs->erase(it);
2258                         snapshot_free(pending);
2259                 } else {
2260                         it++;
2261                 }
2262         }
2263
2264         // Now that additional synchronization may have been established, check whether we have realized a data race.
2265         checkDataRaces();
2266
2267         return updated;
2268 }
2269
2270 /**
2271  * Performs various bookkeeping operations for the current ModelAction. For
2272  * instance, adds action to the per-object, per-thread action vector and to the
2273  * action trace list of all thread actions.
2274  *
2275  * @param act is the ModelAction to add.
2276  */
2277 void ModelChecker::add_action_to_lists(ModelAction *act)
2278 {
2279         int tid = id_to_int(act->get_tid());
2280         ModelAction *uninit = NULL;
2281         int uninit_id = -1;
2282         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2283         if (list->empty() && act->is_atomic_var()) {
2284                 uninit = new_uninitialized_action(act->get_location());
2285                 uninit_id = id_to_int(uninit->get_tid());
2286                 list->push_back(uninit);
2287         }
2288         list->push_back(act);
2289
2290         action_trace->push_back(act);
2291         if (uninit)
2292                 action_trace->push_front(uninit);
2293
2294         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2295         if (tid >= (int)vec->size())
2296                 vec->resize(priv->next_thread_id);
2297         (*vec)[tid].push_back(act);
2298         if (uninit)
2299                 (*vec)[uninit_id].push_front(uninit);
2300
2301         if ((int)thrd_last_action->size() <= tid)
2302                 thrd_last_action->resize(get_num_threads());
2303         (*thrd_last_action)[tid] = act;
2304         if (uninit)
2305                 (*thrd_last_action)[uninit_id] = uninit;
2306
2307         if (act->is_fence() && act->is_release()) {
2308                 if ((int)thrd_last_fence_release->size() <= tid)
2309                         thrd_last_fence_release->resize(get_num_threads());
2310                 (*thrd_last_fence_release)[tid] = act;
2311         }
2312
2313         if (act->is_wait()) {
2314                 void *mutex_loc = (void *) act->get_value();
2315                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2316
2317                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2318                 if (tid >= (int)vec->size())
2319                         vec->resize(priv->next_thread_id);
2320                 (*vec)[tid].push_back(act);
2321         }
2322 }
2323
2324 /**
2325  * @brief Get the last action performed by a particular Thread
2326  * @param tid The thread ID of the Thread in question
2327  * @return The last action in the thread
2328  */
2329 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2330 {
2331         int threadid = id_to_int(tid);
2332         if (threadid < (int)thrd_last_action->size())
2333                 return (*thrd_last_action)[id_to_int(tid)];
2334         else
2335                 return NULL;
2336 }
2337
2338 /**
2339  * @brief Get the last fence release performed by a particular Thread
2340  * @param tid The thread ID of the Thread in question
2341  * @return The last fence release in the thread, if one exists; NULL otherwise
2342  */
2343 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2344 {
2345         int threadid = id_to_int(tid);
2346         if (threadid < (int)thrd_last_fence_release->size())
2347                 return (*thrd_last_fence_release)[id_to_int(tid)];
2348         else
2349                 return NULL;
2350 }
2351
2352 /**
2353  * Gets the last memory_order_seq_cst write (in the total global sequence)
2354  * performed on a particular object (i.e., memory location), not including the
2355  * current action.
2356  * @param curr The current ModelAction; also denotes the object location to
2357  * check
2358  * @return The last seq_cst write
2359  */
2360 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2361 {
2362         void *location = curr->get_location();
2363         action_list_t *list = get_safe_ptr_action(obj_map, location);
2364         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2365         action_list_t::reverse_iterator rit;
2366         for (rit = list->rbegin(); rit != list->rend(); rit++)
2367                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2368                         return *rit;
2369         return NULL;
2370 }
2371
2372 /**
2373  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2374  * performed in a particular thread, prior to a particular fence.
2375  * @param tid The ID of the thread to check
2376  * @param before_fence The fence from which to begin the search; if NULL, then
2377  * search for the most recent fence in the thread.
2378  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2379  */
2380 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2381 {
2382         /* All fences should have NULL location */
2383         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2384         action_list_t::reverse_iterator rit = list->rbegin();
2385
2386         if (before_fence) {
2387                 for (; rit != list->rend(); rit++)
2388                         if (*rit == before_fence)
2389                                 break;
2390
2391                 ASSERT(*rit == before_fence);
2392                 rit++;
2393         }
2394
2395         for (; rit != list->rend(); rit++)
2396                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2397                         return *rit;
2398         return NULL;
2399 }
2400
2401 /**
2402  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2403  * location). This function identifies the mutex according to the current
2404  * action, which is presumed to operate on the same mutex.
2405  * @param curr The current ModelAction; also denotes the object location to
2406  * check
2407  * @return The last unlock operation
2408  */
2409 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2410 {
2411         void *location = curr->get_location();
2412         action_list_t *list = get_safe_ptr_action(obj_map, location);
2413         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2414         action_list_t::reverse_iterator rit;
2415         for (rit = list->rbegin(); rit != list->rend(); rit++)
2416                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2417                         return *rit;
2418         return NULL;
2419 }
2420
2421 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2422 {
2423         ModelAction *parent = get_last_action(tid);
2424         if (!parent)
2425                 parent = get_thread(tid)->get_creation();
2426         return parent;
2427 }
2428
2429 /**
2430  * Returns the clock vector for a given thread.
2431  * @param tid The thread whose clock vector we want
2432  * @return Desired clock vector
2433  */
2434 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2435 {
2436         return get_parent_action(tid)->get_cv();
2437 }
2438
2439 /**
2440  * Resolve a set of Promises with a current write. The set is provided in the
2441  * Node corresponding to @a write.
2442  * @param write The ModelAction that is fulfilling Promises
2443  * @return True if promises were resolved; false otherwise
2444  */
2445 bool ModelChecker::resolve_promises(ModelAction *write)
2446 {
2447         bool haveResolved = false;
2448         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2449         promise_list_t mustResolve, resolved;
2450
2451         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2452                 Promise *promise = (*promises)[promise_index];
2453                 if (write->get_node()->get_promise(i)) {
2454                         ModelAction *read = promise->get_action();
2455                         read_from(read, write);
2456                         //Make sure the promise's value matches the write's value
2457                         ASSERT(promise->is_compatible(write));
2458                         mo_graph->resolvePromise(promise, write, &mustResolve);
2459
2460                         resolved.push_back(promise);
2461                         promises->erase(promises->begin() + promise_index);
2462                         actions_to_check.push_back(read);
2463
2464                         haveResolved = true;
2465                 } else
2466                         promise_index++;
2467         }
2468
2469         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2470                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2471                                 == resolved.end())
2472                         priv->failed_promise = true;
2473         }
2474         for (unsigned int i = 0; i < resolved.size(); i++)
2475                 delete resolved[i];
2476         //Check whether reading these writes has made threads unable to
2477         //resolve promises
2478
2479         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2480                 ModelAction *read = actions_to_check[i];
2481                 mo_check_promises(read, true);
2482         }
2483
2484         return haveResolved;
2485 }
2486
2487 /**
2488  * Compute the set of promises that could potentially be satisfied by this
2489  * action. Note that the set computation actually appears in the Node, not in
2490  * ModelChecker.
2491  * @param curr The ModelAction that may satisfy promises
2492  */
2493 void ModelChecker::compute_promises(ModelAction *curr)
2494 {
2495         for (unsigned int i = 0; i < promises->size(); i++) {
2496                 Promise *promise = (*promises)[i];
2497                 const ModelAction *act = promise->get_action();
2498                 ASSERT(act->is_read());
2499                 if (!act->happens_before(curr) &&
2500                                 !act->could_synchronize_with(curr) &&
2501                                 promise->is_compatible(curr) &&
2502                                 promise->get_value() == curr->get_value()) {
2503                         curr->get_node()->set_promise(i);
2504                 }
2505         }
2506 }
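
/*
 * Illustrative example: if an earlier load of x is still waiting on a
 * promised value of 42, and the current action is a store of 42 to x that
 * the pending load neither happens before nor could synchronize with, then
 * the corresponding promise bit is set on this store's Node so a later
 * exploration may try to satisfy the promise with this store.
 */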
2507
2508 /** Checks promises in response to a change in a thread's ClockVector. */
2509 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2510 {
2511         for (unsigned int i = 0; i < promises->size(); i++) {
2512                 Promise *promise = (*promises)[i];
2513                 const ModelAction *act = promise->get_action();
2514                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2515                                 merge_cv->synchronized_since(act)) {
2516                         if (promise->eliminate_thread(tid)) {
2517                                 //Promise has failed
2518                                 priv->failed_promise = true;
2519                                 return;
2520                         }
2521                 }
2522         }
2523 }
2524
2525 void ModelChecker::check_promises_thread_disabled()
2526 {
2527         for (unsigned int i = 0; i < promises->size(); i++) {
2528                 Promise *promise = (*promises)[i];
2529                 if (promise->has_failed()) {
2530                         priv->failed_promise = true;
2531                         return;
2532                 }
2533         }
2534 }
2535
2536 /**
2537  * @brief Checks promises in response to an addition to the modification
2538  * order.
2539  *
2540  * We test whether threads are still available for satisfying promises after an
2541  * addition to our modification order constraints. Those that are unavailable
2542  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2543  * that promise has failed.
2544  *
2545  * @param act The ModelAction which updated the modification order
2546  * @param is_read_check Should be true if act is a read and we must check for
2547  * updates to the store from which it read (there is a distinction here for
2548  * RMWs, which are both a load and a store)
2549  */
2550 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2551 {
2552         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2553
2554         for (unsigned int i = 0; i < promises->size(); i++) {
2555                 Promise *promise = (*promises)[i];
2556                 const ModelAction *pread = promise->get_action();
2557
2558                 // Is this promise on the same location?
2559                 if (!pread->same_var(write))
2560                         continue;
2561
2562                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2563                         priv->failed_promise = true;
2564                         return;
2565                 }
2566
2567                 // Don't do any lookups twice for the same thread
2568                 if (!promise->thread_is_available(act->get_tid()))
2569                         continue;
2570
2571                 if (mo_graph->checkReachable(promise, write)) {
2572                         if (mo_graph->checkPromise(write, promise)) {
2573                                 priv->failed_promise = true;
2574                                 return;
2575                         }
2576                 }
2577         }
2578 }
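
/*
 * Usage sketch (illustration only; actual call sites vary): an RMW is both a
 * load and a store, so a caller that has just extended the modification
 * order for an RMW would check promises against both halves:
 *
 *     mo_check_promises(rmw, true);   // read side: the store it read from
 *     mo_check_promises(rmw, false);  // write side: the store it performed
 *
 * The promise-resolution code earlier in this file uses the
 * is_read_check == true form for the reads whose promises were just
 * resolved.
 */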
2579
2580 /**
2581  * Compute the set of writes that may break the current pending release
2582  * sequence. This information is extracted from previous release sequence
2583  * calculations.
2584  *
2585  * @param curr The current ModelAction. Must be a release sequence fixup
2586  * action.
2587  */
2588 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2589 {
2590         if (pending_rel_seqs->empty())
2591                 return;
2592
2593         struct release_seq *pending = pending_rel_seqs->back();
2594         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2595                 const ModelAction *write = pending->writes[i];
2596                 curr->get_node()->add_relseq_break(write);
2597         }
2598
2599         /* NULL means don't break the sequence; just synchronize */
2600         curr->get_node()->add_relseq_break(NULL);
2601 }
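
/*
 * Sketch of the resulting choices (this restates the loop above): if the
 * pending release sequence recorded candidate writes w1 and w2, the fixup
 * action's Node is given three alternatives to explore across executions:
 *
 *     curr->get_node()->add_relseq_break(w1);   // break the sequence at w1
 *     curr->get_node()->add_relseq_break(w2);   // break the sequence at w2
 *     curr->get_node()->add_relseq_break(NULL); // don't break; just synchronize
 */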
2602
2603 /**
2604  * Build up an initial set of all past writes that this 'read' action may read
2605  * from, as well as any previously-observed future values that must still be valid.
2606  *
2607  * @param curr is the current ModelAction that we are exploring; it must be a
2608  * 'read' operation.
2609  */
2610 void ModelChecker::build_may_read_from(ModelAction *curr)
2611 {
2612         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2613         unsigned int i;
2614         ASSERT(curr->is_read());
2615
2616         ModelAction *last_sc_write = NULL;
2617
2618         if (curr->is_seqcst())
2619                 last_sc_write = get_last_seq_cst_write(curr);
2620
2621         /* Iterate over all threads */
2622         for (i = 0; i < thrd_lists->size(); i++) {
2623                 /* Iterate over actions in thread, starting from most recent */
2624                 action_list_t *list = &(*thrd_lists)[i];
2625                 action_list_t::reverse_iterator rit;
2626                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2627                         ModelAction *act = *rit;
2628
2629                         /* Only consider 'write' actions */
2630                         if (!act->is_write() || act == curr)
2631                                 continue;
2632
2633                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2634                         bool allow_read = true;
2635
2636                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2637                                 allow_read = false;
2638                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2639                                 allow_read = false;
2640
2641                         if (allow_read) {
2642                                 /* Only add feasible reads */
2643                                 mo_graph->startChanges();
2644                                 r_modification_order(curr, act);
2645                                 if (!is_infeasible())
2646                                         curr->get_node()->add_read_from_past(act);
2647                                 mo_graph->rollbackChanges();
2648                         }
2649
2650                         /* Include at most one act per-thread that "happens before" curr */
2651                         if (act->happens_before(curr))
2652                                 break;
2653                 }
2654         }
2655
2656         /* Inherit existing, promised future values */
2657         for (i = 0; i < promises->size(); i++) {
2658                 const Promise *promise = (*promises)[i];
2659                 const ModelAction *promise_read = promise->get_action();
2660                 if (promise_read->same_var(curr)) {
2661                         /* Only add feasible future-values */
2662                         mo_graph->startChanges();
2663                         r_modification_order(curr, promise);
2664                         if (!is_infeasible())
2665                                 curr->get_node()->add_read_from_promise(promise_read);
2666                         mo_graph->rollbackChanges();
2667                 }
2668         }
2669
2670         /* The may-read-from set can be empty only if the execution is doomed */
2671         if (!curr->get_node()->read_from_size()) {
2672                 priv->no_valid_reads = true;
2673                 set_assert();
2674         }
2675
2676         if (DBG_ENABLED()) {
2677                 model_print("Reached read action:\n");
2678                 curr->print();
2679                 model_print("Printing read_from_past\n");
2680                 curr->get_node()->print_read_from_past();
2681                 model_print("End printing read_from_past\n");
2682         }
2683 }
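
/*
 * Illustrative example (user-level code, not part of the checker): for
 *
 *     #include <atomic>
 *     std::atomic<int> x(0);
 *     void writer() {
 *             x.store(1, std::memory_order_seq_cst);
 *             x.store(2, std::memory_order_seq_cst);
 *     }
 *     void reader() { int r = x.load(std::memory_order_seq_cst); }
 *
 * if both stores have executed by the time the seq_cst load is explored and
 * the store of 2 is the last seq_cst write known to the load, the earlier
 * seq_cst store of 1 is filtered out of the may-read-from set (a seq_cst
 * read considers at most one seq_cst write). Each surviving candidate is
 * kept only if the trial r_modification_order() call above leaves the
 * execution feasible.
 */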
2684
2685 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2686 {
2687         for ( ; write != NULL; write = write->get_reads_from()) {
2688                 /* UNINIT actions don't have a Node, and they never sleep */
2689                 if (write->is_uninitialized())
2690                         return true;
2691                 Node *prevnode = write->get_node()->get_parent();
2692
2693                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2694                 if (write->is_release() && thread_sleep)
2695                         return true;
2696                 if (!write->is_rmw())
2697                         return false;
2698         }
2699         return true;
2700 }
2701
2702 /**
2703  * @brief Create a new action representing an uninitialized atomic
2704  * @param location The memory location of the atomic object
2705  * @return A pointer to a new ModelAction
2706  */
2707 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2708 {
2709         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2710         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2711         act->create_cv(NULL);
2712         return act;
2713 }
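
/*
 * Note: the snapshot_malloc() + placement-new pairing above is the idiom for
 * constructing this object directly in snapshotting memory; spelled out as a
 * standalone sketch:
 *
 *     void *mem = snapshot_malloc(sizeof(ModelAction));
 *     ModelAction *act = new (mem) ModelAction(ATOMIC_UNINIT,
 *                     std::memory_order_relaxed, location, 0, model_thread);
 */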
2714
2715 static void print_list(action_list_t *list)
2716 {
2717         action_list_t::iterator it;
2718
2719         model_print("---------------------------------------------------------------------\n");
2720
2721         unsigned int hash = 0;
2722
2723         for (it = list->begin(); it != list->end(); it++) {
2724                 (*it)->print();
2725                 hash = hash^(hash<<3)^((*it)->hash());
2726         }
2727         model_print("HASH %u\n", hash);
2728         model_print("---------------------------------------------------------------------\n");
2729 }
2730
2731 #if SUPPORT_MOD_ORDER_DUMP
2732 void ModelChecker::dumpGraph(char *filename) const
2733 {
2734         char buffer[200];
2735         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2736         FILE *file = fopen(buffer, "w");
2737         fprintf(file, "digraph %s {\n", filename);
2738         mo_graph->dumpNodes(file);
2739         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2740
2741         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2742                 ModelAction *action = *it;
2743                 if (action->is_read()) {
2744                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2745                         if (action->get_reads_from() != NULL)
2746                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2747                 }
2748                 if (thread_array[action->get_tid()] != NULL) {
2749                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2750                 }
2751
2752                 thread_array[action->get_tid()] = action;
2753         }
2754         fprintf(file, "}\n");
2755         model_free(thread_array);
2756         fclose(file);
2757 }
2758 #endif
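
/*
 * The .dot files produced above can be rendered with Graphviz, e.g. (file
 * names follow the "graph%04u" pattern used by print_summary() below):
 *
 *     $ dot -Tpdf graph0000.dot -o graph0000.pdf
 *
 * Red "rf" edges are reads-from relationships; blue "sb" edges are the
 * per-thread sequenced-before order.
 */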
2759
2760 /** @brief Prints an execution trace summary. */
2761 void ModelChecker::print_summary() const
2762 {
2763 #if SUPPORT_MOD_ORDER_DUMP
2764         char buffername[100];
2765         sprintf(buffername, "exec%04u", stats.num_total);
2766         mo_graph->dumpGraphToFile(buffername);
2767         sprintf(buffername, "graph%04u", stats.num_total);
2768         dumpGraph(buffername);
2769 #endif
2770
2771         model_print("Execution %d:", stats.num_total);
2772         if (isfeasibleprefix())
2773                 model_print("\n");
2774         else
2775                 print_infeasibility(" INFEASIBLE");
2776         print_list(action_trace);
2777         model_print("\n");
2778 }
2779
2780 /**
2781  * Add a Thread to the system for the first time. Should only be called once
2782  * per thread.
2783  * @param t The Thread to add
2784  */
2785 void ModelChecker::add_thread(Thread *t)
2786 {
2787         thread_map->put(id_to_int(t->get_id()), t);
2788         scheduler->add_thread(t);
2789 }
2790
2791 /**
2792  * Removes a thread from the scheduler.
2793  * @param t The Thread to remove
2794  */
2795 void ModelChecker::remove_thread(Thread *t)
2796 {
2797         scheduler->remove_thread(t);
2798 }
2799
2800 /**
2801  * @brief Get a Thread reference by its ID
2802  * @param tid The Thread's ID
2803  * @return A Thread reference
2804  */
2805 Thread * ModelChecker::get_thread(thread_id_t tid) const
2806 {
2807         return thread_map->get(id_to_int(tid));
2808 }
2809
2810 /**
2811  * @brief Get a reference to the Thread in which a ModelAction was executed
2812  * @param act The ModelAction
2813  * @return A Thread reference
2814  */
2815 Thread * ModelChecker::get_thread(const ModelAction *act) const
2816 {
2817         return get_thread(act->get_tid());
2818 }
2819
2820 /**
2821  * @brief Check if a Thread is currently enabled
2822  * @param t The Thread to check
2823  * @return True if the Thread is currently enabled
2824  */
2825 bool ModelChecker::is_enabled(Thread *t) const
2826 {
2827         return scheduler->is_enabled(t);
2828 }
2829
2830 /**
2831  * @brief Check if a Thread is currently enabled
2832  * @param tid The ID of the Thread to check
2833  * @return True if the Thread is currently enabled
2834  */
2835 bool ModelChecker::is_enabled(thread_id_t tid) const
2836 {
2837         return scheduler->is_enabled(tid);
2838 }
2839
2840 /**
2841  * Switch from a model-checker context to a user-thread context. This is the
2842  * complement of ModelChecker::switch_to_master and must be called from the
2843  * model-checker context
2844  *
2845  * @param thread The user-thread to switch to
2846  */
2847 void ModelChecker::switch_from_master(Thread *thread)
2848 {
2849         scheduler->set_current_thread(thread);
2850         Thread::swap(&system_context, thread);
2851 }
2852
2853 /**
2854  * Switch from a user-context to the "master thread" context (a.k.a. system
2855  * context). This switch is made with the intention of exploring a particular
2856  * model-checking action (described by a ModelAction object). Must be called
2857  * from a user-thread context.
2858  *
2859  * @param act The current action that will be explored. May be NULL only if
2860  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2861  * ModelChecker::has_asserted).
2862  * @return The value returned by the current action
2863  */
2864 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2865 {
2866         DBG();
2867         Thread *old = thread_current();
2868         ASSERT(!old->get_pending());
2869         old->set_pending(act);
2870         if (Thread::swap(old, &system_context) < 0) {
2871                 perror("swap threads");
2872                 exit(EXIT_FAILURE);
2873         }
2874         return old->get_return_value();
2875 }
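
/*
 * Typical call-site shape (sketch only; the real wrappers live in the
 * library front end and their exact constructor arguments differ): a user
 * thread packages its operation as a ModelAction, yields to the checker, and
 * receives the operation's result when it is scheduled again:
 *
 *     uint64_t val = model->switch_to_master(
 *                     new ModelAction(ATOMIC_READ, std::memory_order_seq_cst,
 *                                     obj, VALUE_NONE));
 *
 * Here 'obj' stands for the address of the atomic object being read. The
 * checker consumes the pending action via take_step()/check_current_action()
 * and later resumes this thread through switch_from_master().
 */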
2876
2877 /**
2878  * Takes the next step in the execution, if possible.
2879  * @param curr The current step to take
2880  * @return The next Thread to run, if any; NULL if this execution
2881  * should terminate
2882  */
2883 Thread * ModelChecker::take_step(ModelAction *curr)
2884 {
2885         Thread *curr_thrd = get_thread(curr);
2886         ASSERT(curr_thrd->get_state() == THREAD_READY);
2887
2888         curr = check_current_action(curr);
2889
2890         /* Infeasible -> don't take any more steps */
2891         if (is_infeasible())
2892                 return NULL;
2893         else if (isfeasibleprefix() && have_bug_reports()) {
2894                 set_assert();
2895                 return NULL;
2896         }
2897
2898         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2899                 return NULL;
2900
2901         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2902                 scheduler->remove_thread(curr_thrd);
2903
2904         Thread *next_thrd = get_next_thread(curr);
2905
2906         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2907                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2908
2909         return next_thrd;
2910 }
2911
2912 /** Wrapper to run the user's main function, with appropriate arguments */
2913 void user_main_wrapper(void *)
2914 {
2915         user_main(model->params.argc, model->params.argv);
2916 }
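
/*
 * A minimal sketch of how the checker is driven (illustration only; the real
 * entry point also sets up snapshotting and fills in the parameters from the
 * command line):
 *
 *     struct model_params params;
 *     // ... populate params (argc/argv, bounds, etc.) ...
 *     model = new ModelChecker(params);
 *     model->run();   // explores executions until next_execution() is exhausted
 */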
2917
2918 /** @brief Run ModelChecker for the user program */
2919 void ModelChecker::run()
2920 {
2921         do {
2922                 thrd_t user_thread;
2923                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2924                 add_thread(t);
2925
2926                 do {
2927                         /*
2928                          * Stash next pending action(s) for thread(s). We
2929                          * should only need to stash one thread's action (the
2930                          * thread which just took a step) plus the first step
2931                          * for any newly-created thread
2932                          */
2933                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2934                                 thread_id_t tid = int_to_id(i);
2935                                 Thread *thr = get_thread(tid);
2936                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2937                                         switch_from_master(thr);
2938                                 }
2939                         }
2940
2941                         /* Catch assertions from prior take_step or from
2942                          * between-ModelAction bugs (e.g., data races) */
2943                         if (has_asserted())
2944                                 break;
2945
2946                         /* Consume the next action for a Thread */
2947                         ModelAction *curr = t->get_pending();
2948                         t->set_pending(NULL);
2949                         t = take_step(curr);
2950                 } while (t && !t->is_model_thread());
2951
2952                 /*
2953                  * Launch end-of-execution release sequence fixups only when
2954                  * the execution is otherwise feasible AND there are:
2955                  *
2956                  * (1) pending release sequences
2957                  * (2) pending assertions that could be invalidated by a change
2958                  * in clock vectors (i.e., data races)
2959                  * (3) no pending promises
2960                  */
2961                 while (!pending_rel_seqs->empty() &&
2962                                 is_feasible_prefix_ignore_relseq() &&
2963                                 !unrealizedraces.empty()) {
2964                         model_print("*** WARNING: release sequence fixup action "
2965                                         "(%zu pending release sequence(s)) ***\n",
2966                                         pending_rel_seqs->size());
2967                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2968                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2969                                         model_thread);
2970                         take_step(fixup);
2971                 }
2972         } while (next_execution());
2973
2974         model_print("******* Model-checking complete: *******\n");
2975         print_stats();
2976 }