/* model.cc */
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "snapshot-interface.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "output.h"
19 #include "traceanalysis.h"
20
21 #define INITIAL_THREAD_ID       0
22
23 ModelChecker *model;
24
25 struct bug_message {
26         bug_message(const char *str) {
27                 const char *fmt = "  [BUG] %s\n";
28                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
29                 sprintf(msg, fmt, str);
30         }
31         ~bug_message() { if (msg) snapshot_free(msg); }
32
33         char *msg;
34         void print() { model_print("%s", msg); }
35
36         SNAPSHOTALLOC
37 };
38
39 /**
40  * Structure for holding small ModelChecker members that should be snapshotted
41  */
42 struct model_snapshot_members {
43         model_snapshot_members() :
44                 /* First thread created will have id INITIAL_THREAD_ID */
45                 next_thread_id(INITIAL_THREAD_ID),
46                 used_sequence_numbers(0),
47                 next_backtrack(NULL),
48                 bugs(),
49                 stats(),
50                 failed_promise(false),
51                 too_many_reads(false),
52                 no_valid_reads(false),
53                 bad_synchronization(false),
54                 asserted(false)
55         { }
56
57         ~model_snapshot_members() {
58                 for (unsigned int i = 0; i < bugs.size(); i++)
59                         delete bugs[i];
60                 bugs.clear();
61         }
62
63         unsigned int next_thread_id;
64         modelclock_t used_sequence_numbers;
65         ModelAction *next_backtrack;
66         SnapVector<bug_message *> bugs;
67         struct execution_stats stats;
68         bool failed_promise;
69         bool too_many_reads;
70         bool no_valid_reads;
71         /** @brief Incorrectly-ordered synchronization was made */
72         bool bad_synchronization;
73         bool asserted;
74
75         SNAPSHOTALLOC
76 };
77
78 /** @brief Constructor */
79 ModelChecker::ModelChecker(struct model_params params) :
80         /* Initialize default scheduler */
81         params(params),
82         scheduler(new Scheduler()),
83         diverge(NULL),
84         earliest_diverge(NULL),
85         action_trace(new action_list_t()),
86         thread_map(new HashTable<int, Thread *, int>()),
87         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
90         promises(new SnapVector<Promise *>()),
91         futurevalues(new SnapVector<struct PendingFutureValue>()),
92         pending_rel_seqs(new SnapVector<struct release_seq *>()),
93         thrd_last_action(new SnapVector<ModelAction *>(1)),
94         thrd_last_fence_release(new SnapVector<ModelAction *>()),
95         node_stack(new NodeStack()),
96         trace_analyses(new ModelVector<Trace_Analysis *>()),
97         priv(new struct model_snapshot_members()),
98         mo_graph(new CycleGraph())
99 {
100         /* Initialize a model-checker thread, for special ModelActions */
101         model_thread = new Thread(get_next_id());
102         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 }
104
105 /** @brief Destructor */
106 ModelChecker::~ModelChecker()
107 {
108         for (unsigned int i = 0; i < get_num_threads(); i++)
109                 delete thread_map->get(i);
110         delete thread_map;
111
112         delete obj_thrd_map;
113         delete obj_map;
114         delete condvar_waiters_map;
115         delete action_trace;
116
117         for (unsigned int i = 0; i < promises->size(); i++)
118                 delete (*promises)[i];
119         delete promises;
120
121         delete pending_rel_seqs;
122
123         delete thrd_last_action;
124         delete thrd_last_fence_release;
125         delete node_stack;
126         for (unsigned int i = 0; i < trace_analyses->size(); i++)
127                 delete (*trace_analyses)[i];
128         delete trace_analyses;
129         delete scheduler;
130         delete mo_graph;
131         delete priv;
132 }
133
134 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
135 {
136         action_list_t *tmp = hash->get(ptr);
137         if (tmp == NULL) {
138                 tmp = new action_list_t();
139                 hash->put(ptr, tmp);
140         }
141         return tmp;
142 }
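
/*
 * This helper and get_safe_ptr_vect_action() below share a get-or-create
 * idiom: look the address up in the per-location HashTable and lazily
 * allocate an empty container on a miss, so callers never see NULL.  A
 * minimal sketch of the calling pattern (hypothetical caller):
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);   // safe even for a never-before-seen address
 */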
143
144 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
145 {
146         SnapVector<action_list_t> *tmp = hash->get(ptr);
147         if (tmp == NULL) {
148                 tmp = new SnapVector<action_list_t>();
149                 hash->put(ptr, tmp);
150         }
151         return tmp;
152 }
153
154 action_list_t * ModelChecker::get_actions_on_obj(void * obj, thread_id_t tid) {
155         SnapVector<action_list_t> *wrv = obj_thrd_map->get(obj);
156         if (wrv == NULL)
157                 return NULL;
158         unsigned int thread = id_to_int(tid);
159         if (thread < wrv->size())
160                 return &(*wrv)[thread];
161         else
162                 return NULL;
163 }
164
165
166 /**
167  * Restores user program to initial state and resets all model-checker data
168  * structures.
169  */
170 void ModelChecker::reset_to_initial_state()
171 {
172         DEBUG("+++ Resetting to initial state +++\n");
173         node_stack->reset_execution();
174
175         /**
176          * FIXME: if we utilize partial rollback, we will need to free only
177          * those pending actions which were NOT pending before the rollback
178          * point
179          */
180         for (unsigned int i = 0; i < get_num_threads(); i++)
181                 delete get_thread(int_to_id(i))->get_pending();
182
183         snapshot_backtrack_before(0);
184 }
185
186 /** @return a thread ID for a new Thread */
187 thread_id_t ModelChecker::get_next_id()
188 {
189         return priv->next_thread_id++;
190 }
191
192 /** @return the number of user threads created during this execution */
193 unsigned int ModelChecker::get_num_threads() const
194 {
195         return priv->next_thread_id;
196 }
197
198 /**
199  * Must be called from user-thread context (e.g., through the global
200  * thread_current() interface)
201  *
202  * @return The currently executing Thread.
203  */
204 Thread * ModelChecker::get_current_thread() const
205 {
206         return scheduler->get_current_thread();
207 }
208
209 /** @return a sequence number for a new ModelAction */
210 modelclock_t ModelChecker::get_next_seq_num()
211 {
212         return ++priv->used_sequence_numbers;
213 }
214
215 Node * ModelChecker::get_curr_node() const
216 {
217         return node_stack->get_head();
218 }
219
220 /**
221  * @brief Select the next thread to execute based on the current action
222  *
223  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
224  * actions should be followed by the execution of their child thread. In either
225  * case, the current action should determine the next thread schedule.
226  *
227  * @param curr The current action
228  * @return The next thread to run, if the current action will determine this
229  * selection; otherwise NULL
230  */
231 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
232 {
233         /* Do not split atomic RMW */
234         if (curr->is_rmwr())
235                 return get_thread(curr);
236         /* Follow CREATE with the created thread */
237         if (curr->get_type() == THREAD_CREATE)
238                 return curr->get_thread_operand();
239         return NULL;
240 }
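
/*
 * Illustration of the first rule above (a sketch, not an actual trace): an
 * atomic increment is modeled as an ATOMIC_RMWR action followed by the
 * completing ATOMIC_RMW in the same thread.  Scheduling another thread's
 * store to the same location between the two halves would break the
 * atomicity of the read-modify-write, so after the RMWR half this function
 * keeps the same thread running.  THREAD_CREATE is handled analogously: the
 * newly created child is the next thread to run.
 */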
241
242 /**
243  * @brief Choose the next thread to execute.
244  *
245  * This function chooses the next thread that should execute. It can enforce
246  * execution replay/backtracking or, if the model-checker has no preference
247  * regarding the next thread (i.e., when exploring a new execution ordering),
248  * we defer to the scheduler.
249  *
250  * @return The next chosen thread to run, if any exists; NULL, if the current
251  * execution should terminate.
252  */
253 Thread * ModelChecker::get_next_thread()
254 {
255         thread_id_t tid;
256
257         /*
258          * Have we completed exploring the preselected path? Then let the
259          * scheduler decide
260          */
261         if (diverge == NULL)
262                 return scheduler->select_next_thread();
263
264         /* Else, we are trying to replay an execution */
265         ModelAction *next = node_stack->get_next()->get_action();
266
267         if (next == diverge) {
268                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
269                         earliest_diverge = diverge;
270
271                 Node *nextnode = next->get_node();
272                 Node *prevnode = nextnode->get_parent();
273                 scheduler->update_sleep_set(prevnode);
274
275                 /* Reached divergence point */
276                 if (nextnode->increment_behaviors()) {
277                         /* Execute the same thread with a new behavior */
278                         tid = next->get_tid();
279                         node_stack->pop_restofstack(2);
280                 } else {
281                         ASSERT(prevnode);
282                         /* Make a different thread execute for next step */
283                         scheduler->add_sleep(get_thread(next->get_tid()));
284                         tid = prevnode->get_next_backtrack();
285                         /* Make sure the backtracked thread isn't sleeping. */
286                         node_stack->pop_restofstack(1);
287                         if (diverge == earliest_diverge) {
288                                 earliest_diverge = prevnode->get_action();
289                         }
290                 }
291                 /* Start the round robin scheduler from this thread id */
292                 scheduler->set_scheduler_thread(tid);
293                 /* The correct sleep set is in the parent node. */
294                 execute_sleep_set();
295
296                 DEBUG("*** Divergence point ***\n");
297
298                 diverge = NULL;
299         } else {
300                 tid = next->get_tid();
301         }
302         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
303         ASSERT(tid != THREAD_ID_T_NONE);
304         return get_thread(id_to_int(tid));
305 }
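
/*
 * Descriptive note on the replay logic above: the NodeStack records the
 * previous execution.  Until the divergence point is reached, get_next_thread()
 * simply replays the recorded thread ids; at the divergence point it either
 * re-runs the same thread with a new behavior (e.g., a different reads-from
 * choice) or switches to a thread recorded in the parent node's backtracking
 * set, after which the scheduler is free to choose on its own.
 */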
306
307 /**
308  * We need to know what the next actions of all threads in the sleep
309  * set will be.  This method computes them and stores the actions at
310  * the corresponding thread object's pending action.
311  */
312
313 void ModelChecker::execute_sleep_set()
314 {
315         for (unsigned int i = 0; i < get_num_threads(); i++) {
316                 thread_id_t tid = int_to_id(i);
317                 Thread *thr = get_thread(tid);
318                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
319                         thr->get_pending()->set_sleep_flag();
320                 }
321         }
322 }
323
324 /**
325  * @brief Should the current action wake up a given thread?
326  *
327  * @param curr The current action
328  * @param thread The thread that we might wake up
329  * @return True, if we should wake up the sleeping thread; false otherwise
330  */
331 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
332 {
333         const ModelAction *asleep = thread->get_pending();
334         /* Don't allow partial RMW to wake anyone up */
335         if (curr->is_rmwr())
336                 return false;
337         /* Synchronizing actions may have been backtracked */
338         if (asleep->could_synchronize_with(curr))
339                 return true;
340         /* All acquire/release fences and fence-acquire/store-release */
341         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
342                 return true;
343         /* Fence-release + store can awake load-acquire on the same location */
344         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
345                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
346                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
347                         return true;
348         }
349         return false;
350 }
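
/*
 * The last rule above targets a pattern like this (illustrative sketch only,
 * written in C++11-atomics style; it is not code from this file):
 *
 *   Thread A (asleep):  r = x.load(memory_order_acquire);
 *   Thread B (curr):    atomic_thread_fence(memory_order_release);
 *                       x.store(1, memory_order_relaxed);
 *
 * The relaxed store is not itself a release, but together with thread B's
 * earlier fence-release it can synchronize with A's load-acquire, so the
 * sleeping reader must be woken and given a chance to read the new value.
 */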
351
352 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
353 {
354         for (unsigned int i = 0; i < get_num_threads(); i++) {
355                 Thread *thr = get_thread(int_to_id(i));
356                 if (scheduler->is_sleep_set(thr)) {
357                         if (should_wake_up(curr, thr))
358                                 /* Remove this thread from sleep set */
359                                 scheduler->remove_sleep(thr);
360                 }
361         }
362 }
363
364 /** @brief Alert the model-checker that an incorrectly-ordered
365  * synchronization was made */
366 void ModelChecker::set_bad_synchronization()
367 {
368         priv->bad_synchronization = true;
369 }
370
371 /**
372  * Check whether the current trace has triggered an assertion which should halt
373  * its execution.
374  *
375  * @return True, if the execution should be aborted; false otherwise
376  */
377 bool ModelChecker::has_asserted() const
378 {
379         return priv->asserted;
380 }
381
382 /**
383  * Trigger a trace assertion which should cause this execution to be halted.
384  * This can be due to a detected bug or due to an infeasibility that should
385  * halt ASAP.
386  */
387 void ModelChecker::set_assert()
388 {
389         priv->asserted = true;
390 }
391
392 /**
393  * Check if we are in a deadlock. Should only be called at the end of an
394  * execution, although it should not give false positives in the middle of an
395  * execution (there should be some ENABLED thread).
396  *
397  * @return True if program is in a deadlock; false otherwise
398  */
399 bool ModelChecker::is_deadlocked() const
400 {
401         bool blocking_threads = false;
402         for (unsigned int i = 0; i < get_num_threads(); i++) {
403                 thread_id_t tid = int_to_id(i);
404                 if (is_enabled(tid))
405                         return false;
406                 Thread *t = get_thread(tid);
407                 if (!t->is_model_thread() && t->get_pending())
408                         blocking_threads = true;
409         }
410         return blocking_threads;
411 }
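
/*
 * Illustrative deadlock that this check is meant to flag (a sketch, assuming
 * the mutex operations modeled by this checker):
 *
 *   Thread 1: m1.lock();  m2.lock();   // blocks waiting for m2
 *   Thread 2: m2.lock();  m1.lock();   // blocks waiting for m1
 *
 * At this point no thread is enabled, yet both threads still have pending
 * actions, so the function returns true.
 */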
412
413 /**
414  * Check if this is a complete execution. That is, have all threads completed
415  * execution (rather than exiting because sleep sets have forced a redundant
416  * execution).
417  *
418  * @return True if the execution is complete.
419  */
420 bool ModelChecker::is_complete_execution() const
421 {
422         for (unsigned int i = 0; i < get_num_threads(); i++)
423                 if (is_enabled(int_to_id(i)))
424                         return false;
425         return true;
426 }
427
428 /**
429  * @brief Assert a bug in the executing program.
430  *
431  * Use this function to assert any sort of bug in the user program. If the
432  * current trace is feasible (actually, a prefix of some feasible execution),
433  * then this execution will be aborted, printing the appropriate message. If
434  * the current trace is not yet feasible, the error message will be stashed and
435  * printed if the execution ever becomes feasible.
436  *
437  * @param msg Descriptive message for the bug (do not include newline char)
438  * @return True if bug is immediately-feasible
439  */
440 bool ModelChecker::assert_bug(const char *msg, ...)
441 {
442         char str[800];
443
444         va_list ap;
445         va_start(ap, msg);
446         vsnprintf(str, sizeof(str), msg, ap);
447         va_end(ap);
448
449         priv->bugs.push_back(new bug_message(str));
450
451         if (isfeasibleprefix()) {
452                 set_assert();
453                 return true;
454         }
455         return false;
456 }
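
/*
 * Usage sketch (hedged): the message is a printf-style format string that is
 * formatted into the fixed 800-byte buffer above, for example:
 *
 *   model->assert_bug("Deadlock detected");
 *   model->assert_bug("unexpected value: %d", val);   // hypothetical caller
 *
 * The first call actually appears in next_execution() below; the second is
 * only an illustration of the varargs interface.
 */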
457
458 /**
459  * @brief Assert a bug in the executing program, asserted by a user thread
460  * @see ModelChecker::assert_bug
461  * @param msg Descriptive message for the bug (do not include newline char)
462  */
463 void ModelChecker::assert_user_bug(const char *msg)
464 {
465         /* If feasible bug, bail out now */
466         if (assert_bug(msg))
467                 switch_to_master(NULL);
468 }
469
470 /** @return True, if any bugs have been reported for this execution */
471 bool ModelChecker::have_bug_reports() const
472 {
473         return priv->bugs.size() != 0;
474 }
475
476 /** @brief Print bug report listing for this execution (if any bugs exist) */
477 void ModelChecker::print_bugs() const
478 {
479         if (have_bug_reports()) {
480                 model_print("Bug report: %zu bug%s detected\n",
481                                 priv->bugs.size(),
482                                 priv->bugs.size() > 1 ? "s" : "");
483                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
484                         priv->bugs[i]->print();
485         }
486 }
487
488 /**
489  * @brief Record end-of-execution stats
490  *
491  * Must be run when exiting an execution. Records various stats.
492  * @see struct execution_stats
493  */
494 void ModelChecker::record_stats()
495 {
496         stats.num_total++;
497         if (!isfeasibleprefix())
498                 stats.num_infeasible++;
499         else if (have_bug_reports())
500                 stats.num_buggy_executions++;
501         else if (is_complete_execution())
502                 stats.num_complete++;
503         else {
504                 stats.num_redundant++;
505
506                 /**
507                  * @todo We can violate this ASSERT() when fairness/sleep sets
508                  * conflict to cause an execution to terminate, e.g. with:
509                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
510                  */
511                 //ASSERT(scheduler->all_threads_sleeping());
512         }
513 }
514
515 /** @brief Print execution stats */
516 void ModelChecker::print_stats() const
517 {
518         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
519         model_print("Number of redundant executions: %d\n", stats.num_redundant);
520         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
521         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
522         model_print("Total executions: %d\n", stats.num_total);
523         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
524 }
525
526 /**
527  * @brief End-of-execution print
528  * @param printbugs Should any existing bugs be printed?
529  */
530 void ModelChecker::print_execution(bool printbugs) const
531 {
532         print_program_output();
533
534         if (params.verbose) {
535                 model_print("Earliest divergence point since last feasible execution:\n");
536                 if (earliest_diverge)
537                         earliest_diverge->print();
538                 else
539                         model_print("(Not set)\n");
540
541                 model_print("\n");
542                 print_stats();
543         }
544
545         /* Don't print invalid bugs */
546         if (printbugs)
547                 print_bugs();
548
549         model_print("\n");
550         print_summary();
551 }
552
553 /**
554  * Queries the model-checker for more executions to explore and, if one
555  * exists, resets the model-checker state to execute a new execution.
556  *
557  * @return True if there are more executions to explore; false
558  * otherwise.
559  */
560 bool ModelChecker::next_execution()
561 {
562         DBG();
563         /* Is this execution a feasible execution that's worth bug-checking? */
564         bool complete = isfeasibleprefix() && (is_complete_execution() ||
565                         have_bug_reports());
566
567         /* End-of-execution bug checks */
568         if (complete) {
569                 if (is_deadlocked())
570                         assert_bug("Deadlock detected");
571
572                 checkDataRaces();
573                 run_trace_analyses();
574         }
575
576         record_stats();
577
578         /* Output */
579         if (params.verbose || (complete && have_bug_reports()))
580                 print_execution(complete);
581         else
582                 clear_program_output();
583
584         if (complete)
585                 earliest_diverge = NULL;
586
587         if ((diverge = get_next_backtrack()) == NULL)
588                 return false;
589
590         if (DBG_ENABLED()) {
591                 model_print("Next execution will diverge at:\n");
592                 diverge->print();
593         }
594
595         reset_to_initial_state();
596         return true;
597 }
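
/*
 * Sketch of the driver loop that consumes next_execution() (illustrative
 * only; the real entry point lives outside this file):
 *
 *   do {
 *           run_user_program();        // hypothetical: run threads to completion
 *   } while (model->next_execution()); // keep going while backtracking points remain
 */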
598
599 /**
600  * @brief Run trace analyses on complete trace. */
601
602 void ModelChecker::run_trace_analyses() {
603         for (unsigned int i = 0; i < trace_analyses->size(); i++) {
604                 (*trace_analyses)[i]->analyze(action_trace);
605         }
606 }
607
608 /**
609  * @brief Find the last fence-related backtracking conflict for a ModelAction
610  *
611  * This function performs the search for the most recent conflicting action
612  * against which we should perform backtracking, as affected by fence
613  * operations. This includes pairs of potentially-synchronizing actions which
614  * occur due to fence-acquire or fence-release, and hence should be explored in
615  * the opposite execution order.
616  *
617  * @param act The current action
618  * @return The most recent action which conflicts with act due to fences
619  */
620 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
621 {
622         /* Only perform release/acquire fence backtracking for stores */
623         if (!act->is_write())
624                 return NULL;
625
626         /* Find a fence-release (or, act is a release) */
627         ModelAction *last_release;
628         if (act->is_release())
629                 last_release = act;
630         else
631                 last_release = get_last_fence_release(act->get_tid());
632         if (!last_release)
633                 return NULL;
634
635         /* Skip past the release */
636         action_list_t *list = action_trace;
637         action_list_t::reverse_iterator rit;
638         for (rit = list->rbegin(); rit != list->rend(); rit++)
639                 if (*rit == last_release)
640                         break;
641         ASSERT(rit != list->rend());
642
643         /* Find a prior:
644          *   load-acquire
645          * or
646          *   load --sb-> fence-acquire */
647         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
648         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
649         bool found_acquire_fences = false;
650         for ( ; rit != list->rend(); rit++) {
651                 ModelAction *prev = *rit;
652                 if (act->same_thread(prev))
653                         continue;
654
655                 int tid = id_to_int(prev->get_tid());
656
657                 if (prev->is_read() && act->same_var(prev)) {
658                         if (prev->is_acquire()) {
659                                 /* Found most recent load-acquire, don't need
660                                  * to search for more fences */
661                                 if (!found_acquire_fences)
662                                         return NULL;
663                         } else {
664                                 prior_loads[tid] = prev;
665                         }
666                 }
667                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
668                         found_acquire_fences = true;
669                         acquire_fences[tid] = prev;
670                 }
671         }
672
673         ModelAction *latest_backtrack = NULL;
674         for (unsigned int i = 0; i < acquire_fences.size(); i++)
675                 if (acquire_fences[i] && prior_loads[i])
676                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
677                                 latest_backtrack = acquire_fences[i];
678         return latest_backtrack;
679 }
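
/*
 * Example of the pattern this search targets (a sketch, not an actual trace):
 *
 *   Thread A (act):   atomic_thread_fence(memory_order_release);   // last_release
 *                     x.store(1, memory_order_relaxed);            // act
 *   Thread B:         r = x.load(memory_order_relaxed);            // prior load
 *                     atomic_thread_fence(memory_order_acquire);   // acquire fence
 *
 * In the current trace B's load and acquire fence happen to precede A's
 * actions, so the pair could not synchronize; the acquire fence is returned
 * as a backtracking conflict so that the opposite ordering is explored too.
 */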
680
681 /**
682  * @brief Find the last backtracking conflict for a ModelAction
683  *
684  * This function performs the search for the most recent conflicting action
685  * against which we should perform backtracking. This primarily includes pairs of
686  * synchronizing actions which should be explored in the opposite execution
687  * order.
688  *
689  * @param act The current action
690  * @return The most recent action which conflicts with act
691  */
692 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
693 {
694         switch (act->get_type()) {
695         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
696         case ATOMIC_READ:
697         case ATOMIC_WRITE:
698         case ATOMIC_RMW: {
699                 ModelAction *ret = NULL;
700
701                 /* linear search: from most recent to oldest */
702                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
703                 action_list_t::reverse_iterator rit;
704                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
705                         ModelAction *prev = *rit;
706                         if (prev->could_synchronize_with(act)) {
707                                 ret = prev;
708                                 break;
709                         }
710                 }
711
712                 ModelAction *ret2 = get_last_fence_conflict(act);
713                 if (!ret2)
714                         return ret;
715                 if (!ret)
716                         return ret2;
717                 if (*ret < *ret2)
718                         return ret2;
719                 return ret;
720         }
721         case ATOMIC_LOCK:
722         case ATOMIC_TRYLOCK: {
723                 /* linear search: from most recent to oldest */
724                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
725                 action_list_t::reverse_iterator rit;
726                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
727                         ModelAction *prev = *rit;
728                         if (act->is_conflicting_lock(prev))
729                                 return prev;
730                 }
731                 break;
732         }
733         case ATOMIC_UNLOCK: {
734                 /* linear search: from most recent to oldest */
735                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
736                 action_list_t::reverse_iterator rit;
737                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
738                         ModelAction *prev = *rit;
739                         if (!act->same_thread(prev) && prev->is_failed_trylock())
740                                 return prev;
741                 }
742                 break;
743         }
744         case ATOMIC_WAIT: {
745                 /* linear search: from most recent to oldest */
746                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
747                 action_list_t::reverse_iterator rit;
748                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
749                         ModelAction *prev = *rit;
750                         if (!act->same_thread(prev) && prev->is_failed_trylock())
751                                 return prev;
752                         if (!act->same_thread(prev) && prev->is_notify())
753                                 return prev;
754                 }
755                 break;
756         }
757
758         case ATOMIC_NOTIFY_ALL:
759         case ATOMIC_NOTIFY_ONE: {
760                 /* linear search: from most recent to oldest */
761                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
762                 action_list_t::reverse_iterator rit;
763                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
764                         ModelAction *prev = *rit;
765                         if (!act->same_thread(prev) && prev->is_wait())
766                                 return prev;
767                 }
768                 break;
769         }
770         default:
771                 break;
772         }
773         return NULL;
774 }
775
776 /** This method finds backtracking points against which we should try to
777  * reorder the given ModelAction.
778  *
779  * @param act The ModelAction to find backtracking points for.
780  */
781 void ModelChecker::set_backtracking(ModelAction *act)
782 {
783         Thread *t = get_thread(act);
784         ModelAction *prev = get_last_conflict(act);
785         if (prev == NULL)
786                 return;
787
788         Node *node = prev->get_node()->get_parent();
789
790         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
791         int low_tid, high_tid;
792         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
793                 low_tid = id_to_int(act->get_tid());
794                 high_tid = low_tid + 1;
795         } else {
796                 low_tid = 0;
797                 high_tid = get_num_threads();
798         }
799
800         for (int i = low_tid; i < high_tid; i++) {
801                 thread_id_t tid = int_to_id(i);
802
803                 /* Make sure this thread can be enabled here. */
804                 if (i >= node->get_num_threads())
805                         break;
806
807                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
808                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
809                 if (node->enabled_status(tid) != THREAD_ENABLED)
810                         continue;
811
812                 /* Check if this has been explored already */
813                 if (node->has_been_explored(tid))
814                         continue;
815
816                 /* See if fairness allows */
817                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
818                         bool unfair = false;
819                         for (int t = 0; t < node->get_num_threads(); t++) {
820                                 thread_id_t tother = int_to_id(t);
821                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
822                                         unfair = true;
823                                         break;
824                                 }
825                         }
826                         if (unfair)
827                                 continue;
828                 }
829
830                 /* See if CHESS-like yield fairness allows */
831                 if (model->params.yieldon) {
832                         bool unfair = false;
833                         for (int t = 0; t < node->get_num_threads(); t++) {
834                                 thread_id_t tother = int_to_id(t);
835                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
836                                         unfair = true;
837                                         break;
838                                 }
839                         }
840                         if (unfair)
841                                 continue;
842                 }
843                 
844                 /* Cache the latest backtracking point */
845                 set_latest_backtrack(prev);
846
847                 /* If this is a new backtracking point, mark the tree */
848                 if (!node->set_backtrack(tid))
849                         continue;
850                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
851                                         id_to_int(prev->get_tid()),
852                                         id_to_int(t->get_id()));
853                 if (DBG_ENABLED()) {
854                         prev->print();
855                         act->print();
856                 }
857         }
858 }
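
/*
 * Rough intuition for the DPOR step above (illustrative): when the current
 * action conflicts with an earlier action 'prev' (e.g., two accesses to the
 * same atomic that could synchronize), the node just before 'prev' is marked
 * so that the current thread (or, if it was not enabled there, every enabled
 * thread) becomes a backtracking choice, letting a future execution reorder
 * the conflicting pair.
 */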
859
860 /**
861  * @brief Cache a backtracking point as the "most recent", if eligible
862  *
863  * Note that this does not prepare the NodeStack for this backtracking
864  * operation; it only caches the action on a per-execution basis
865  *
866  * @param act The operation at which we should explore a different next action
867  * (i.e., backtracking point)
868  * @return True, if this action is now the most recent backtracking point;
869  * false otherwise
870  */
871 bool ModelChecker::set_latest_backtrack(ModelAction *act)
872 {
873         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
874                 priv->next_backtrack = act;
875                 return true;
876         }
877         return false;
878 }
879
880 /**
881  * Returns last backtracking point. The model checker will explore a different
882  * path for this point in the next execution.
883  * @return The ModelAction at which the next execution should diverge.
884  */
885 ModelAction * ModelChecker::get_next_backtrack()
886 {
887         ModelAction *next = priv->next_backtrack;
888         priv->next_backtrack = NULL;
889         return next;
890 }
891
892 /**
893  * Processes a read model action.
894  * @param curr is the read model action to process.
895  * @return True if processing this read updates the mo_graph.
896  */
897 bool ModelChecker::process_read(ModelAction *curr)
898 {
899         Node *node = curr->get_node();
900         while (true) {
901                 bool updated = false;
902                 switch (node->get_read_from_status()) {
903                 case READ_FROM_PAST: {
904                         const ModelAction *rf = node->get_read_from_past();
905                         ASSERT(rf);
906
907                         mo_graph->startChanges();
908
909                         ASSERT(!is_infeasible());
910                         if (!check_recency(curr, rf)) {
911                                 if (node->increment_read_from()) {
912                                         mo_graph->rollbackChanges();
913                                         continue;
914                                 } else {
915                                         priv->too_many_reads = true;
916                                 }
917                         }
918
919                         updated = r_modification_order(curr, rf);
920                         read_from(curr, rf);
921                         mo_graph->commitChanges();
922                         mo_check_promises(curr, true);
923                         break;
924                 }
925                 case READ_FROM_PROMISE: {
926                         Promise *promise = curr->get_node()->get_read_from_promise();
927                         if (promise->add_reader(curr))
928                                 priv->failed_promise = true;
929                         curr->set_read_from_promise(promise);
930                         mo_graph->startChanges();
931                         if (!check_recency(curr, promise))
932                                 priv->too_many_reads = true;
933                         updated = r_modification_order(curr, promise);
934                         mo_graph->commitChanges();
935                         break;
936                 }
937                 case READ_FROM_FUTURE: {
938                         /* Read from future value */
939                         struct future_value fv = node->get_future_value();
940                         Promise *promise = new Promise(curr, fv);
941                         curr->set_read_from_promise(promise);
942                         promises->push_back(promise);
943                         mo_graph->startChanges();
944                         updated = r_modification_order(curr, promise);
945                         mo_graph->commitChanges();
946                         break;
947                 }
948                 default:
949                         ASSERT(false);
950                 }
951                 get_thread(curr)->set_return_value(curr->get_return_value());
952                 return updated;
953         }
954 }
955
956 /**
957  * Processes a lock, trylock, or unlock model action.  @param curr is
958  * the mutex model action to process.
959  *
960  * The trylock operation checks whether the lock is taken.  If not,
961  * it falls through to the normal lock operation case.  If so, it
962  * returns failure.
963  *
964  * The lock operation has already been checked that it is enabled, so
965  * it just grabs the lock and synchronizes with the previous unlock.
966  *
967  * The unlock operation has to re-enable all of the threads that are
968  * waiting on the lock.
969  *
970  * @return True if synchronization was updated; false otherwise
971  */
972 bool ModelChecker::process_mutex(ModelAction *curr)
973 {
974         std::mutex *mutex = curr->get_mutex();
975         struct std::mutex_state *state = NULL;
976
977         if (mutex)
978                 state = mutex->get_state();
979
980         switch (curr->get_type()) {
981         case ATOMIC_TRYLOCK: {
982                 bool success = !state->locked;
983                 curr->set_try_lock(success);
984                 if (!success) {
985                         get_thread(curr)->set_return_value(0);
986                         break;
987                 }
988                 get_thread(curr)->set_return_value(1);
989         }
990                 //otherwise fall through to the lock case
991         case ATOMIC_LOCK: {
992                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
993                         assert_bug("Lock access before initialization");
994                 state->locked = get_thread(curr);
995                 ModelAction *unlock = get_last_unlock(curr);
996                 //synchronize with the previous unlock statement
997                 if (unlock != NULL) {
998                         curr->synchronize_with(unlock);
999                         return true;
1000                 }
1001                 break;
1002         }
1003         case ATOMIC_WAIT:
1004         case ATOMIC_UNLOCK: {
1005                 /* wake up the other threads */
1006                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1007                         Thread *t = get_thread(int_to_id(i));
1008                         Thread *curr_thrd = get_thread(curr);
1009                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
1010                                 scheduler->wake(t);
1011                 }
1012
1013                 /* unlock the lock - after checking who was waiting on it */
1014                 state->locked = NULL;
1015
1016                 if (!curr->is_wait())
1017                         break; /* The rest is only for ATOMIC_WAIT */
1018
1019                 /* Should we go to sleep? (simulate spurious failures) */
1020                 if (curr->get_node()->get_misc() == 0) {
1021                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1022                         /* disable us */
1023                         scheduler->sleep(get_thread(curr));
1024                 }
1025                 break;
1026         }
1027         case ATOMIC_NOTIFY_ALL: {
1028                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1029                 //activate all the waiting threads
1030                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1031                         scheduler->wake(get_thread(*rit));
1032                 }
1033                 waiters->clear();
1034                 break;
1035         }
1036         case ATOMIC_NOTIFY_ONE: {
1037                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1038                 int wakeupthread = curr->get_node()->get_misc();
1039                 action_list_t::iterator it = waiters->begin();
1040                 advance(it, wakeupthread);
1041                 scheduler->wake(get_thread(*it));
1042                 waiters->erase(it);
1043                 break;
1044         }
1045
1046         default:
1047                 ASSERT(0);
1048         }
1049         return false;
1050 }
1051
1052 /**
1053  * @brief Check if the current pending promises allow a future value to be sent
1054  *
1055  * If one of the following is true:
1056  *  (a) there are no pending promises
1057  *  (b) the reader and writer do not cross any promises
1058  * Then, it is safe to pass a future value back now.
1059  *
1060  * Otherwise, we must save the pending future value until (a) or (b) is true
1061  *
1062  * @param writer The operation which sends the future value. Must be a write.
1063  * @param reader The operation which will observe the value. Must be a read.
1064  * @return True if the future value can be sent now; false if it must wait.
1065  */
1066 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1067                 const ModelAction *reader) const
1068 {
1069         if (promises->empty())
1070                 return true;
1071         for (int i = promises->size() - 1; i >= 0; i--) {
1072                 ModelAction *pr = (*promises)[i]->get_reader(0);
1073                 //reader is after promise...doesn't cross any promise
1074                 if (*reader > *pr)
1075                         return true;
1076                 //writer is after promise, reader before...bad...
1077                 if (*writer > *pr)
1078                         return false;
1079         }
1080         return true;
1081 }
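
/*
 * Timeline illustration of the check above (a sketch).  Let P be the first
 * reader of some pending promise, and suppose we want to send writer's value
 * to reader:
 *
 *   ... P ... reader ...            safe: the reader is after P, nothing is crossed
 *   ... reader ... P ... writer ... unsafe: the value would travel backwards
 *                                   across P, so it is queued in futurevalues
 *   ... reader ... writer ... P ... neither is past P; the remaining promises
 *                                   are checked
 */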
1082
1083 /**
1084  * @brief Add a future value to a reader
1085  *
1086  * This function performs a few additional checks to ensure that the future
1087  * value can be feasibly observed by the reader
1088  *
1089  * @param writer The operation whose value is sent. Must be a write.
1090  * @param reader The read operation which may read the future value. Must be a read.
1091  */
1092 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1093 {
1094         /* Do more ambitious checks now that mo is more complete */
1095         if (!mo_may_allow(writer, reader))
1096                 return;
1097
1098         Node *node = reader->get_node();
1099
1100         /* Find an ancestor thread which exists at the time of the reader */
1101         Thread *write_thread = get_thread(writer);
1102         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1103                 write_thread = write_thread->get_parent();
1104
1105         struct future_value fv = {
1106                 writer->get_write_value(),
1107                 writer->get_seq_number() + params.maxfuturedelay,
1108                 write_thread->get_id(),
1109         };
1110         if (node->add_future_value(fv))
1111                 set_latest_backtrack(reader);
1112 }
1113
1114 /**
1115  * Process a write ModelAction
1116  * @param curr The ModelAction to process
1117  * @return True if the mo_graph was updated or promises were resolved
1118  */
1119 bool ModelChecker::process_write(ModelAction *curr)
1120 {
1121         /* Readers to which we may send our future value */
1122         ModelVector<ModelAction *> send_fv;
1123
1124         const ModelAction *earliest_promise_reader;
1125         bool updated_promises = false;
1126
1127         bool updated_mod_order = w_modification_order(curr, &send_fv);
1128         Promise *promise = pop_promise_to_resolve(curr);
1129
1130         if (promise) {
1131                 earliest_promise_reader = promise->get_reader(0);
1132                 updated_promises = resolve_promise(curr, promise);
1133         } else
1134                 earliest_promise_reader = NULL;
1135
1136         for (unsigned int i = 0; i < send_fv.size(); i++) {
1137                 ModelAction *read = send_fv[i];
1138
1139                 /* Don't send future values to reads after the Promise we resolve */
1140                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1141                         /* Check if future value can be sent immediately */
1142                         if (promises_may_allow(curr, read)) {
1143                                 add_future_value(curr, read);
1144                         } else {
1145                                 futurevalues->push_back(PendingFutureValue(curr, read));
1146                         }
1147                 }
1148         }
1149
1150         /* Check the pending future values */
1151         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1152                 struct PendingFutureValue pfv = (*futurevalues)[i];
1153                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1154                         add_future_value(pfv.writer, pfv.reader);
1155                         futurevalues->erase(futurevalues->begin() + i);
1156                 }
1157         }
1158
1159         mo_graph->commitChanges();
1160         mo_check_promises(curr, false);
1161
1162         get_thread(curr)->set_return_value(VALUE_NONE);
1163         return updated_mod_order || updated_promises;
1164 }
1165
1166 /**
1167  * Process a fence ModelAction
1168  * @param curr The ModelAction to process
1169  * @return True if synchronization was updated
1170  */
1171 bool ModelChecker::process_fence(ModelAction *curr)
1172 {
1173         /*
1174          * fence-relaxed: no-op
1175  * fence-release: only log the occurrence (not in this function), for
1176          *   use in later synchronization
1177          * fence-acquire (this function): search for hypothetical release
1178          *   sequences
1179          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1180          */
1181         bool updated = false;
1182         if (curr->is_acquire()) {
1183                 action_list_t *list = action_trace;
1184                 action_list_t::reverse_iterator rit;
1185                 /* Find X : is_read(X) && X --sb-> curr */
1186                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1187                         ModelAction *act = *rit;
1188                         if (act == curr)
1189                                 continue;
1190                         if (act->get_tid() != curr->get_tid())
1191                                 continue;
1192                         /* Stop at the beginning of the thread */
1193                         if (act->is_thread_start())
1194                                 break;
1195                         /* Stop once we reach a prior fence-acquire */
1196                         if (act->is_fence() && act->is_acquire())
1197                                 break;
1198                         if (!act->is_read())
1199                                 continue;
1200                         /* read-acquire will find its own release sequences */
1201                         if (act->is_acquire())
1202                                 continue;
1203
1204                         /* Establish hypothetical release sequences */
1205                         rel_heads_list_t release_heads;
1206                         get_release_seq_heads(curr, act, &release_heads);
1207                         for (unsigned int i = 0; i < release_heads.size(); i++)
1208                                 if (!curr->synchronize_with(release_heads[i]))
1209                                         set_bad_synchronization();
1210                         if (release_heads.size() != 0)
1211                                 updated = true;
1212                 }
1213         }
1214         return updated;
1215 }
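
/*
 * The fence-acquire case above corresponds to the standard C++11 idiom
 * (hedged sketch, not code from this file):
 *
 *   Thread A:  data = 42;
 *              flag.store(1, memory_order_release);
 *   Thread B:  if (flag.load(memory_order_relaxed) == 1) {
 *                      atomic_thread_fence(memory_order_acquire);   // 'curr'
 *                      assert(data == 42);
 *              }
 *
 * The relaxed load does not synchronize by itself, but the later fence-acquire
 * in the same thread does, which is why the loop searches backwards for
 * sequenced-before relaxed reads and synchronizes with their release heads.
 */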
1216
1217 /**
1218  * @brief Process the current action for thread-related activity
1219  *
1220  * Performs current-action processing for a THREAD_* ModelAction. Processing
1221  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1222  * synchronization, etc.  This function is a no-op for non-THREAD actions
1223  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1224  *
1225  * @param curr The current action
1226  * @return True if synchronization was updated or a thread completed
1227  */
1228 bool ModelChecker::process_thread_action(ModelAction *curr)
1229 {
1230         bool updated = false;
1231
1232         switch (curr->get_type()) {
1233         case THREAD_CREATE: {
1234                 thrd_t *thrd = (thrd_t *)curr->get_location();
1235                 struct thread_params *params = (struct thread_params *)curr->get_value();
1236                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1237                 add_thread(th);
1238                 th->set_creation(curr);
1239                 /* Promises can be satisfied by children */
1240                 for (unsigned int i = 0; i < promises->size(); i++) {
1241                         Promise *promise = (*promises)[i];
1242                         if (promise->thread_is_available(curr->get_tid()))
1243                                 promise->add_thread(th->get_id());
1244                 }
1245                 break;
1246         }
1247         case THREAD_JOIN: {
1248                 Thread *blocking = curr->get_thread_operand();
1249                 ModelAction *act = get_last_action(blocking->get_id());
1250                 curr->synchronize_with(act);
1251                 updated = true; /* trigger rel-seq checks */
1252                 break;
1253         }
1254         case THREAD_FINISH: {
1255                 Thread *th = get_thread(curr);
1256                 /* Wake up any joining threads */
1257                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1258                         Thread *waiting = get_thread(int_to_id(i));
1259                         if (waiting->waiting_on() == th &&
1260                                         waiting->get_pending()->is_thread_join())
1261                                 scheduler->wake(waiting);
1262                 }
1263                 th->complete();
1264                 /* Completed thread can't satisfy promises */
1265                 for (unsigned int i = 0; i < promises->size(); i++) {
1266                         Promise *promise = (*promises)[i];
1267                         if (promise->thread_is_available(th->get_id()))
1268                                 if (promise->eliminate_thread(th->get_id()))
1269                                         priv->failed_promise = true;
1270                 }
1271                 updated = true; /* trigger rel-seq checks */
1272                 break;
1273         }
1274         case THREAD_START: {
1275                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1276                 break;
1277         }
1278         default:
1279                 break;
1280         }
1281
1282         return updated;
1283 }
1284
1285 /**
1286  * @brief Process the current action for release sequence fixup activity
1287  *
1288  * Performs model-checker release sequence fixups for the current action,
1289  * forcing a single pending release sequence to break (with a given, potential
1290  * "loose" write) or to complete (i.e., synchronize). If a pending release
1291  * sequence forms a complete release sequence, then we must perform the fixup
1292  * synchronization, mo_graph additions, etc.
1293  *
1294  * @param curr The current action; must be a release sequence fixup action
1295  * @param work_queue The work queue to which to add work items as they are
1296  * generated
1297  */
1298 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1299 {
1300         const ModelAction *write = curr->get_node()->get_relseq_break();
1301         struct release_seq *sequence = pending_rel_seqs->back();
1302         pending_rel_seqs->pop_back();
1303         ASSERT(sequence);
1304         ModelAction *acquire = sequence->acquire;
1305         const ModelAction *rf = sequence->rf;
1306         const ModelAction *release = sequence->release;
1307         ASSERT(acquire);
1308         ASSERT(release);
1309         ASSERT(rf);
1310         ASSERT(release->same_thread(rf));
1311
1312         if (write == NULL) {
1313                 /**
1314                  * @todo Forcing a synchronization requires that we set
1315                  * modification order constraints. For instance, we can't allow
1316                  * a fixup sequence in which two separate read-acquire
1317                  * operations read from the same sequence, where the first one
1318                  * synchronizes and the other doesn't. Essentially, we can't
1319                  * allow any writes to insert themselves between 'release' and
1320                  * 'rf'
1321                  */
1322
1323                 /* Must synchronize */
1324                 if (!acquire->synchronize_with(release)) {
1325                         set_bad_synchronization();
1326                         return;
1327                 }
1328                 /* Re-check all pending release sequences */
1329                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1330                 /* Re-check act for mo_graph edges */
1331                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1332
1333                 /* propagate synchronization to later actions */
1334                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1335                 for (; (*rit) != acquire; rit++) {
1336                         ModelAction *propagate = *rit;
1337                         if (acquire->happens_before(propagate)) {
1338                                 propagate->synchronize_with(acquire);
1339                                 /* Re-check 'propagate' for mo_graph edges */
1340                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1341                         }
1342                 }
1343         } else {
1344                 /* Break release sequence with new edges:
1345                  *   release --mo--> write --mo--> rf */
1346                 mo_graph->addEdge(release, write);
1347                 mo_graph->addEdge(write, rf);
1348         }
1349
1350         /* See if we have realized a data race */
1351         checkDataRaces();
1352 }
1353
1354 /**
1355  * Initialize the current action by performing one or more of the following
1356  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1357  * in the NodeStack, manipulating backtracking sets, allocating and
1358  * initializing clock vectors, and computing the promises to fulfill.
1359  *
1360  * @param curr The current action, as passed from the user context; may be
1361  * freed/invalidated after the execution of this function, with a different
1362  * action "returned" in its place (pass-by-reference)
1363  * @return True if curr is a newly-explored action; false otherwise
1364  */
1365 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1366 {
1367         ModelAction *newcurr;
1368
1369         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1370                 newcurr = process_rmw(*curr);
1371                 delete *curr;
1372
1373                 if (newcurr->is_rmw())
1374                         compute_promises(newcurr);
1375
1376                 *curr = newcurr;
1377                 return false;
1378         }
1379
1380         (*curr)->set_seq_number(get_next_seq_num());
1381
1382         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1383         if (newcurr) {
1384                 /* First restore type and order in case of RMW operation */
1385                 if ((*curr)->is_rmwr())
1386                         newcurr->copy_typeandorder(*curr);
1387
1388                 ASSERT((*curr)->get_location() == newcurr->get_location());
1389                 newcurr->copy_from_new(*curr);
1390
1391                 /* Discard duplicate ModelAction; use action from NodeStack */
1392                 delete *curr;
1393
1394                 /* Always compute new clock vector */
1395                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1396
1397                 *curr = newcurr;
1398                 return false; /* Action was explored previously */
1399         } else {
1400                 newcurr = *curr;
1401
1402                 /* Always compute new clock vector */
1403                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1404
1405                 /* Assign most recent release fence */
1406                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1407
1408                 /*
1409                  * Perform one-time actions when pushing new ModelAction onto
1410                  * NodeStack
1411                  */
1412                 if (newcurr->is_write())
1413                         compute_promises(newcurr);
1414                 else if (newcurr->is_relseq_fixup())
1415                         compute_relseq_breakwrites(newcurr);
1416                 else if (newcurr->is_wait())
1417                         newcurr->get_node()->set_misc_max(2);
1418                 else if (newcurr->is_notify_one()) {
1419                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1420                 }
1421                 return true; /* This was a new ModelAction */
1422         }
1423 }
1424
1425 /**
1426  * @brief Establish reads-from relation between two actions
1427  *
1428  * Perform basic operations involved with establishing a concrete rf relation,
1429  * including setting the ModelAction data and checking for release sequences.
1430  *
1431  * @param act The action that is reading (must be a read)
1432  * @param rf The action from which we are reading (must be a write)
1433  *
1434  * @return True if this read established synchronization
1435  */
1436 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1437 {
1438         ASSERT(rf);
1439         ASSERT(rf->is_write());
1440
1441         act->set_read_from(rf);
1442         if (act->is_acquire()) {
1443                 rel_heads_list_t release_heads;
1444                 get_release_seq_heads(act, act, &release_heads);
1445                 int num_heads = release_heads.size();
1446                 for (unsigned int i = 0; i < release_heads.size(); i++)
1447                         if (!act->synchronize_with(release_heads[i])) {
1448                                 set_bad_synchronization();
1449                                 num_heads--;
1450                         }
1451                 return num_heads > 0;
1452         }
1453         return false;
1454 }
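
/*
 * Illustrative sketch (hypothetical client program, not part of this file):
 * the release/acquire pairing that read_from() records. If the acquire load
 * reads from the release store, the two synchronize, and the relaxed write
 * to 'data' becomes visible to the reader without a data race.
 *
 *   #include <atomic>
 *   #include <cassert>
 *
 *   std::atomic<bool> flag(false);
 *   int data;                                            // plain, non-atomic location
 *
 *   void writer() {
 *           data = 42;                                    // ordinary write
 *           flag.store(true, std::memory_order_release);  // release head
 *   }
 *
 *   void reader() {
 *           if (flag.load(std::memory_order_acquire))     // may read-from the release store
 *                   assert(data == 42);                   // synchronizes-with => no data race
 *   }
 */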
1455
1456 /**
1457  * Check promises and eliminate potentially-satisfying threads when a thread is
1458  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1459  * no longer satisfy a promise generated from that thread.
1460  *
1461  * @param blocker The thread on which a thread is waiting
1462  * @param waiting The waiting thread
1463  */
1464 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1465 {
1466         for (unsigned int i = 0; i < promises->size(); i++) {
1467                 Promise *promise = (*promises)[i];
1468                 if (!promise->thread_is_available(waiting->get_id()))
1469                         continue;
1470                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1471                         ModelAction *reader = promise->get_reader(j);
1472                         if (reader->get_tid() != blocker->get_id())
1473                                 continue;
1474                         if (promise->eliminate_thread(waiting->get_id())) {
1475                                 /* Promise has failed */
1476                                 priv->failed_promise = true;
1477                         } else {
1478                                 /* Only eliminate the 'waiting' thread once */
1479                                 return;
1480                         }
1481                 }
1482         }
1483 }
1484
1485 /**
1486  * @brief Check whether a model action is enabled.
1487  *
1488  * Checks whether a lock or join operation would be successful (i.e., the lock
1489  * is not already held, or the joined thread has already completed). If not, put
1490  * the action in a waiter list.
1491  *
1492  * @param curr The ModelAction to check.
1493  * @return True if the action is enabled; false otherwise.
1494  */
1495 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1496         if (curr->is_lock()) {
1497                 std::mutex *lock = curr->get_mutex();
1498                 struct std::mutex_state *state = lock->get_state();
1499                 if (state->locked)
1500                         return false;
1501         } else if (curr->is_thread_join()) {
1502                 Thread *blocking = curr->get_thread_operand();
1503                 if (!blocking->is_complete()) {
1504                         thread_blocking_check_promises(blocking, get_thread(curr));
1505                         return false;
1506                 }
1507         }
1508
1509         return true;
1510 }
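
/*
 * Illustrative sketch (hypothetical client code, not part of this file) of
 * the lock case handled above: a lock() on a mutex that is currently held is
 * not enabled, so the acting thread blocks until the holder unlocks. The
 * thread-join case is analogous: the join is not enabled until the joined
 * thread completes.
 *
 *   #include <mutex>
 *
 *   std::mutex m;
 *
 *   void thread1() {
 *           m.lock();        // enabled only while m is not held
 *           // ... critical section ...
 *           m.unlock();
 *   }
 *
 *   void thread2() {
 *           m.lock();        // disabled while thread1 holds m; re-enabled by its unlock()
 *           m.unlock();
 *   }
 */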
1511
1512 /**
1513  * This is the heart of the model checker routine. It performs model-checking
1514  * actions corresponding to a given "current action." Among other processes, it
1515  * calculates reads-from relationships, updates synchronization clock vectors,
1516  * forms a memory_order constraints graph, and handles replay/backtrack
1517  * execution when running permutations of previously-observed executions.
1518  *
1519  * @param curr The current action to process
1520  * @return The ModelAction that is actually executed; may be different than
1521  * curr; may be NULL, if the current action is not enabled to run
1522  */
1523 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1524 {
1525         ASSERT(curr);
1526         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1527         bool newly_explored = initialize_curr_action(&curr);
1528
1529         DBG();
1530
1531         wake_up_sleeping_actions(curr);
1532
1533         /* Compute fairness information for CHESS yield algorithm */
1534         if (model->params.yieldon) {
1535                 curr->get_node()->update_yield(scheduler);
1536         }
1537
1538         /* Add the action to lists before any other model-checking tasks */
1539         if (!second_part_of_rmw)
1540                 add_action_to_lists(curr);
1541
1542         /* Build may_read_from set for newly-created actions */
1543         if (newly_explored && curr->is_read())
1544                 build_may_read_from(curr);
1545
1546         /* Initialize work_queue with the "current action" work */
1547         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1548         while (!work_queue.empty() && !has_asserted()) {
1549                 WorkQueueEntry work = work_queue.front();
1550                 work_queue.pop_front();
1551
1552                 switch (work.type) {
1553                 case WORK_CHECK_CURR_ACTION: {
1554                         ModelAction *act = work.action;
1555                         bool update = false; /* update this location's release seq's */
1556                         bool update_all = false; /* update all release seq's */
1557
1558                         if (process_thread_action(curr))
1559                                 update_all = true;
1560
1561                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1562                                 update = true;
1563
1564                         if (act->is_write() && process_write(act))
1565                                 update = true;
1566
1567                         if (act->is_fence() && process_fence(act))
1568                                 update_all = true;
1569
1570                         if (act->is_mutex_op() && process_mutex(act))
1571                                 update_all = true;
1572
1573                         if (act->is_relseq_fixup())
1574                                 process_relseq_fixup(curr, &work_queue);
1575
1576                         if (update_all)
1577                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1578                         else if (update)
1579                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1580                         break;
1581                 }
1582                 case WORK_CHECK_RELEASE_SEQ:
1583                         resolve_release_sequences(work.location, &work_queue);
1584                         break;
1585                 case WORK_CHECK_MO_EDGES: {
1586                         /** @todo Complete verification of work_queue */
1587                         ModelAction *act = work.action;
1588                         bool updated = false;
1589
1590                         if (act->is_read()) {
1591                                 const ModelAction *rf = act->get_reads_from();
1592                                 const Promise *promise = act->get_reads_from_promise();
1593                                 if (rf) {
1594                                         if (r_modification_order(act, rf))
1595                                                 updated = true;
1596                                 } else if (promise) {
1597                                         if (r_modification_order(act, promise))
1598                                                 updated = true;
1599                                 }
1600                         }
1601                         if (act->is_write()) {
1602                                 if (w_modification_order(act, NULL))
1603                                         updated = true;
1604                         }
1605                         mo_graph->commitChanges();
1606
1607                         if (updated)
1608                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1609                         break;
1610                 }
1611                 default:
1612                         ASSERT(false);
1613                         break;
1614                 }
1615         }
1616
1617         check_curr_backtracking(curr);
1618         set_backtracking(curr);
1619         return curr;
1620 }
1621
1622 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1623 {
1624         Node *currnode = curr->get_node();
1625         Node *parnode = currnode->get_parent();
1626
1627         if ((parnode && !parnode->backtrack_empty()) ||
1628                          !currnode->misc_empty() ||
1629                          !currnode->read_from_empty() ||
1630                          !currnode->promise_empty() ||
1631                          !currnode->relseq_break_empty()) {
1632                 set_latest_backtrack(curr);
1633         }
1634 }
1635
1636 bool ModelChecker::promises_expired() const
1637 {
1638         for (unsigned int i = 0; i < promises->size(); i++) {
1639                 Promise *promise = (*promises)[i];
1640                 if (promise->get_expiration() < priv->used_sequence_numbers)
1641                         return true;
1642         }
1643         return false;
1644 }
1645
1646 /**
1647  * This is the strongest feasibility check available.
1648  * @return whether the current trace (partial or complete) must be a prefix of
1649  * a feasible trace.
1650  */
1651 bool ModelChecker::isfeasibleprefix() const
1652 {
1653         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1654 }
1655
1656 /**
1657  * Print diagnostic information about an infeasible execution
1658  * @param prefix A string to prefix the output with; if NULL, then a default
1659  * message prefix will be provided
1660  */
1661 void ModelChecker::print_infeasibility(const char *prefix) const
1662 {
1663         char buf[100];
1664         char *ptr = buf;
1665         if (mo_graph->checkForCycles())
1666                 ptr += sprintf(ptr, "[mo cycle]");
1667         if (priv->failed_promise)
1668                 ptr += sprintf(ptr, "[failed promise]");
1669         if (priv->too_many_reads)
1670                 ptr += sprintf(ptr, "[too many reads]");
1671         if (priv->no_valid_reads)
1672                 ptr += sprintf(ptr, "[no valid reads-from]");
1673         if (priv->bad_synchronization)
1674                 ptr += sprintf(ptr, "[bad sw ordering]");
1675         if (promises_expired())
1676                 ptr += sprintf(ptr, "[promise expired]");
1677         if (promises->size() != 0)
1678                 ptr += sprintf(ptr, "[unresolved promise]");
1679         if (ptr != buf)
1680                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1681 }
1682
1683 /**
1684  * Returns whether the current completed trace is feasible, except for pending
1685  * release sequences.
1686  */
1687 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1688 {
1689         return !is_infeasible() && promises->size() == 0;
1690 }
1691
1692 /**
1693  * Check if the current partial trace is infeasible. Does not check any
1694  * end-of-execution flags, which might rule out the execution. Thus, this is
1695  * useful only for ruling an execution as infeasible.
1696  * @return whether the current partial trace is infeasible.
1697  */
1698 bool ModelChecker::is_infeasible() const
1699 {
1700         return mo_graph->checkForCycles() ||
1701                 priv->no_valid_reads ||
1702                 priv->failed_promise ||
1703                 priv->too_many_reads ||
1704                 priv->bad_synchronization ||
1705                 promises_expired();
1706 }
1707
1708 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1709 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1710         ModelAction *lastread = get_last_action(act->get_tid());
1711         lastread->process_rmw(act);
1712         if (act->is_rmw()) {
1713                 if (lastread->get_reads_from())
1714                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1715                 else
1716                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1717                 mo_graph->commitChanges();
1718         }
1719         return lastread;
1720 }
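
/*
 * Illustrative sketch (hypothetical client code, not part of this file): an
 * atomic read-modify-write such as fetch_add is modeled as a read part (RMWR)
 * that process_rmw() later closes out into a full RMW. The addRMWEdge() call
 * above keeps the RMW immediately after the write it read from in modification
 * order, reflecting the C++11 rule that an RMW reads the last value written
 * before it in that order.
 *
 *   #include <atomic>
 *
 *   std::atomic<int> counter(0);
 *
 *   void work() {
 *           // Seen by the checker as an RMWR (read of 'counter') followed by
 *           // an RMW (write of the incremented value), with the RMW placed
 *           // directly after its reads-from write in mo.
 *           counter.fetch_add(1, std::memory_order_relaxed);
 *   }
 */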
1721
1722 /**
1723  * A helper function for ModelChecker::check_recency, to check if the current
1724  * thread is able to read from a different write/promise for 'params.maxreads'
1725  * number of steps and if that write/promise should become visible (i.e., is
1726  * ordered later in the modification order). This helps model memory liveness.
1727  *
1728  * @param curr The current action. Must be a read.
1729  * @param rf The write/promise from which we plan to read
1730  * @param other_rf The write/promise from which we may read
1731  * @return True if we were able to read from other_rf for params.maxreads steps
1732  */
1733 template <typename T, typename U>
1734 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1735 {
1736         /* Need a different write/promise */
1737         if (other_rf->equals(rf))
1738                 return false;
1739
1740         /* Only look for "newer" writes/promises */
1741         if (!mo_graph->checkReachable(rf, other_rf))
1742                 return false;
1743
1744         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1745         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1746         action_list_t::reverse_iterator rit = list->rbegin();
1747         ASSERT((*rit) == curr);
1748         /* Skip past curr */
1749         rit++;
1750
1751         /* Does this write/promise work for everyone? */
1752         for (int i = 0; i < params.maxreads; i++, rit++) {
1753                 ModelAction *act = *rit;
1754                 if (!act->may_read_from(other_rf))
1755                         return false;
1756         }
1757         return true;
1758 }
1759
1760 /**
1761  * Checks whether a thread has read from the same write or Promise too many
1762  * times without seeing the effects of a later write/Promise.
1763  *
1764  * Basic idea:
1765  * 1) there must be a different write/promise that we could read from,
1766  * 2) we must have read from the same write/promise in excess of maxreads times,
1767  * 3) that other write/promise must have been in the reads_from set at each of those maxreads reads, and
1768  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1769  *
1770  * If so, we decide that the execution is no longer feasible.
1771  *
1772  * @param curr The current action. Must be a read.
1773  * @param rf The ModelAction/Promise from which we might read.
1774  * @return True if the read should succeed; false otherwise
1775  */
1776 template <typename T>
1777 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1778 {
1779         if (!params.maxreads)
1780                 return true;
1781
1782         // NOTE: The next check is just an optimization; it is not strictly necessary
1783         if (curr->get_node()->get_read_from_past_size() +
1784                         curr->get_node()->get_read_from_promise_size() <= 1)
1785                 return true;
1786
1787         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1788         int tid = id_to_int(curr->get_tid());
1789         ASSERT(tid < (int)thrd_lists->size());
1790         action_list_t *list = &(*thrd_lists)[tid];
1791         action_list_t::reverse_iterator rit = list->rbegin();
1792         ASSERT((*rit) == curr);
1793         /* Skip past curr */
1794         rit++;
1795
1796         action_list_t::reverse_iterator ritcopy = rit;
1797         /* See if we have enough reads from the same value */
1798         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1799                 if (ritcopy == list->rend())
1800                         return true;
1801                 ModelAction *act = *ritcopy;
1802                 if (!act->is_read())
1803                         return true;
1804                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1805                         return true;
1806                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1807                         return true;
1808                 if (act->get_node()->get_read_from_past_size() +
1809                                 act->get_node()->get_read_from_promise_size() <= 1)
1810                         return true;
1811         }
1812         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1813                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1814                 if (should_read_instead(curr, rf, write))
1815                         return false; /* liveness failure */
1816         }
1817         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1818                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1819                 if (should_read_instead(curr, rf, promise))
1820                         return false; /* liveness failure */
1821         }
1822         return true;
1823 }
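
/*
 * Illustrative sketch (hypothetical client program, not part of this file) of
 * the liveness pattern that check_recency() and should_read_instead() target.
 * With a nonzero params.maxreads, a reader that keeps taking the same stale
 * value even though a newer write is available in the modification order is
 * eventually ruled infeasible, so spin loops like this one must observe the
 * store.
 *
 *   #include <atomic>
 *
 *   std::atomic<bool> stop(false);
 *
 *   void spinner() {
 *           while (!stop.load(std::memory_order_relaxed))
 *                   ;        // without the recency check, could read 'false' forever
 *   }
 *
 *   void stopper() {
 *           stop.store(true, std::memory_order_relaxed);
 *   }
 */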
1824
1825 /**
1826  * @brief Updates the mo_graph with the constraints imposed from the current
1827  * read.
1828  *
1829  * Basic idea is the following: Go through each other thread and find
1830  * the last action that happened before our read.  Two cases:
1831  *
1832  * -# The action is a write: that write must either occur before
1833  * the write we read from or be the write we read from.
1834  * -# The action is a read: the write that that action read from
1835  * must occur before the write we read from or be the same write.
1836  *
1837  * @param curr The current action. Must be a read.
1838  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1839  * @return True if modification order edges were added; false otherwise
1840  */
1841 template <typename rf_type>
1842 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1843 {
1844         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1845         unsigned int i;
1846         bool added = false;
1847         ASSERT(curr->is_read());
1848
1849         /* Last SC fence in the current thread */
1850         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1851         ModelAction *last_sc_write = NULL;
1852         if (curr->is_seqcst())
1853                 last_sc_write = get_last_seq_cst_write(curr);
1854
1855         /* Iterate over all threads */
1856         for (i = 0; i < thrd_lists->size(); i++) {
1857                 /* Last SC fence in thread i */
1858                 ModelAction *last_sc_fence_thread_local = NULL;
1859                 if (int_to_id((int)i) != curr->get_tid())
1860                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1861
1862                 /* Last SC fence in thread i, before last SC fence in current thread */
1863                 ModelAction *last_sc_fence_thread_before = NULL;
1864                 if (last_sc_fence_local)
1865                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1866
1867                 /* Iterate over actions in thread, starting from most recent */
1868                 action_list_t *list = &(*thrd_lists)[i];
1869                 action_list_t::reverse_iterator rit;
1870                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1871                         ModelAction *act = *rit;
1872
1873                         /* Skip curr */
1874                         if (act == curr)
1875                                 continue;
1876                         /* Don't want to add reflexive edges on 'rf' */
1877                         if (act->equals(rf)) {
1878                                 if (act->happens_before(curr))
1879                                         break;
1880                                 else
1881                                         continue;
1882                         }
1883
1884                         if (act->is_write()) {
1885                                 /* C++, Section 29.3 statement 5 */
1886                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1887                                                 *act < *last_sc_fence_thread_local) {
1888                                         added = mo_graph->addEdge(act, rf) || added;
1889                                         break;
1890                                 }
1891                                 /* C++, Section 29.3 statement 4 */
1892                                 else if (act->is_seqcst() && last_sc_fence_local &&
1893                                                 *act < *last_sc_fence_local) {
1894                                         added = mo_graph->addEdge(act, rf) || added;
1895                                         break;
1896                                 }
1897                                 /* C++, Section 29.3 statement 6 */
1898                                 else if (last_sc_fence_thread_before &&
1899                                                 *act < *last_sc_fence_thread_before) {
1900                                         added = mo_graph->addEdge(act, rf) || added;
1901                                         break;
1902                                 }
1903                         }
1904
1905                         /* C++, Section 29.3 statement 3 (second subpoint) */
1906                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1907                                 added = mo_graph->addEdge(act, rf) || added;
1908                                 break;
1909                         }
1910
1911                         /*
1912                          * Include at most one act per-thread that "happens
1913                          * before" curr
1914                          */
1915                         if (act->happens_before(curr)) {
1916                                 if (act->is_write()) {
1917                                         added = mo_graph->addEdge(act, rf) || added;
1918                                 } else {
1919                                         const ModelAction *prevrf = act->get_reads_from();
1920                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1921                                         if (prevrf) {
1922                                                 if (!prevrf->equals(rf))
1923                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1924                                         } else if (!prevrf_promise->equals(rf)) {
1925                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1926                                         }
1927                                 }
1928                                 break;
1929                         }
1930                 }
1931         }
1932
1933         /*
1934          * All compatible, thread-exclusive promises must be ordered after any
1935          * concrete loads from the same thread
1936          */
1937         for (unsigned int i = 0; i < promises->size(); i++)
1938                 if ((*promises)[i]->is_compatible_exclusive(curr))
1939                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1940
1941         return added;
1942 }
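
/*
 * Illustrative sketch (hypothetical execution, not part of this file) of the
 * read-side rule applied above: a write W1 that happens before the current
 * read R must be modification-ordered before the write W2 that R reads from
 * (or be W2 itself), so an edge W1 --mo--> W2 is added.
 *
 *   std::atomic<int> x(0);
 *   std::atomic<bool> flag(false);
 *
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);           // W1
 *   flag.store(true, std::memory_order_release);
 *
 *   // Thread 2:
 *   int r = 0;
 *   if (flag.load(std::memory_order_acquire))        // W1 now happens-before the load below
 *           r = x.load(std::memory_order_relaxed);   // R: if R reads some other write W2,
 *                                                    // the edge W1 --mo--> W2 is forced
 */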
1943
1944 /**
1945  * Updates the mo_graph with the constraints imposed from the current write.
1946  *
1947  * Basic idea is the following: Go through each other thread and find
1948  * the latest action that happened before our write.  Two cases:
1949  *
1950  * (1) The action is a write => that write must occur before
1951  * the current write
1952  *
1953  * (2) The action is a read => the write that that action read from
1954  * must occur before the current write.
1955  *
1956  * This method also handles two other issues:
1957  *
1958  * (I) Sequential Consistency: Making sure that if the current write is
1959  * seq_cst, that it occurs after the previous seq_cst write.
1960  *
1961  * (II) Sending the write back to non-synchronizing reads.
1962  *
1963  * @param curr The current action. Must be a write.
1964  * @param send_fv A vector for stashing reads to which we may pass our future
1965  * value. If NULL, then don't record any future values.
1966  * @return True if modification order edges were added; false otherwise
1967  */
1968 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1969 {
1970         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1971         unsigned int i;
1972         bool added = false;
1973         ASSERT(curr->is_write());
1974
1975         if (curr->is_seqcst()) {
1976                 /* We have to at least see the last sequentially consistent write,
1977                  * so we are initialized. */
1978                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1979                 if (last_seq_cst != NULL) {
1980                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1981                 }
1982         }
1983
1984         /* Last SC fence in the current thread */
1985         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1986
1987         /* Iterate over all threads */
1988         for (i = 0; i < thrd_lists->size(); i++) {
1989                 /* Last SC fence in thread i, before last SC fence in current thread */
1990                 ModelAction *last_sc_fence_thread_before = NULL;
1991                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1992                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1993
1994                 /* Iterate over actions in thread, starting from most recent */
1995                 action_list_t *list = &(*thrd_lists)[i];
1996                 action_list_t::reverse_iterator rit;
1997                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1998                         ModelAction *act = *rit;
1999                         if (act == curr) {
2000                                 /*
2001                                  * 1) If RMW and it actually read from something, then we
2002                                  * already have all relevant edges, so just skip to next
2003                                  * thread.
2004                                  *
2005                                  * 2) If RMW and it didn't read from anything, we should add
2006                                  * whatever edge we can get to speed up convergence.
2007                                  *
2008                                  * 3) If normal write, we need to look at earlier actions, so
2009                                  * continue processing list.
2010                                  */
2011                                 if (curr->is_rmw()) {
2012                                         if (curr->get_reads_from() != NULL)
2013                                                 break;
2014                                         else
2015                                                 continue;
2016                                 } else
2017                                         continue;
2018                         }
2019
2020                         /* C++, Section 29.3 statement 7 */
2021                         if (last_sc_fence_thread_before && act->is_write() &&
2022                                         *act < *last_sc_fence_thread_before) {
2023                                 added = mo_graph->addEdge(act, curr) || added;
2024                                 break;
2025                         }
2026
2027                         /*
2028                          * Include at most one act per-thread that "happens
2029                          * before" curr
2030                          */
2031                         if (act->happens_before(curr)) {
2032                                 /*
2033                                  * Note: if act is RMW, just add edge:
2034                                  *   act --mo--> curr
2035                                  * The following edge should be handled elsewhere:
2036                                  *   readfrom(act) --mo--> act
2037                                  */
2038                                 if (act->is_write())
2039                                         added = mo_graph->addEdge(act, curr) || added;
2040                                 else if (act->is_read()) {
2041                                         // if the previous read has no reads-from yet (NULL), just keep going
2042                                         if (act->get_reads_from() == NULL)
2043                                                 continue;
2044                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2045                                 }
2046                                 break;
2047                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2048                                                      !act->same_thread(curr)) {
2049                                 /* We have an action that:
2050                                    (1) did not happen before us
2051                                    (2) is a read and we are a write
2052                                    (3) cannot synchronize with us
2053                                    (4) is in a different thread
2054                                    =>
2055                                    that read could potentially read from our write.  Note that
2056                                    these checks are overly conservative at this point, we'll
2057                                    do more checks before actually removing the
2058                                    pendingfuturevalue.
2059
2060                                  */
2061                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2062                                         if (!is_infeasible())
2063                                                 send_fv->push_back(act);
2064                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2065                                                 add_future_value(curr, act);
2066                                 }
2067                         }
2068                 }
2069         }
2070
2071         /*
2072          * All compatible, thread-exclusive promises must be ordered after any
2073          * concrete stores to the same thread, or else they can be merged with
2074          * this store later
2075          */
2076         for (unsigned int i = 0; i < promises->size(); i++)
2077                 if ((*promises)[i]->is_compatible_exclusive(curr))
2078                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2079
2080         return added;
2081 }
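
/*
 * Illustrative sketch (hypothetical execution, not part of this file) of the
 * write-side rules applied above: every write (or the write read by a read)
 * that happens before the current write must precede it in modification
 * order, and a seq_cst write is additionally ordered after the previous
 * seq_cst write to the same location.
 *
 *   std::atomic<int> x(0);
 *
 *   void thread1() {
 *           x.store(1, std::memory_order_seq_cst);    // Wa
 *           x.store(2, std::memory_order_relaxed);    // Wb: Wa --mo--> Wb (same thread)
 *   }
 *
 *   void thread2() {
 *           x.store(3, std::memory_order_seq_cst);    // Wc: also ordered after the last
 *                                                     // seq_cst write to x seen so far,
 *                                                     // e.g. Wa --mo--> Wc
 *   }
 */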
2082
2083 /** Arbitrary reads from the future are not allowed.  Section 29.3
2084  * part 9 places some constraints.  This method checks one result of the
2085  * constraint.  Others require compiler support. */
2086 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2087 {
2088         if (!writer->is_rmw())
2089                 return true;
2090
2091         if (!reader->is_rmw())
2092                 return true;
2093
2094         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2095                 if (search == reader)
2096                         return false;
2097                 if (search->get_tid() == reader->get_tid() &&
2098                                 search->happens_before(reader))
2099                         break;
2100         }
2101
2102         return true;
2103 }
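
/*
 * Illustrative sketch (hypothetical execution, not part of this file) of the
 * cycle this check rules out: two RMWs that (transitively) read from each
 * other. If the exchange in Thread 1 read its value from the exchange in
 * Thread 2 while that exchange read from Thread 1's, the values would appear
 * "out of thin air", so such a reads-from choice is rejected.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   x.exchange(1, std::memory_order_relaxed);   // may not read from Thread 2's exchange
 *   // Thread 2:                                // if that exchange already (transitively)
 *   x.exchange(2, std::memory_order_relaxed);   // reads from this one
 */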
2104
2105 /**
2106  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2107  * some constraints. This method checks the following constraint (others
2108  * require compiler support):
2109  *
2110  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2111  */
2112 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2113 {
2114         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2115         unsigned int i;
2116         /* Iterate over all threads */
2117         for (i = 0; i < thrd_lists->size(); i++) {
2118                 const ModelAction *write_after_read = NULL;
2119
2120                 /* Iterate over actions in thread, starting from most recent */
2121                 action_list_t *list = &(*thrd_lists)[i];
2122                 action_list_t::reverse_iterator rit;
2123                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2124                         ModelAction *act = *rit;
2125
2126                         /* Don't disallow due to act == reader */
2127                         if (!reader->happens_before(act) || reader == act)
2128                                 break;
2129                         else if (act->is_write())
2130                                 write_after_read = act;
2131                         else if (act->is_read() && act->get_reads_from() != NULL)
2132                                 write_after_read = act->get_reads_from();
2133                 }
2134
2135                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2136                         return false;
2137         }
2138         return true;
2139 }
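
/*
 * Illustrative sketch (hypothetical execution, not part of this file) of the
 * constraint checked above, with X the reader, Y a write the reader happens
 * before, and Z the candidate writer:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // X: candidate to read a future value
 *   x.store(1, std::memory_order_relaxed);       // Y: X happens-before Y (same thread)
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);       // Z: if Y --mo--> Z, then X must not
 *                                                // read from Z
 */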
2140
2141 /**
2142  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2143  * The ModelAction under consideration is expected to be taking part in
2144  * release/acquire synchronization as an object of the "reads from" relation.
2145  * Note that this can only provide release sequence support for RMW chains
2146  * which do not read from the future, as those actions cannot be traced until
2147  * their "promise" is fulfilled. Similarly, we may not even establish the
2148  * presence of a release sequence with certainty, as some modification order
2149  * constraints may be decided further in the future. Thus, this function
2150  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2151  * and a boolean representing certainty.
2152  *
2153  * @param rf The action that might be part of a release sequence. Must be a
2154  * write.
2155  * @param release_heads A pass-by-reference style return parameter. After
2156  * execution of this function, release_heads will contain the heads of all the
2157  * relevant release sequences, if any exist with certainty
2158  * @param pending A pass-by-reference style return parameter which is only used
2159  * when returning false (i.e., uncertain). Records information about the
2160  * uncertain release sequence, including any write operations that might
2161  * break the sequence.
2162  * @return true, if the ModelChecker is certain that release_heads is complete;
2163  * false otherwise
2164  */
2165 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2166                 rel_heads_list_t *release_heads,
2167                 struct release_seq *pending) const
2168 {
2169         /* Only check for release sequences if there are no cycles */
2170         if (mo_graph->checkForCycles())
2171                 return false;
2172
2173         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2174                 ASSERT(rf->is_write());
2175
2176                 if (rf->is_release())
2177                         release_heads->push_back(rf);
2178                 else if (rf->get_last_fence_release())
2179                         release_heads->push_back(rf->get_last_fence_release());
2180                 if (!rf->is_rmw())
2181                         break; /* End of RMW chain */
2182
2183                 /** @todo Need to be smarter here...  In the linux lock
2184                  * example, this will run to the beginning of the program for
2185                  * every acquire. */
2186                 /** @todo The way to be smarter here is to keep going until 1
2187                  * thread has a release preceded by an acquire and you've seen
2188                  *       both. */
2189
2190                 /* acq_rel RMW is a sufficient stopping condition */
2191                 if (rf->is_acquire() && rf->is_release())
2192                         return true; /* complete */
2193         }
2194         if (!rf) {
2195                 /* read from future: need to settle this later */
2196                 pending->rf = NULL;
2197                 return false; /* incomplete */
2198         }
2199
2200         if (rf->is_release())
2201                 return true; /* complete */
2202
2203         /* else relaxed write
2204          * - check for fence-release in the same thread (29.8, stmt. 3)
2205          * - check modification order for contiguous subsequence
2206          *   -> rf must be same thread as release */
2207
2208         const ModelAction *fence_release = rf->get_last_fence_release();
2209         /* Synchronize with a fence-release unconditionally; we don't need to
2210          * find any more "contiguous subsequence..." for it */
2211         if (fence_release)
2212                 release_heads->push_back(fence_release);
2213
2214         int tid = id_to_int(rf->get_tid());
2215         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2216         action_list_t *list = &(*thrd_lists)[tid];
2217         action_list_t::const_reverse_iterator rit;
2218
2219         /* Find rf in the thread list */
2220         rit = std::find(list->rbegin(), list->rend(), rf);
2221         ASSERT(rit != list->rend());
2222
2223         /* Find the last {write,fence}-release */
2224         for (; rit != list->rend(); rit++) {
2225                 if (fence_release && *(*rit) < *fence_release)
2226                         break;
2227                 if ((*rit)->is_release())
2228                         break;
2229         }
2230         if (rit == list->rend()) {
2231                 /* No write-release in this thread */
2232                 return true; /* complete */
2233         } else if (fence_release && *(*rit) < *fence_release) {
2234                 /* The fence-release is more recent (and so, "stronger") than
2235                  * the most recent write-release */
2236                 return true; /* complete */
2237         } /* else, need to establish contiguous release sequence */
2238         ModelAction *release = *rit;
2239
2240         ASSERT(rf->same_thread(release));
2241
2242         pending->writes.clear();
2243
2244         bool certain = true;
2245         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2246                 if (id_to_int(rf->get_tid()) == (int)i)
2247                         continue;
2248                 list = &(*thrd_lists)[i];
2249
2250                 /* Can we ensure no future writes from this thread may break
2251                  * the release seq? */
2252                 bool future_ordered = false;
2253
2254                 ModelAction *last = get_last_action(int_to_id(i));
2255                 Thread *th = get_thread(int_to_id(i));
2256                 if ((last && rf->happens_before(last)) ||
2257                                 !is_enabled(th) ||
2258                                 th->is_complete())
2259                         future_ordered = true;
2260
2261                 ASSERT(!th->is_model_thread() || future_ordered);
2262
2263                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2264                         const ModelAction *act = *rit;
2265                         /* Reach synchronization -> this thread is complete */
2266                         if (act->happens_before(release))
2267                                 break;
2268                         if (rf->happens_before(act)) {
2269                                 future_ordered = true;
2270                                 continue;
2271                         }
2272
2273                         /* Only non-RMW writes can break release sequences */
2274                         if (!act->is_write() || act->is_rmw())
2275                                 continue;
2276
2277                         /* Check modification order */
2278                         if (mo_graph->checkReachable(rf, act)) {
2279                                 /* rf --mo--> act */
2280                                 future_ordered = true;
2281                                 continue;
2282                         }
2283                         if (mo_graph->checkReachable(act, release))
2284                                 /* act --mo--> release */
2285                                 break;
2286                         if (mo_graph->checkReachable(release, act) &&
2287                                       mo_graph->checkReachable(act, rf)) {
2288                                 /* release --mo-> act --mo--> rf */
2289                                 return true; /* complete */
2290                         }
2291                         /* act may break release sequence */
2292                         pending->writes.push_back(act);
2293                         certain = false;
2294                 }
2295                 if (!future_ordered)
2296                         certain = false; /* This thread is uncertain */
2297         }
2298
2299         if (certain) {
2300                 release_heads->push_back(release);
2301                 pending->writes.clear();
2302         } else {
2303                 pending->release = release;
2304                 pending->rf = rf;
2305         }
2306         return certain;
2307 }
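
/*
 * Illustrative sketch (hypothetical client program, not part of this file) of
 * a release sequence this function traces: a release store followed by RMWs
 * (or same-thread writes) forms a contiguous sequence, so an acquire load
 * that reads the tail still synchronizes with the release head. A relaxed
 * store from an unrelated thread that lands between them in modification
 * order breaks the sequence; such stores are what pending->writes collects.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);        // release head
 *   x.fetch_add(1, std::memory_order_relaxed);    // RMW continues the sequence
 *   // Thread 2:
 *   int r = x.load(std::memory_order_acquire);    // reading the RMW's value still
 *                                                 // synchronizes with the head
 *   // Thread 3:
 *   x.store(5, std::memory_order_relaxed);        // if mo-ordered between head and RMW,
 *                                                 // this can break the sequence
 */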
2308
2309 /**
2310  * An interface for getting the release sequence head(s) with which a
2311  * given ModelAction must synchronize. This function only returns a non-empty
2312  * result when it can locate a release sequence head with certainty. Otherwise,
2313  * it may mark the internal state of the ModelChecker so that it will handle
2314  * the release sequence at a later time, causing @a acquire to update its
2315  * synchronization at some later point in execution.
2316  *
2317  * @param acquire The 'acquire' action that may synchronize with a release
2318  * sequence
2319  * @param read The read action that may read from a release sequence; this may
2320  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2321  * when 'acquire' is a fence-acquire)
2322  * @param release_heads A pass-by-reference return parameter. Will be filled
2323  * with the head(s) of the release sequence(s), if they exist with certainty.
2324  * @see ModelChecker::release_seq_heads
2325  */
2326 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2327                 ModelAction *read, rel_heads_list_t *release_heads)
2328 {
2329         const ModelAction *rf = read->get_reads_from();
2330         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2331         sequence->acquire = acquire;
2332         sequence->read = read;
2333
2334         if (!release_seq_heads(rf, release_heads, sequence)) {
2335                 /* add act to 'lazy checking' list */
2336                 pending_rel_seqs->push_back(sequence);
2337         } else {
2338                 snapshot_free(sequence);
2339         }
2340 }
2341
2342 /**
2343  * Attempt to resolve all stashed operations that might synchronize with a
2344  * release sequence for a given location. This implements the "lazy" portion of
2345  * determining whether or not a release sequence was contiguous, since not all
2346  * modification order information is present at the time an action occurs.
2347  *
2348  * @param location The location/object that should be checked for release
2349  * sequence resolutions. A NULL value means to check all locations.
2350  * @param work_queue The work queue to which to add work items as they are
2351  * generated
2352  * @return True if any updates occurred (new synchronization, new mo_graph
2353  * edges)
2354  */
2355 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2356 {
2357         bool updated = false;
2358         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2359         while (it != pending_rel_seqs->end()) {
2360                 struct release_seq *pending = *it;
2361                 ModelAction *acquire = pending->acquire;
2362                 const ModelAction *read = pending->read;
2363
2364                 /* Only resolve sequences on the given location, if provided */
2365                 if (location && read->get_location() != location) {
2366                         it++;
2367                         continue;
2368                 }
2369
2370                 const ModelAction *rf = read->get_reads_from();
2371                 rel_heads_list_t release_heads;
2372                 bool complete;
2373                 complete = release_seq_heads(rf, &release_heads, pending);
2374                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2375                         if (!acquire->has_synchronized_with(release_heads[i])) {
2376                                 if (acquire->synchronize_with(release_heads[i]))
2377                                         updated = true;
2378                                 else
2379                                         set_bad_synchronization();
2380                         }
2381                 }
2382
2383                 if (updated) {
2384                         /* Re-check all pending release sequences */
2385                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2386                         /* Re-check read-acquire for mo_graph edges */
2387                         if (acquire->is_read())
2388                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2389
2390                         /* propagate synchronization to later actions */
2391                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2392                         for (; (*rit) != acquire; rit++) {
2393                                 ModelAction *propagate = *rit;
2394                                 if (acquire->happens_before(propagate)) {
2395                                         propagate->synchronize_with(acquire);
2396                                         /* Re-check 'propagate' for mo_graph edges */
2397                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2398                                 }
2399                         }
2400                 }
2401                 if (complete) {
2402                         it = pending_rel_seqs->erase(it);
2403                         snapshot_free(pending);
2404                 } else {
2405                         it++;
2406                 }
2407         }
2408
2409         // If we added new synchronization, check whether we have realized a data race.
2410         checkDataRaces();
2411
2412         return updated;
2413 }
2414
2415 /**
2416  * Performs various bookkeeping operations for the current ModelAction. For
2417  * instance, adds action to the per-object, per-thread action vector and to the
2418  * action trace list of all thread actions.
2419  *
2420  * @param act is the ModelAction to add.
2421  */
2422 void ModelChecker::add_action_to_lists(ModelAction *act)
2423 {
2424         int tid = id_to_int(act->get_tid());
2425         ModelAction *uninit = NULL;
2426         int uninit_id = -1;
2427         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2428         if (list->empty() && act->is_atomic_var()) {
2429                 uninit = get_uninitialized_action(act);
2430                 uninit_id = id_to_int(uninit->get_tid());
2431                 list->push_front(uninit);
2432         }
2433         list->push_back(act);
2434
2435         action_trace->push_back(act);
2436         if (uninit)
2437                 action_trace->push_front(uninit);
2438
2439         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2440         if (tid >= (int)vec->size())
2441                 vec->resize(priv->next_thread_id);
2442         (*vec)[tid].push_back(act);
2443         if (uninit)
2444                 (*vec)[uninit_id].push_front(uninit);
2445
2446         if ((int)thrd_last_action->size() <= tid)
2447                 thrd_last_action->resize(get_num_threads());
2448         (*thrd_last_action)[tid] = act;
2449         if (uninit)
2450                 (*thrd_last_action)[uninit_id] = uninit;
2451
2452         if (act->is_fence() && act->is_release()) {
2453                 if ((int)thrd_last_fence_release->size() <= tid)
2454                         thrd_last_fence_release->resize(get_num_threads());
2455                 (*thrd_last_fence_release)[tid] = act;
2456         }
2457
2458         if (act->is_wait()) {
2459                 void *mutex_loc = (void *) act->get_value();
2460                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2461
2462                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2463                 if (tid >= (int)vec->size())
2464                         vec->resize(priv->next_thread_id);
2465                 (*vec)[tid].push_back(act);
2466         }
2467 }
2468
2469 /**
2470  * @brief Get the last action performed by a particular Thread
2471  * @param tid The thread ID of the Thread in question
2472  * @return The last action in the thread
2473  */
2474 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2475 {
2476         int threadid = id_to_int(tid);
2477         if (threadid < (int)thrd_last_action->size())
2478                 return (*thrd_last_action)[id_to_int(tid)];
2479         else
2480                 return NULL;
2481 }
2482
2483 /**
2484  * @brief Get the last fence release performed by a particular Thread
2485  * @param tid The thread ID of the Thread in question
2486  * @return The last fence release in the thread, if one exists; NULL otherwise
2487  */
2488 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2489 {
2490         int threadid = id_to_int(tid);
2491         if (threadid < (int)thrd_last_fence_release->size())
2492                 return (*thrd_last_fence_release)[id_to_int(tid)];
2493         else
2494                 return NULL;
2495 }
2496
2497 /**
2498  * Gets the last memory_order_seq_cst write (in the total global sequence)
2499  * performed on a particular object (i.e., memory location), not including the
2500  * current action.
2501  * @param curr The current ModelAction; also denotes the object location to
2502  * check
2503  * @return The last seq_cst write
2504  */
2505 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2506 {
2507         void *location = curr->get_location();
2508         action_list_t *list = get_safe_ptr_action(obj_map, location);
2509         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2510         action_list_t::reverse_iterator rit;
2511         for (rit = list->rbegin(); (*rit) != curr; rit++)
2512                 ;
2513         rit++; /* Skip past curr */
2514         for ( ; rit != list->rend(); rit++)
2515                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2516                         return *rit;
2517         return NULL;
2518 }
2519
2520 /**
2521  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2522  * performed in a particular thread, prior to a particular fence.
2523  * @param tid The ID of the thread to check
2524  * @param before_fence The fence from which to begin the search; if NULL, then
2525  * search for the most recent fence in the thread.
2526  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2527  */
2528 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2529 {
2530         /* All fences should have NULL location */
2531         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2532         action_list_t::reverse_iterator rit = list->rbegin();
2533
2534         if (before_fence) {
2535                 for (; rit != list->rend(); rit++)
2536                         if (*rit == before_fence)
2537                                 break;
2538
2539                 ASSERT(*rit == before_fence);
2540                 rit++;
2541         }
2542
2543         for (; rit != list->rend(); rit++)
2544                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2545                         return *rit;
2546         return NULL;
2547 }
2548
2549 /**
2550  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2551  * location). This function identifies the mutex according to the current
2552  * action, which is presumed to operate on the same mutex.
2553  * @param curr The current ModelAction; also denotes the object location to
2554  * check
2555  * @return The last unlock operation
2556  */
2557 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2558 {
2559         void *location = curr->get_location();
2560         action_list_t *list = get_safe_ptr_action(obj_map, location);
2561         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2562         action_list_t::reverse_iterator rit;
2563         for (rit = list->rbegin(); rit != list->rend(); rit++)
2564                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2565                         return *rit;
2566         return NULL;
2567 }
2568
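/**
 * @brief Get the "parent" action for a new action in a particular thread
 * @param tid The ID of the thread to check
 * @return The last action performed by the thread, or its thread-creation
 * action if it has not performed any yet
 */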
2569 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2570 {
2571         ModelAction *parent = get_last_action(tid);
2572         if (!parent)
2573                 parent = get_thread(tid)->get_creation();
2574         return parent;
2575 }
2576
2577 /**
2578  * Returns the clock vector for a given thread.
2579  * @param tid The thread whose clock vector we want
2580  * @return Desired clock vector
2581  */
2582 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2583 {
2584         return get_parent_action(tid)->get_cv();
2585 }
2586
2587 /**
2588  * @brief Find the promise (if any) to resolve for the current action and
2589  * remove it from the pending promise vector
2590  * @param curr The current ModelAction. Should be a write.
2591  * @return The Promise to resolve, if any; otherwise NULL
2592  */
2593 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2594 {
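        /* The promise indices recorded in the Node (by compute_promises())
         * correspond to positions in the pending 'promises' vector */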
2595         for (unsigned int i = 0; i < promises->size(); i++)
2596                 if (curr->get_node()->get_promise(i)) {
2597                         Promise *ret = (*promises)[i];
2598                         promises->erase(promises->begin() + i);
2599                         return ret;
2600                 }
2601         return NULL;
2602 }
2603
2604 /**
2605  * Resolve a Promise with a current write.
2606  * @param write The ModelAction that is fulfilling Promises
2607  * @param promise The Promise to resolve
2608  * @return Currently always true; a failed resolution is instead recorded in priv->failed_promise
2609  */
2610 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2611 {
2612         ModelVector<ModelAction *> actions_to_check;
2613
2614         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2615                 ModelAction *read = promise->get_reader(i);
2616                 read_from(read, write);
2617                 actions_to_check.push_back(read);
2618         }
2619         /* Make sure the promise's value matches the write's value */
2620         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2621         if (!mo_graph->resolvePromise(promise, write))
2622                 priv->failed_promise = true;
2623
2624         /**
2625          * @todo  It is possible to end up in an inconsistent state, where a
2626          * "resolved" promise may still be referenced if
2627          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2628          *
2629          * Note that the inconsistency only matters when dumping mo_graph to
2630          * file.
2631          *
2632          * delete promise;
2633          */
2634
2635         /* Check whether reading from this write has made other threads
2636          * unable to resolve their promises */
2637         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2638                 ModelAction *read = actions_to_check[i];
2639                 mo_check_promises(read, true);
2640         }
2641
2642         return true;
2643 }
2644
2645 /**
2646  * Compute the set of promises that could potentially be satisfied by this
2647  * action. Note that the set computation actually appears in the Node, not in
2648  * ModelChecker.
2649  * @param curr The ModelAction that may satisfy promises
2650  */
2651 void ModelChecker::compute_promises(ModelAction *curr)
2652 {
2653         for (unsigned int i = 0; i < promises->size(); i++) {
2654                 Promise *promise = (*promises)[i];
2655                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2656                         continue;
2657
2658                 bool satisfy = true;
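                /* curr can satisfy this promise only if none of the promised
                 * readers already happens before curr or could synchronize
                 * with it */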
2659                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2660                         const ModelAction *act = promise->get_reader(j);
2661                         if (act->happens_before(curr) ||
2662                                         act->could_synchronize_with(curr)) {
2663                                 satisfy = false;
2664                                 break;
2665                         }
2666                 }
2667                 if (satisfy)
2668                         curr->get_node()->set_promise(i);
2669         }
2670 }
2671
2672 /** Checks promises in response to a change in a Thread's ClockVector. */
2673 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2674 {
2675         for (unsigned int i = 0; i < promises->size(); i++) {
2676                 Promise *promise = (*promises)[i];
2677                 if (!promise->thread_is_available(tid))
2678                         continue;
2679                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2680                         const ModelAction *act = promise->get_reader(j);
2681                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2682                                         merge_cv->synchronized_since(act)) {
2683                                 if (promise->eliminate_thread(tid)) {
2684                                         /* Promise has failed */
2685                                         priv->failed_promise = true;
2686                                         return;
2687                                 }
2688                         }
2689                 }
2690         }
2691 }
2692
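/**
 * @brief Check whether any pending promise has failed
 *
 * Called when a thread becomes disabled; a promise that no remaining thread
 * can satisfy marks the execution as having a failed promise.
 */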
2693 void ModelChecker::check_promises_thread_disabled()
2694 {
2695         for (unsigned int i = 0; i < promises->size(); i++) {
2696                 Promise *promise = (*promises)[i];
2697                 if (promise->has_failed()) {
2698                         priv->failed_promise = true;
2699                         return;
2700                 }
2701         }
2702 }
2703
2704 /**
2705  * @brief Checks promises in response to addition to modification order for
2706  * threads.
2707  *
2708  * We test whether threads are still available for satisfying promises after an
2709  * addition to our modification order constraints. Those that are unavailable
2710  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2711  * that promise has failed.
2712  *
2713  * @param act The ModelAction which updated the modification order
2714  * @param is_read_check Should be true if act is a read and we must check for
2715  * updates to the store from which it read (there is a distinction here for
2716  * RMWs, which are both a load and a store)
2717  */
2718 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2719 {
2720         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2721
2722         for (unsigned int i = 0; i < promises->size(); i++) {
2723                 Promise *promise = (*promises)[i];
2724
2725                 // Is this promise on the same location?
2726                 if (!promise->same_location(write))
2727                         continue;
2728
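                // If some reader of this promise happens before act, the new
                // modification-order constraints may have made the promise
                // unsatisfiable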
2729                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2730                         const ModelAction *pread = promise->get_reader(j);
2731                         if (!pread->happens_before(act))
2732                                continue;
2733                         if (mo_graph->checkPromise(write, promise)) {
2734                                 priv->failed_promise = true;
2735                                 return;
2736                         }
2737                         break;
2738                 }
2739
2740                 // Skip the check if act's thread has already been eliminated from this promise
2741                 if (!promise->thread_is_available(act->get_tid()))
2742                         continue;
2743
2744                 if (mo_graph->checkReachable(promise, write)) {
2745                         if (mo_graph->checkPromise(write, promise)) {
2746                                 priv->failed_promise = true;
2747                                 return;
2748                         }
2749                 }
2750         }
2751 }
2752
2753 /**
2754  * Compute the set of writes that may break the current pending release
2755  * sequence. This information is extracted from previous release sequence
2756  * calculations.
2757  *
2758  * @param curr The current ModelAction. Must be a release sequence fixup
2759  * action.
2760  */
2761 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2762 {
2763         if (pending_rel_seqs->empty())
2764                 return;
2765
2766         struct release_seq *pending = pending_rel_seqs->back();
2767         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2768                 const ModelAction *write = pending->writes[i];
2769                 curr->get_node()->add_relseq_break(write);
2770         }
2771
2772         /* NULL means don't break the sequence; just synchronize */
2773         curr->get_node()->add_relseq_break(NULL);
2774 }
2775
2776 /**
2777  * Build up an initial set of all past writes that this 'read' action may read
2778  * from, as well as any previously-observed future values that must still be valid.
2779  *
2780  * @param curr is the current ModelAction that we are exploring; it must be a
2781  * 'read' operation.
2782  */
2783 void ModelChecker::build_may_read_from(ModelAction *curr)
2784 {
2785         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2786         unsigned int i;
2787         ASSERT(curr->is_read());
2788
2789         ModelAction *last_sc_write = NULL;
2790
2791         if (curr->is_seqcst())
2792                 last_sc_write = get_last_seq_cst_write(curr);
2793
2794         /* Iterate over all threads */
2795         for (i = 0; i < thrd_lists->size(); i++) {
2796                 /* Iterate over actions in thread, starting from most recent */
2797                 action_list_t *list = &(*thrd_lists)[i];
2798                 action_list_t::reverse_iterator rit;
2799                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2800                         ModelAction *act = *rit;
2801
2802                         /* Only consider 'write' actions */
2803                         if (!act->is_write() || act == curr)
2804                                 continue;
2805
2806                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2807                         bool allow_read = true;
2808
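                        /* A seq_cst load may read only from the most recent
                         * seq_cst store to this location, or from a non-seq_cst
                         * store that does not happen before that store */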
2809                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2810                                 allow_read = false;
2811                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2812                                 allow_read = false;
2813
2814                         if (allow_read) {
2815                                 /* Only add feasible reads */
2816                                 mo_graph->startChanges();
2817                                 r_modification_order(curr, act);
2818                                 if (!is_infeasible())
2819                                         curr->get_node()->add_read_from_past(act);
2820                                 mo_graph->rollbackChanges();
2821                         }
2822
2823                         /* Include at most one act per-thread that "happens before" curr */
2824                         if (act->happens_before(curr))
2825                                 break;
2826                 }
2827         }
2828
2829         /* Inherit existing, promised future values */
2830         for (i = 0; i < promises->size(); i++) {
2831                 const Promise *promise = (*promises)[i];
2832                 const ModelAction *promise_read = promise->get_reader(0);
2833                 if (promise_read->same_var(curr)) {
2834                         /* Only add feasible future-values */
2835                         mo_graph->startChanges();
2836                         r_modification_order(curr, promise);
2837                         if (!is_infeasible())
2838                                 curr->get_node()->add_read_from_promise(promise_read);
2839                         mo_graph->rollbackChanges();
2840                 }
2841         }
2842
2843         /* We may find no valid may-read-from only if the execution is doomed */
2844         if (!curr->get_node()->read_from_size()) {
2845                 priv->no_valid_reads = true;
2846                 set_assert();
2847         }
2848
2849         if (DBG_ENABLED()) {
2850                 model_print("Reached read action:\n");
2851                 curr->print();
2852                 model_print("Printing read_from_past\n");
2853                 curr->get_node()->print_read_from_past();
2854                 model_print("End printing read_from_past\n");
2855         }
2856 }
2857
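/**
 * @brief Check whether a sleeping thread's read should consider a given write
 *
 * Walks backward through the chain of RMWs that produced @a write. The read
 * is allowed if the chain reaches the uninitialized store, or if it contains
 * a release store performed while the reader's thread was on the sleep set.
 *
 * @param curr The read action of the (sleeping) thread
 * @param write The candidate write for curr to read from
 * @return True if the read should be considered; false otherwise
 */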
2858 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2859 {
2860         for ( ; write != NULL; write = write->get_reads_from()) {
2861                 /* UNINIT actions don't have a Node, and they never sleep */
2862                 if (write->is_uninitialized())
2863                         return true;
2864                 Node *prevnode = write->get_node()->get_parent();
2865
2866                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2867                 if (write->is_release() && thread_sleep)
2868                         return true;
2869                 if (!write->is_rmw())
2870                         return false;
2871         }
2872         return true;
2873 }
2874
2875 /**
2876  * @brief Get an action representing an uninitialized atomic
2877  *
2878  * This function retrieves an existing UNINIT action from the NodeStack if possible, otherwise it creates and caches a new one
2879  *
2880  * @param curr The current action, which prompts the creation of an UNINIT action
2881  * @return A pointer to the UNINIT ModelAction
2882  */
2883 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2884 {
2885         Node *node = curr->get_node();
2886         ModelAction *act = node->get_uninit_action();
2887         if (!act) {
2888                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2889                 node->set_uninit_action(act);
2890         }
2891         act->create_cv(NULL);
2892         return act;
2893 }
2894
2895 static void print_list(action_list_t *list)
2896 {
2897         action_list_t::iterator it;
2898
2899         model_print("---------------------------------------------------------------------\n");
2900
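        /* Running hash over every action in the trace, printed below as a
         * compact fingerprint of this execution */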
2901         unsigned int hash = 0;
2902
2903         for (it = list->begin(); it != list->end(); it++) {
2904                 const ModelAction *act = *it;
2905                 if (act->get_seq_number() > 0)
2906                         act->print();
2907                 hash = hash^(hash<<3)^((*it)->hash());
2908         }
2909         model_print("HASH %u\n", hash);
2910         model_print("---------------------------------------------------------------------\n");
2911 }
2912
2913 #if SUPPORT_MOD_ORDER_DUMP
2914 void ModelChecker::dumpGraph(char *filename) const
2915 {
2916         char buffer[200];
2917         sprintf(buffer, "%s.dot", filename);
2918         FILE *file = fopen(buffer, "w");
2919         fprintf(file, "digraph %s {\n", filename);
2920         mo_graph->dumpNodes(file);
2921         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2922
2923         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2924                 ModelAction *act = *it;
2925                 if (act->is_read()) {
2926                         mo_graph->dot_print_node(file, act);
2927                         if (act->get_reads_from())
2928                                 mo_graph->dot_print_edge(file,
2929                                                 act->get_reads_from(),
2930                                                 act,
2931                                                 "label=\"rf\", color=red, weight=2");
2932                         else
2933                                 mo_graph->dot_print_edge(file,
2934                                                 act->get_reads_from_promise(),
2935                                                 act,
2936                                                 "label=\"rf\", color=red");
2937                 }
2938                 if (thread_array[id_to_int(act->get_tid())]) {
2939                         mo_graph->dot_print_edge(file,
2940                                         thread_array[id_to_int(act->get_tid())],
2941                                         act,
2942                                         "label=\"sb\", color=blue, weight=400");
2943                 }
2944
2945                 thread_array[id_to_int(act->get_tid())] = act;
2946         }
2947         fprintf(file, "}\n");
2948         model_free(thread_array);
2949         fclose(file);
2950 }
2951 #endif
2952
2953 /** @brief Prints an execution trace summary. */
2954 void ModelChecker::print_summary() const
2955 {
2956 #if SUPPORT_MOD_ORDER_DUMP
2957         char buffername[100];
2958         sprintf(buffername, "exec%04u", stats.num_total);
2959         mo_graph->dumpGraphToFile(buffername);
2960         sprintf(buffername, "graph%04u", stats.num_total);
2961         dumpGraph(buffername);
2962 #endif
2963
2964         model_print("Execution %d:", stats.num_total);
2965         if (isfeasibleprefix()) {
2966                 if (scheduler->all_threads_sleeping())
2967                         model_print(" SLEEP-SET REDUNDANT");
2968                 model_print("\n");
2969         } else
2970                 print_infeasibility(" INFEASIBLE");
2971         print_list(action_trace);
2972         model_print("\n");
2973         if (!promises->empty()) {
2974                 model_print("Pending promises:\n");
2975                 for (unsigned int i = 0; i < promises->size(); i++) {
2976                         model_print(" [P%u] ", i);
2977                         (*promises)[i]->print();
2978                 }
2979                 model_print("\n");
2980         }
2981 }
2982
2983 /**
2984  * Add a Thread to the system for the first time. Should only be called once
2985  * per thread.
2986  * @param t The Thread to add
2987  */
2988 void ModelChecker::add_thread(Thread *t)
2989 {
2990         thread_map->put(id_to_int(t->get_id()), t);
2991         scheduler->add_thread(t);
2992 }
2993
2994 /**
2995  * @brief Get a Thread reference by its ID
2996  * @param tid The Thread's ID
2997  * @return A Thread reference
2998  */
2999 Thread * ModelChecker::get_thread(thread_id_t tid) const
3000 {
3001         return thread_map->get(id_to_int(tid));
3002 }
3003
3004 /**
3005  * @brief Get a reference to the Thread in which a ModelAction was executed
3006  * @param act The ModelAction
3007  * @return A Thread reference
3008  */
3009 Thread * ModelChecker::get_thread(const ModelAction *act) const
3010 {
3011         return get_thread(act->get_tid());
3012 }
3013
3014 /**
3015  * @brief Get a Promise's "promise number"
3016  *
3017  * A "promise number" is an index number that is unique to a promise, valid
3018  * only for a specific snapshot of an execution trace. Promises may come and go
3019  * as they are generated and resolved, so an index only retains meaning for the
3020  * current snapshot.
3021  *
3022  * @param promise The Promise to check
3023  * @return The promise index, if the promise still is valid; otherwise -1
3024  */
3025 int ModelChecker::get_promise_number(const Promise *promise) const
3026 {
3027         for (unsigned int i = 0; i < promises->size(); i++)
3028                 if ((*promises)[i] == promise)
3029                         return i;
3030         /* Not found */
3031         return -1;
3032 }
3033
3034 /**
3035  * @brief Check if a Thread is currently enabled
3036  * @param t The Thread to check
3037  * @return True if the Thread is currently enabled
3038  */
3039 bool ModelChecker::is_enabled(Thread *t) const
3040 {
3041         return scheduler->is_enabled(t);
3042 }
3043
3044 /**
3045  * @brief Check if a Thread is currently enabled
3046  * @param tid The ID of the Thread to check
3047  * @return True if the Thread is currently enabled
3048  */
3049 bool ModelChecker::is_enabled(thread_id_t tid) const
3050 {
3051         return scheduler->is_enabled(tid);
3052 }
3053
3054 /**
3055  * Switch from a model-checker context to a user-thread context. This is the
3056  * complement of ModelChecker::switch_to_master and must be called from the
3057  * model-checker context.
3058  *
3059  * @param thread The user-thread to switch to
3060  */
3061 void ModelChecker::switch_from_master(Thread *thread)
3062 {
3063         scheduler->set_current_thread(thread);
3064         Thread::swap(&system_context, thread);
3065 }
3066
3067 /**
3068  * Switch from a user-context to the "master thread" context (a.k.a. system
3069  * context). This switch is made with the intention of exploring a particular
3070  * model-checking action (described by a ModelAction object). Must be called
3071  * from a user-thread context.
3072  *
3073  * @param act The current action that will be explored. May be NULL only if
3074  * trace is exiting via an assertion (see ModelChecker::set_assert and
3075  * ModelChecker::has_asserted).
3076  * @return Return the value returned by the current action
3077  */
3078 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3079 {
3080         DBG();
3081         Thread *old = thread_current();
3082         scheduler->set_current_thread(NULL);
3083         ASSERT(!old->get_pending());
3084         old->set_pending(act);
3085         if (Thread::swap(old, &system_context) < 0) {
3086                 perror("swap threads");
3087                 exit(EXIT_FAILURE);
3088         }
3089         return old->get_return_value();
3090 }
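
/*
 * Illustrative sketch (not part of this translation unit): a front-end
 * wrapper for an atomic load would hand control to the model checker
 * roughly as follows; the helper name is hypothetical, while ModelAction
 * and switch_to_master() are the interfaces defined here.
 *
 *   uint64_t example_atomic_load(void *obj, memory_order ord)
 *   {
 *           // Package the load as a ModelAction and yield to the scheduler;
 *           // the value chosen for the read is returned when we resume.
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 */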
3091
3092 /**
3093  * Takes the next step in the execution, if possible.
3094  * @param curr The current step to take
3095  * @return Returns the next Thread to run, if any; NULL if this execution
3096  * should terminate
3097  */
3098 Thread * ModelChecker::take_step(ModelAction *curr)
3099 {
3100         Thread *curr_thrd = get_thread(curr);
3101         ASSERT(curr_thrd->get_state() == THREAD_READY);
3102
3103         ASSERT(check_action_enabled(curr)); /* May have side effects? */
3104         curr = check_current_action(curr);
3105         ASSERT(curr);
3106
3107         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3108                 scheduler->remove_thread(curr_thrd);
3109
3110         return action_select_next_thread(curr);
3111 }
3112
3113 /** Wrapper to run the user's main function, with appropriate arguments */
3114 void user_main_wrapper(void *)
3115 {
3116         user_main(model->params.argc, model->params.argv);
3117 }
3118
3119 bool ModelChecker::should_terminate_execution()
3120 {
3121         /* Infeasible -> don't take any more steps */
3122         if (is_infeasible())
3123                 return true;
3124         else if (isfeasibleprefix() && have_bug_reports()) {
3125                 set_assert();
3126                 return true;
3127         }
3128
3129         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3130                 return true;
3131         return false;
3132 }
3133
3134 /** @brief Run ModelChecker for the user program */
3135 void ModelChecker::run()
3136 {
3137         do {
3138                 thrd_t user_thread;
3139                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3140                 add_thread(t);
3141
3142                 do {
3143                         /*
3144                          * Stash next pending action(s) for thread(s). There
3145                          * should only need to stash one thread's action--the
3146                          * thread which just took a step--plus the first step
3147                          * for any newly-created thread
3148                          */
3149                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3150                                 thread_id_t tid = int_to_id(i);
3151                                 Thread *thr = get_thread(tid);
3152                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3153                                         switch_from_master(thr);
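                                        /* A thread that (transitively) waits
                                         * on itself is part of a wait cycle:
                                         * report deadlock */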
3154                                         if (thr->is_waiting_on(thr))
3155                                                 assert_bug("Deadlock detected (thread %u)", i);
3156                                 }
3157                         }
3158
3159                         /* Don't schedule threads which should be disabled */
3160                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3161                                 Thread *th = get_thread(int_to_id(i));
3162                                 ModelAction *act = th->get_pending();
3163                                 if (act && is_enabled(th) && !check_action_enabled(act)) {
3164                                         scheduler->sleep(th);
3165                                 }
3166                         }
3167
3168                         /* Catch assertions from prior take_step or from
3169                          * between-ModelAction bugs (e.g., data races) */
3170                         if (has_asserted())
3171                                 break;
3172
3173                         if (!t)
3174                                 t = get_next_thread();
3175                         if (!t || t->is_model_thread())
3176                                 break;
3177
3178                         /* Consume the next action for a Thread */
3179                         ModelAction *curr = t->get_pending();
3180                         t->set_pending(NULL);
3181                         t = take_step(curr);
3182                 } while (!should_terminate_execution());
3183
3184                 /*
3185                  * Launch end-of-execution release sequence fixups only when
3186                  * the execution is otherwise feasible AND there are:
3187                  *
3188                  * (1) pending release sequences
3189                  * (2) pending assertions that could be invalidated by a change
3190                  * in clock vectors (i.e., data races)
3191                  * (3) no pending promises
3192                  */
3193                 while (!pending_rel_seqs->empty() &&
3194                                 is_feasible_prefix_ignore_relseq() &&
3195                                 !unrealizedraces.empty()) {
3196                         model_print("*** WARNING: release sequence fixup action "
3197                                         "(%zu pending release sequence(s)) ***\n",
3198                                         pending_rel_seqs->size());
3199                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3200                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3201                                         model_thread);
3202                         take_step(fixup);
3203                 }
3204         } while (next_execution());
3205
3206         model_print("******* Model-checking complete: *******\n");
3207         print_stats();
3208 }