cyclegraph: propagate RMW atomicity edges down the chain
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new SnapVector<Promise *>()),
90         futurevalues(new SnapVector<struct PendingFutureValue>()),
91         pending_rel_seqs(new SnapVector<struct release_seq *>()),
92         thrd_last_action(new SnapVector<ModelAction *>(1)),
93         thrd_last_fence_release(new SnapVector<ModelAction *>()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         SnapVector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new SnapVector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts, and we cannot split them. THREAD_CREATE
211  * actions should be followed by the execution of their child thread. In either
212  * case, the current action should determine the next thread schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
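/*
 * Example of user code that exercises both cases (a minimal sketch; assumes
 * the checker's C11-style <threads.h> wrappers and its user_main entry point):
 *
 *   #include <atomic>
 *   #include <threads.h>
 *
 *   std::atomic<int> flag(0);
 *
 *   static void child(void *arg)
 *   {
 *           // Modeled as a read part (RMWR) followed by a write part; the
 *           // scheduler must run the two parts back-to-back.
 *           flag.fetch_add(1, std::memory_order_relaxed);
 *   }
 *
 *   int user_main(int argc, char **argv)
 *   {
 *           thrd_t t;
 *           thrd_create(&t, (thrd_start_t)&child, NULL); // followed by child
 *           thrd_join(t);
 *           return 0;
 *   }
 */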
228
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * it defers to the scheduler.
236  *
237  * @return The next chosen thread to run, if any exists; otherwise NULL, if
238  * the current execution should terminate.
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_misc()) {
264                         /* The next node will try to satisfy a different misc_index value. */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else if (nextnode->increment_promise()) {
268                         /* The next node will try to satisfy a different set of promises. */
269                         tid = next->get_tid();
270                         node_stack->pop_restofstack(2);
271                 } else if (nextnode->increment_read_from()) {
272                         /* The next node will read from a different write. */
273                         tid = next->get_tid();
274                         node_stack->pop_restofstack(2);
275                 } else if (nextnode->increment_relseq_break()) {
276                         /* The next node will try to resolve a release sequence differently */
277                         tid = next->get_tid();
278                         node_stack->pop_restofstack(2);
279                 } else {
280                         ASSERT(prevnode);
281                         /* Make a different thread execute for next step */
282                         scheduler->add_sleep(get_thread(next->get_tid()));
283                         tid = prevnode->get_next_backtrack();
284                         /* Make sure the backtracked thread isn't sleeping. */
285                         node_stack->pop_restofstack(1);
286                         if (diverge == earliest_diverge) {
287                                 earliest_diverge = prevnode->get_action();
288                         }
289                 }
290                 /* Start the round robin scheduler from this thread id */
291                 scheduler->set_scheduler_thread(tid);
292                 /* The correct sleep set is in the parent node. */
293                 execute_sleep_set();
294
295                 DEBUG("*** Divergence point ***\n");
296
297                 diverge = NULL;
298         } else {
299                 tid = next->get_tid();
300         }
301         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
302         ASSERT(tid != THREAD_ID_T_NONE);
303         return thread_map->get(id_to_int(tid));
304 }
305
306 /**
307  * We need to know what the next actions of all threads in the sleep
308  * set will be.  This method marks the pending action of each thread in
309  * the sleep set with the sleep flag.
310  */
311
312 void ModelChecker::execute_sleep_set()
313 {
314         for (unsigned int i = 0; i < get_num_threads(); i++) {
315                 thread_id_t tid = int_to_id(i);
316                 Thread *thr = get_thread(tid);
317                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
318                         thr->get_pending()->set_sleep_flag();
319                 }
320         }
321 }
322
323 /**
324  * @brief Should the current action wake up a given thread?
325  *
326  * @param curr The current action
327  * @param thread The thread that we might wake up
328  * @return True, if we should wake up the sleeping thread; false otherwise
329  */
330 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
331 {
332         const ModelAction *asleep = thread->get_pending();
333         /* Don't allow partial RMW to wake anyone up */
334         if (curr->is_rmwr())
335                 return false;
336         /* Synchronizing actions may have been backtracked */
337         if (asleep->could_synchronize_with(curr))
338                 return true;
339         /* A release operation (store- or fence-release) may wake a sleeping acquire fence */
340         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
341                 return true;
342         /* Fence-release + store can wake a load-acquire on the same location */
343         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
344                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
345                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
346                         return true;
347         }
348         return false;
349 }
350
351 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
352 {
353         for (unsigned int i = 0; i < get_num_threads(); i++) {
354                 Thread *thr = get_thread(int_to_id(i));
355                 if (scheduler->is_sleep_set(thr)) {
356                         if (should_wake_up(curr, thr))
357                                 /* Remove this thread from sleep set */
358                                 scheduler->remove_sleep(thr);
359                 }
360         }
361 }
362
363 /** @brief Alert the model-checker that an incorrectly-ordered
364  * synchronization was made */
365 void ModelChecker::set_bad_synchronization()
366 {
367         priv->bad_synchronization = true;
368 }
369
370 /**
371  * Check whether the current trace has triggered an assertion which should halt
372  * its execution.
373  *
374  * @return True, if the execution should be aborted; false otherwise
375  */
376 bool ModelChecker::has_asserted() const
377 {
378         return priv->asserted;
379 }
380
381 /**
382  * Trigger a trace assertion which should cause this execution to be halted.
383  * This can be due to a detected bug or due to an infeasibility that should
384  * halt ASAP.
385  */
386 void ModelChecker::set_assert()
387 {
388         priv->asserted = true;
389 }
390
391 /**
392  * Check if we are in a deadlock. Should only be called at the end of an
393  * execution, although it should not give false positives in the middle of an
394  * execution (there should be some ENABLED thread).
395  *
396  * @return True if program is in a deadlock; false otherwise
397  */
398 bool ModelChecker::is_deadlocked() const
399 {
400         bool blocking_threads = false;
401         for (unsigned int i = 0; i < get_num_threads(); i++) {
402                 thread_id_t tid = int_to_id(i);
403                 if (is_enabled(tid))
404                         return false;
405                 Thread *t = get_thread(tid);
406                 if (!t->is_model_thread() && t->get_pending())
407                         blocking_threads = true;
408         }
409         return blocking_threads;
410 }
411
412 /**
413  * Check if a Thread has entered a circular wait deadlock situation. This will
414  * not check other threads for potential deadlock situations, and may miss
415  * deadlocks involving WAIT.
416  *
417  * @param t The thread which may have entered a deadlock
418  * @return True if this Thread entered a deadlock; false otherwise
419  */
420 bool ModelChecker::is_circular_wait(const Thread *t) const
421 {
422         for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on())
423                 if (waiting == t)
424                         return true;
425         return false;
426 }
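/*
 * Example of a circular wait that this check detects (a sketch using the
 * std::mutex wrapper modeled by the checker):
 *
 *   std::mutex m1, m2;
 *
 *   static void thread_a(void *arg) { m1.lock(); m2.lock(); m2.unlock(); m1.unlock(); }
 *   static void thread_b(void *arg) { m2.lock(); m1.lock(); m1.unlock(); m2.unlock(); }
 *
 * In the interleaving where thread_a holds m1 and thread_b holds m2,
 * thread_a waits on thread_b (for m2) and thread_b waits on thread_a (for
 * m1); following waiting_on() from either thread leads back to itself.
 */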
427
428 /**
429  * Check if this is a complete execution. That is, have all threads completed
430  * execution (rather than exiting because sleep sets have forced a redundant
431  * execution)?
432  *
433  * @return True if the execution is complete.
434  */
435 bool ModelChecker::is_complete_execution() const
436 {
437         for (unsigned int i = 0; i < get_num_threads(); i++)
438                 if (is_enabled(int_to_id(i)))
439                         return false;
440         return true;
441 }
442
443 /**
444  * @brief Assert a bug in the executing program.
445  *
446  * Use this function to assert any sort of bug in the user program. If the
447  * current trace is feasible (actually, a prefix of some feasible execution),
448  * then this execution will be aborted, printing the appropriate message. If
449  * the current trace is not yet feasible, the error message will be stashed and
450  * printed if the execution ever becomes feasible.
451  *
452  * @param msg Descriptive message for the bug (do not include newline char)
453  * @return True if bug is immediately-feasible
454  */
455 bool ModelChecker::assert_bug(const char *msg)
456 {
457         priv->bugs.push_back(new bug_message(msg));
458
459         if (isfeasibleprefix()) {
460                 set_assert();
461                 return true;
462         }
463         return false;
464 }
465
466 /**
467  * @brief Assert a bug in the executing program, asserted by a user thread
468  * @see ModelChecker::assert_bug
469  * @param msg Descriptive message for the bug (do not include newline char)
470  */
471 void ModelChecker::assert_user_bug(const char *msg)
472 {
473         /* If feasible bug, bail out now */
474         if (assert_bug(msg))
475                 switch_to_master(NULL);
476 }
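/*
 * User code typically reaches this path through an assertion macro in the
 * checker's public headers (macro name assumed here), for example:
 *
 *   MODEL_ASSERT(x.load(std::memory_order_relaxed) == 1);
 *
 * If the asserted condition is false on a feasible trace, the message is
 * recorded via assert_bug() and the execution halts immediately.
 */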
477
478 /** @return True, if any bugs have been reported for this execution */
479 bool ModelChecker::have_bug_reports() const
480 {
481         return priv->bugs.size() != 0;
482 }
483
484 /** @brief Print bug report listing for this execution (if any bugs exist) */
485 void ModelChecker::print_bugs() const
486 {
487         if (have_bug_reports()) {
488                 model_print("Bug report: %zu bug%s detected\n",
489                                 priv->bugs.size(),
490                                 priv->bugs.size() > 1 ? "s" : "");
491                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
492                         priv->bugs[i]->print();
493         }
494 }
495
496 /**
497  * @brief Record end-of-execution stats
498  *
499  * Must be run when exiting an execution. Records various stats.
500  * @see struct execution_stats
501  */
502 void ModelChecker::record_stats()
503 {
504         stats.num_total++;
505         if (!isfeasibleprefix())
506                 stats.num_infeasible++;
507         else if (have_bug_reports())
508                 stats.num_buggy_executions++;
509         else if (is_complete_execution())
510                 stats.num_complete++;
511         else {
512                 stats.num_redundant++;
513
514                 /**
515                  * @todo We can violate this ASSERT() when fairness/sleep sets
516                  * conflict to cause an execution to terminate, e.g. with:
517                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
518                  */
519                 //ASSERT(scheduler->all_threads_sleeping());
520         }
521 }
522
523 /** @brief Print execution stats */
524 void ModelChecker::print_stats() const
525 {
526         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
527         model_print("Number of redundant executions: %d\n", stats.num_redundant);
528         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
529         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
530         model_print("Total executions: %d\n", stats.num_total);
531         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
532 }
533
534 /**
535  * @brief End-of-execution print
536  * @param printbugs Should any existing bugs be printed?
537  */
538 void ModelChecker::print_execution(bool printbugs) const
539 {
540         print_program_output();
541
542         if (params.verbose) {
543                 model_print("Earliest divergence point since last feasible execution:\n");
544                 if (earliest_diverge)
545                         earliest_diverge->print();
546                 else
547                         model_print("(Not set)\n");
548
549                 model_print("\n");
550                 print_stats();
551         }
552
553         /* Don't print invalid bugs */
554         if (printbugs)
555                 print_bugs();
556
557         model_print("\n");
558         print_summary();
559 }
560
561 /**
562  * Queries the model-checker for more executions to explore and, if one
563  * exists, resets the model-checker state to execute a new execution.
564  *
565  * @return If there are more executions to explore, return true. Otherwise,
566  * return false.
567  */
568 bool ModelChecker::next_execution()
569 {
570         DBG();
571         /* Is this execution a feasible execution that's worth bug-checking? */
572         bool complete = isfeasibleprefix() && (is_complete_execution() ||
573                         have_bug_reports());
574
575         /* End-of-execution bug checks */
576         if (complete) {
577                 if (is_deadlocked())
578                         assert_bug("Deadlock detected");
579
580                 checkDataRaces();
581         }
582
583         record_stats();
584
585         /* Output */
586         if (params.verbose || (complete && have_bug_reports()))
587                 print_execution(complete);
588         else
589                 clear_program_output();
590
591         if (complete)
592                 earliest_diverge = NULL;
593
594         if ((diverge = get_next_backtrack()) == NULL)
595                 return false;
596
597         if (DBG_ENABLED()) {
598                 model_print("Next execution will diverge at:\n");
599                 diverge->print();
600         }
601
602         reset_to_initial_state();
603         return true;
604 }
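/*
 * The contract above implies a top-level driver shaped roughly as follows
 * (a sketch only; run_one_execution() is a placeholder for executing the
 * user program under the scheduler):
 *
 *   do {
 *           run_one_execution();
 *   } while (model->next_execution());
 *
 * Each iteration explores one interleaving; next_execution() rolls the
 * program back to its initial snapshot whenever another backtracking point
 * remains to be explored.
 */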
605
606 /**
607  * @brief Find the last fence-related backtracking conflict for a ModelAction
608  *
609  * This function performs the search for the most recent conflicting action
610  * against which we should perform backtracking, as affected by fence
611  * operations. This includes pairs of potentially-synchronizing actions which
612  * occur due to fence-acquire or fence-release, and hence should be explored in
613  * the opposite execution order.
614  *
615  * @param act The current action
616  * @return The most recent action which conflicts with act due to fences
617  */
618 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
619 {
620         /* Only perform release/acquire fence backtracking for stores */
621         if (!act->is_write())
622                 return NULL;
623
624         /* Find a fence-release (or, act is a release) */
625         ModelAction *last_release;
626         if (act->is_release())
627                 last_release = act;
628         else
629                 last_release = get_last_fence_release(act->get_tid());
630         if (!last_release)
631                 return NULL;
632
633         /* Skip past the release */
634         action_list_t *list = action_trace;
635         action_list_t::reverse_iterator rit;
636         for (rit = list->rbegin(); rit != list->rend(); rit++)
637                 if (*rit == last_release)
638                         break;
639         ASSERT(rit != list->rend());
640
641         /* Find a prior:
642          *   load-acquire
643          * or
644          *   load --sb-> fence-acquire */
645         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
646         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
647         bool found_acquire_fences = false;
648         for ( ; rit != list->rend(); rit++) {
649                 ModelAction *prev = *rit;
650                 if (act->same_thread(prev))
651                         continue;
652
653                 int tid = id_to_int(prev->get_tid());
654
655                 if (prev->is_read() && act->same_var(prev)) {
656                         if (prev->is_acquire()) {
657                                 /* Found most recent load-acquire, don't need
658                                  * to search for more fences */
659                                 if (!found_acquire_fences)
660                                         return NULL;
661                         } else {
662                                 prior_loads[tid] = prev;
663                         }
664                 }
665                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
666                         found_acquire_fences = true;
667                         acquire_fences[tid] = prev;
668                 }
669         }
670
671         ModelAction *latest_backtrack = NULL;
672         for (unsigned int i = 0; i < acquire_fences.size(); i++)
673                 if (acquire_fences[i] && prior_loads[i])
674                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
675                                 latest_backtrack = acquire_fences[i];
676         return latest_backtrack;
677 }
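/*
 * Example of the fence-mediated conflict this search targets (a minimal
 * sketch):
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1 -- the current action 'act':
 *   x.store(1, std::memory_order_release);
 *
 *   // Thread 2 -- earlier in the trace:
 *   int r = x.load(std::memory_order_relaxed);
 *   std::atomic_thread_fence(std::memory_order_acquire);
 *
 * If the relaxed load reads the value written by the release store, the
 * store synchronizes with the acquire fence, so both orderings of the load
 * and the store must be explored; the acquire fence is returned here as the
 * backtracking conflict.
 */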
678
679 /**
680  * @brief Find the last backtracking conflict for a ModelAction
681  *
682  * This function performs the search for the most recent conflicting action
683  * against which we should perform backtracking. This primarily includes pairs of
684  * synchronizing actions which should be explored in the opposite execution
685  * order.
686  *
687  * @param act The current action
688  * @return The most recent action which conflicts with act
689  */
690 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
691 {
692         switch (act->get_type()) {
693         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
694         case ATOMIC_READ:
695         case ATOMIC_WRITE:
696         case ATOMIC_RMW: {
697                 ModelAction *ret = NULL;
698
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (prev->could_synchronize_with(act)) {
705                                 ret = prev;
706                                 break;
707                         }
708                 }
709
710                 ModelAction *ret2 = get_last_fence_conflict(act);
711                 if (!ret2)
712                         return ret;
713                 if (!ret)
714                         return ret2;
715                 if (*ret < *ret2)
716                         return ret2;
717                 return ret;
718         }
719         case ATOMIC_LOCK:
720         case ATOMIC_TRYLOCK: {
721                 /* linear search: from most recent to oldest */
722                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
723                 action_list_t::reverse_iterator rit;
724                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
725                         ModelAction *prev = *rit;
726                         if (act->is_conflicting_lock(prev))
727                                 return prev;
728                 }
729                 break;
730         }
731         case ATOMIC_UNLOCK: {
732                 /* linear search: from most recent to oldest */
733                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
734                 action_list_t::reverse_iterator rit;
735                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
736                         ModelAction *prev = *rit;
737                         if (!act->same_thread(prev) && prev->is_failed_trylock())
738                                 return prev;
739                 }
740                 break;
741         }
742         case ATOMIC_WAIT: {
743                 /* linear search: from most recent to oldest */
744                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
745                 action_list_t::reverse_iterator rit;
746                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
747                         ModelAction *prev = *rit;
748                         if (!act->same_thread(prev) && prev->is_failed_trylock())
749                                 return prev;
750                         if (!act->same_thread(prev) && prev->is_notify())
751                                 return prev;
752                 }
753                 break;
754         }
755
756         case ATOMIC_NOTIFY_ALL:
757         case ATOMIC_NOTIFY_ONE: {
758                 /* linear search: from most recent to oldest */
759                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
760                 action_list_t::reverse_iterator rit;
761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
762                         ModelAction *prev = *rit;
763                         if (!act->same_thread(prev) && prev->is_wait())
764                                 return prev;
765                 }
766                 break;
767         }
768         default:
769                 break;
770         }
771         return NULL;
772 }
773
774 /** This method finds backtracking points against which we should try to
775  * reorder the given ModelAction.
776  *
777  * @param act The ModelAction to find backtracking points for.
778  */
779 void ModelChecker::set_backtracking(ModelAction *act)
780 {
781         Thread *t = get_thread(act);
782         ModelAction *prev = get_last_conflict(act);
783         if (prev == NULL)
784                 return;
785
786         Node *node = prev->get_node()->get_parent();
787
788         int low_tid, high_tid;
789         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
790                 low_tid = id_to_int(act->get_tid());
791                 high_tid = low_tid + 1;
792         } else {
793                 low_tid = 0;
794                 high_tid = get_num_threads();
795         }
796
797         for (int i = low_tid; i < high_tid; i++) {
798                 thread_id_t tid = int_to_id(i);
799
800                 /* Make sure this thread can be enabled here. */
801                 if (i >= node->get_num_threads())
802                         break;
803
804                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
805                 if (node->enabled_status(tid) != THREAD_ENABLED)
806                         continue;
807
808                 /* Check if this has been explored already */
809                 if (node->has_been_explored(tid))
810                         continue;
811
812                 /* See if fairness allows */
813                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
814                         bool unfair = false;
815                         for (int t = 0; t < node->get_num_threads(); t++) {
816                                 thread_id_t tother = int_to_id(t);
817                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
818                                         unfair = true;
819                                         break;
820                                 }
821                         }
822                         if (unfair)
823                                 continue;
824                 }
825
826                 /* See if CHESS-like yield fairness allows */
827                 if (model->params.yieldon) {
828                         bool unfair = false;
829                         for (int t = 0; t < node->get_num_threads(); t++) {
830                                 thread_id_t tother = int_to_id(t);
831                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
832                                         unfair = true;
833                                         break;
834                                 }
835                         }
836                         if (unfair)
837                                 continue;
838                 }
839
840                 /* Cache the latest backtracking point */
841                 set_latest_backtrack(prev);
842
843                 /* If this is a new backtracking point, mark the tree */
844                 if (!node->set_backtrack(tid))
845                         continue;
846                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
847                                         id_to_int(prev->get_tid()),
848                                         id_to_int(t->get_id()));
849                 if (DBG_ENABLED()) {
850                         prev->print();
851                         act->print();
852                 }
853         }
854 }
855
856 /**
857  * @brief Cache a backtracking point as the "most recent", if eligible
858  *
859  * Note that this does not prepare the NodeStack for this backtracking
860  * operation; it only caches the action on a per-execution basis.
861  *
862  * @param act The operation at which we should explore a different next action
863  * (i.e., backtracking point)
864  * @return True, if this action is now the most recent backtracking point;
865  * false otherwise
866  */
867 bool ModelChecker::set_latest_backtrack(ModelAction *act)
868 {
869         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
870                 priv->next_backtrack = act;
871                 return true;
872         }
873         return false;
874 }
875
876 /**
877  * Returns last backtracking point. The model checker will explore a different
878  * path for this point in the next execution.
879  * @return The ModelAction at which the next execution should diverge.
880  */
881 ModelAction * ModelChecker::get_next_backtrack()
882 {
883         ModelAction *next = priv->next_backtrack;
884         priv->next_backtrack = NULL;
885         return next;
886 }
887
888 /**
889  * Processes a read model action.
890  * @param curr is the read model action to process.
891  * @return True if processing this read updates the mo_graph.
892  */
893 bool ModelChecker::process_read(ModelAction *curr)
894 {
895         Node *node = curr->get_node();
896         while (true) {
897                 bool updated = false;
898                 switch (node->get_read_from_status()) {
899                 case READ_FROM_PAST: {
900                         const ModelAction *rf = node->get_read_from_past();
901                         ASSERT(rf);
902
903                         mo_graph->startChanges();
904
905                         ASSERT(!is_infeasible());
906                         if (!check_recency(curr, rf)) {
907                                 if (node->increment_read_from()) {
908                                         mo_graph->rollbackChanges();
909                                         continue;
910                                 } else {
911                                         priv->too_many_reads = true;
912                                 }
913                         }
914
915                         updated = r_modification_order(curr, rf);
916                         read_from(curr, rf);
917                         mo_graph->commitChanges();
918                         mo_check_promises(curr, true);
919                         break;
920                 }
921                 case READ_FROM_PROMISE: {
922                         Promise *promise = curr->get_node()->get_read_from_promise();
923                         if (promise->add_reader(curr))
924                                 priv->failed_promise = true;
925                         curr->set_read_from_promise(promise);
926                         mo_graph->startChanges();
927                         if (!check_recency(curr, promise))
928                                 priv->too_many_reads = true;
929                         updated = r_modification_order(curr, promise);
930                         mo_graph->commitChanges();
931                         break;
932                 }
933                 case READ_FROM_FUTURE: {
934                         /* Read from future value */
935                         struct future_value fv = node->get_future_value();
936                         Promise *promise = new Promise(curr, fv);
937                         curr->set_read_from_promise(promise);
938                         promises->push_back(promise);
939                         mo_graph->startChanges();
940                         updated = r_modification_order(curr, promise);
941                         mo_graph->commitChanges();
942                         break;
943                 }
944                 default:
945                         ASSERT(false);
946                 }
947                 get_thread(curr)->set_return_value(curr->get_return_value());
948                 return updated;
949         }
950 }
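/*
 * Example of a read with several read-from candidates (a minimal sketch):
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);
 *
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);
 *
 * Depending on the interleaving, the load may read the initialization value
 * or Thread 1's store; via promises/future values it may even read from a
 * store that has not executed yet. Each candidate is a separate branch in
 * the NodeStack, explored here through increment_read_from().
 */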
951
952 /**
953  * Processes a lock, trylock, unlock, wait, or notify model action.
954  * @param curr The mutex-related model action to process.
955  *
956  * The trylock operation checks whether the lock is already held.  If not,
957  * it falls through to the normal lock case.  If it is, the trylock
958  * fails.
959  *
960  * The lock operation has already been checked to be enabled, so it
961  * simply grabs the lock and synchronizes with the previous unlock.
962  *
963  * The unlock operation has to re-enable all of the threads that are
964  * waiting on the lock.
965  *
966  * @return True if synchronization was updated; false otherwise
967  */
968 bool ModelChecker::process_mutex(ModelAction *curr)
969 {
970         std::mutex *mutex = curr->get_mutex();
971         struct std::mutex_state *state = NULL;
972
973         if (mutex)
974                 state = mutex->get_state();
975
976         switch (curr->get_type()) {
977         case ATOMIC_TRYLOCK: {
978                 bool success = !state->locked;
979                 curr->set_try_lock(success);
980                 if (!success) {
981                         get_thread(curr)->set_return_value(0);
982                         break;
983                 }
984                 get_thread(curr)->set_return_value(1);
985         }
986                 //otherwise fall through to the lock case
987         case ATOMIC_LOCK: {
988                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
989                         assert_bug("Lock access before initialization");
990                 state->locked = get_thread(curr);
991                 ModelAction *unlock = get_last_unlock(curr);
992                 //synchronize with the previous unlock statement
993                 if (unlock != NULL) {
994                         curr->synchronize_with(unlock);
995                         return true;
996                 }
997                 break;
998         }
999         case ATOMIC_UNLOCK: {
1000                 //unlock the lock
1001                 state->locked = NULL;
1002                 //wake up the other threads
1003                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
1004                 //activate all the waiting threads
1005                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1006                         scheduler->wake(get_thread(*rit));
1007                 }
1008                 waiters->clear();
1009                 break;
1010         }
1011         case ATOMIC_WAIT: {
1012                 //unlock the lock
1013                 state->locked = NULL;
1014                 //wake up the other threads
1015                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
1016                 //activate all the waiting threads
1017                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1018                         scheduler->wake(get_thread(*rit));
1019                 }
1020                 waiters->clear();
1021                 //check whether we should go to sleep or not...simulate spurious failures
1022                 if (curr->get_node()->get_misc() == 0) {
1023                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1024                         //disable us
1025                         scheduler->sleep(get_thread(curr));
1026                 }
1027                 break;
1028         }
1029         case ATOMIC_NOTIFY_ALL: {
1030                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1031                 //activate all the waiting threads
1032                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1033                         scheduler->wake(get_thread(*rit));
1034                 }
1035                 waiters->clear();
1036                 break;
1037         }
1038         case ATOMIC_NOTIFY_ONE: {
1039                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1040                 int wakeupthread = curr->get_node()->get_misc();
1041                 action_list_t::iterator it = waiters->begin();
1042                 advance(it, wakeupthread);
1043                 scheduler->wake(get_thread(*it));
1044                 waiters->erase(it);
1045                 break;
1046         }
1047
1048         default:
1049                 ASSERT(0);
1050         }
1051         return false;
1052 }
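/*
 * Example of the trylock handling above (a sketch using the modeled
 * std::mutex):
 *
 *   std::mutex m;
 *
 *   if (m.try_lock()) {     // ATOMIC_TRYLOCK; thread return value 1
 *           // critical section
 *           m.unlock();     // ATOMIC_UNLOCK wakes any waiters
 *   } else {
 *           // lock was held; return value 0, no blocking
 *   }
 *
 * When another thread contends for the lock, different interleavings reach
 * the success and the failure branch, so both are explored.
 */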
1053
1054 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1055 {
1056         /* Do more ambitious checks now that mo is more complete */
1057         if (mo_may_allow(writer, reader)) {
1058                 Node *node = reader->get_node();
1059
1060                 /* Find an ancestor thread which exists at the time of the reader */
1061                 Thread *write_thread = get_thread(writer);
1062                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1063                         write_thread = write_thread->get_parent();
1064
1065                 struct future_value fv = {
1066                         writer->get_write_value(),
1067                         writer->get_seq_number() + params.maxfuturedelay,
1068                         write_thread->get_id(),
1069                 };
1070                 if (node->add_future_value(fv))
1071                         set_latest_backtrack(reader);
1072         }
1073 }
1074
1075 /**
1076  * Process a write ModelAction
1077  * @param curr The ModelAction to process
1078  * @return True if the mo_graph was updated or promises were resolved
1079  */
1080 bool ModelChecker::process_write(ModelAction *curr)
1081 {
1082         /* Readers to which we may send our future value */
1083         ModelVector<ModelAction *> send_fv;
1084
1085         bool updated_mod_order = w_modification_order(curr, &send_fv);
1086         int promise_idx = get_promise_to_resolve(curr);
1087         const ModelAction *earliest_promise_reader;
1088         bool updated_promises = false;
1089
1090         if (promise_idx >= 0) {
1091                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1092                 updated_promises = resolve_promise(curr, promise_idx);
1093         } else
1094                 earliest_promise_reader = NULL;
1095
1096         /* Don't send future values to reads after the Promise we resolve */
1097         for (unsigned int i = 0; i < send_fv.size(); i++) {
1098                 ModelAction *read = send_fv[i];
1099                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1100                         futurevalues->push_back(PendingFutureValue(curr, read));
1101         }
1102
1103         if (promises->size() == 0) {
1104                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1105                         struct PendingFutureValue pfv = (*futurevalues)[i];
1106                         add_future_value(pfv.writer, pfv.act);
1107                 }
1108                 futurevalues->clear();
1109         }
1110
1111         mo_graph->commitChanges();
1112         mo_check_promises(curr, false);
1113
1114         get_thread(curr)->set_return_value(VALUE_NONE);
1115         return updated_mod_order || updated_promises;
1116 }
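/*
 * Example where writes resolve promises (a minimal load-buffering sketch):
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   // Thread 1:
 *   int r1 = x.load(std::memory_order_relaxed);
 *   y.store(1, std::memory_order_relaxed);
 *
 *   // Thread 2:
 *   int r2 = y.load(std::memory_order_relaxed);
 *   x.store(1, std::memory_order_relaxed);
 *
 * The relaxed outcome r1 == 1 && r2 == 1 requires each load to read a value
 * that has not yet been written when the load executes. The loads first
 * read from Promises; when the matching store (this function's 'curr') is
 * later executed, get_promise_to_resolve()/resolve_promise() bind the store
 * to those readers.
 */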
1117
1118 /**
1119  * Process a fence ModelAction
1120  * @param curr The ModelAction to process
1121  * @return True if synchronization was updated
1122  */
1123 bool ModelChecker::process_fence(ModelAction *curr)
1124 {
1125         /*
1126          * fence-relaxed: no-op
1127          * fence-release: only log the occurrence (not in this function), for
1128          *   use in later synchronization
1129          * fence-acquire (this function): search for hypothetical release
1130          *   sequences
1131          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1132          */
1133         bool updated = false;
1134         if (curr->is_acquire()) {
1135                 action_list_t *list = action_trace;
1136                 action_list_t::reverse_iterator rit;
1137                 /* Find X : is_read(X) && X --sb-> curr */
1138                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1139                         ModelAction *act = *rit;
1140                         if (act == curr)
1141                                 continue;
1142                         if (act->get_tid() != curr->get_tid())
1143                                 continue;
1144                         /* Stop at the beginning of the thread */
1145                         if (act->is_thread_start())
1146                                 break;
1147                         /* Stop once we reach a prior fence-acquire */
1148                         if (act->is_fence() && act->is_acquire())
1149                                 break;
1150                         if (!act->is_read())
1151                                 continue;
1152                         /* read-acquire will find its own release sequences */
1153                         if (act->is_acquire())
1154                                 continue;
1155
1156                         /* Establish hypothetical release sequences */
1157                         rel_heads_list_t release_heads;
1158                         get_release_seq_heads(curr, act, &release_heads);
1159                         for (unsigned int i = 0; i < release_heads.size(); i++)
1160                                 if (!curr->synchronize_with(release_heads[i]))
1161                                         set_bad_synchronization();
1162                         if (release_heads.size() != 0)
1163                                 updated = true;
1164                 }
1165         }
1166         return updated;
1167 }
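/*
 * Example of the fence-acquire case handled above (a minimal sketch):
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   std::atomic_thread_fence(std::memory_order_release);
 *   x.store(1, std::memory_order_relaxed);
 *
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);            // X: sb-before the fence
 *   std::atomic_thread_fence(std::memory_order_acquire);  // 'curr'
 *
 * If the relaxed load reads 1, this function walks backward to find that
 * load, builds the hypothetical release sequence behind the write it read
 * from, and synchronizes the acquire fence with the resulting release heads.
 */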
1168
1169 /**
1170  * @brief Process the current action for thread-related activity
1171  *
1172  * Performs current-action processing for a THREAD_* ModelAction. Processing
1173  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1174  * synchronization, etc.  This function is a no-op for non-THREAD actions
1175  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1176  *
1177  * @param curr The current action
1178  * @return True if synchronization was updated or a thread completed
1179  */
1180 bool ModelChecker::process_thread_action(ModelAction *curr)
1181 {
1182         bool updated = false;
1183
1184         switch (curr->get_type()) {
1185         case THREAD_CREATE: {
1186                 thrd_t *thrd = (thrd_t *)curr->get_location();
1187                 struct thread_params *params = (struct thread_params *)curr->get_value();
1188                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1189                 add_thread(th);
1190                 th->set_creation(curr);
1191                 /* Promises can be satisfied by children */
1192                 for (unsigned int i = 0; i < promises->size(); i++) {
1193                         Promise *promise = (*promises)[i];
1194                         if (promise->thread_is_available(curr->get_tid()))
1195                                 promise->add_thread(th->get_id());
1196                 }
1197                 break;
1198         }
1199         case THREAD_JOIN: {
1200                 Thread *blocking = curr->get_thread_operand();
1201                 ModelAction *act = get_last_action(blocking->get_id());
1202                 curr->synchronize_with(act);
1203                 updated = true; /* trigger rel-seq checks */
1204                 break;
1205         }
1206         case THREAD_FINISH: {
1207                 Thread *th = get_thread(curr);
1208                 while (!th->wait_list_empty()) {
1209                         ModelAction *act = th->pop_wait_list();
1210                         scheduler->wake(get_thread(act));
1211                 }
1212                 th->complete();
1213                 /* Completed thread can't satisfy promises */
1214                 for (unsigned int i = 0; i < promises->size(); i++) {
1215                         Promise *promise = (*promises)[i];
1216                         if (promise->thread_is_available(th->get_id()))
1217                                 if (promise->eliminate_thread(th->get_id()))
1218                                         priv->failed_promise = true;
1219                 }
1220                 updated = true; /* trigger rel-seq checks */
1221                 break;
1222         }
1223         case THREAD_START: {
1224                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1225                 break;
1226         }
1227         default:
1228                 break;
1229         }
1230
1231         return updated;
1232 }
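/*
 * Example of the THREAD_CREATE / THREAD_JOIN handling above (a sketch;
 * assumes the checker's C11-style <threads.h> wrappers and user_main):
 *
 *   int data;     // non-atomic
 *
 *   static void worker(void *arg) { data = 42; }
 *
 *   int user_main(int argc, char **argv)
 *   {
 *           thrd_t t;
 *           thrd_create(&t, (thrd_start_t)&worker, NULL); // THREAD_CREATE
 *           thrd_join(t);                                 // THREAD_JOIN
 *           return data;  // 42: the join synchronized with worker's finish
 *   }
 */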
1233
1234 /**
1235  * @brief Process the current action for release sequence fixup activity
1236  *
1237  * Performs model-checker release sequence fixups for the current action,
1238  * forcing a single pending release sequence to break (with a given, potential
1239  * "loose" write) or to complete (i.e., synchronize). If a pending release
1240  * sequence forms a complete release sequence, then we must perform the fixup
1241  * synchronization, mo_graph additions, etc.
1242  *
1243  * @param curr The current action; must be a release sequence fixup action
1244  * @param work_queue The work queue to which to add work items as they are
1245  * generated
1246  */
1247 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1248 {
1249         const ModelAction *write = curr->get_node()->get_relseq_break();
1250         struct release_seq *sequence = pending_rel_seqs->back();
1251         pending_rel_seqs->pop_back();
1252         ASSERT(sequence);
1253         ModelAction *acquire = sequence->acquire;
1254         const ModelAction *rf = sequence->rf;
1255         const ModelAction *release = sequence->release;
1256         ASSERT(acquire);
1257         ASSERT(release);
1258         ASSERT(rf);
1259         ASSERT(release->same_thread(rf));
1260
1261         if (write == NULL) {
1262                 /**
1263                  * @todo Forcing a synchronization requires that we set
1264                  * modification order constraints. For instance, we can't allow
1265                  * a fixup sequence in which two separate read-acquire
1266                  * operations read from the same sequence, where the first one
1267                  * synchronizes and the other doesn't. Essentially, we can't
1268                  * allow any writes to insert themselves between 'release' and
1269                  * 'rf'
1270                  */
1271
1272                 /* Must synchronize */
1273                 if (!acquire->synchronize_with(release)) {
1274                         set_bad_synchronization();
1275                         return;
1276                 }
1277                 /* Re-check all pending release sequences */
1278                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1279                 /* Re-check act for mo_graph edges */
1280                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1281
1282                 /* propagate synchronization to later actions */
1283                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1284                 for (; (*rit) != acquire; rit++) {
1285                         ModelAction *propagate = *rit;
1286                         if (acquire->happens_before(propagate)) {
1287                                 propagate->synchronize_with(acquire);
1288                                 /* Re-check 'propagate' for mo_graph edges */
1289                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1290                         }
1291                 }
1292         } else {
1293                 /* Break release sequence with new edges:
1294                  *   release --mo--> write --mo--> rf */
1295                 mo_graph->addEdge(release, write);
1296                 mo_graph->addEdge(write, rf);
1297         }
1298
1299         /* See if we have realized a data race */
1300         checkDataRaces();
1301 }
1302
1303 /**
1304  * Initialize the current action by performing one or more of the following
1305  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1306  * in the NodeStack, manipulating backtracking sets, allocating and
1307  * initializing clock vectors, and computing the promises to fulfill.
1308  *
1309  * @param curr The current action, as passed from the user context; may be
1310  * freed/invalidated after the execution of this function, with a different
1311  * action "returned" its place (pass-by-reference)
1312  * @return True if curr is a newly-explored action; false otherwise
1313  */
1314 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1315 {
1316         ModelAction *newcurr;
1317
1318         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1319                 newcurr = process_rmw(*curr);
1320                 delete *curr;
1321
1322                 if (newcurr->is_rmw())
1323                         compute_promises(newcurr);
1324
1325                 *curr = newcurr;
1326                 return false;
1327         }
1328
1329         (*curr)->set_seq_number(get_next_seq_num());
1330
1331         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1332         if (newcurr) {
1333                 /* First restore type and order in case of RMW operation */
1334                 if ((*curr)->is_rmwr())
1335                         newcurr->copy_typeandorder(*curr);
1336
1337                 ASSERT((*curr)->get_location() == newcurr->get_location());
1338                 newcurr->copy_from_new(*curr);
1339
1340                 /* Discard duplicate ModelAction; use action from NodeStack */
1341                 delete *curr;
1342
1343                 /* Always compute new clock vector */
1344                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1345
1346                 *curr = newcurr;
1347                 return false; /* Action was explored previously */
1348         } else {
1349                 newcurr = *curr;
1350
1351                 /* Always compute new clock vector */
1352                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1353
1354                 /* Assign most recent release fence */
1355                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1356
1357                 /*
1358                  * Perform one-time actions when pushing new ModelAction onto
1359                  * NodeStack
1360                  */
1361                 if (newcurr->is_write())
1362                         compute_promises(newcurr);
1363                 else if (newcurr->is_relseq_fixup())
1364                         compute_relseq_breakwrites(newcurr);
1365                 else if (newcurr->is_wait())
1366                         newcurr->get_node()->set_misc_max(2);
1367                 else if (newcurr->is_notify_one()) {
1368                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1369                 }
1370                 return true; /* This was a new ModelAction */
1371         }
1372 }
1373
1374 /**
1375  * @brief Establish reads-from relation between two actions
1376  *
1377  * Perform basic operations involved with establishing a concrete rf relation,
1378  * including setting the ModelAction data and checking for release sequences.
1379  *
1380  * @param act The action that is reading (must be a read)
1381  * @param rf The action from which we are reading (must be a write)
1382  *
1383  * @return True if this read established synchronization
1384  */
1385 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1386 {
1387         ASSERT(rf);
1388         ASSERT(rf->is_write());
1389
1390         act->set_read_from(rf);
1391         if (act->is_acquire()) {
1392                 rel_heads_list_t release_heads;
1393                 get_release_seq_heads(act, act, &release_heads);
1394                 int num_heads = release_heads.size();
1395                 for (unsigned int i = 0; i < release_heads.size(); i++)
1396                         if (!act->synchronize_with(release_heads[i])) {
1397                                 set_bad_synchronization();
1398                                 num_heads--;
1399                         }
1400                 return num_heads > 0;
1401         }
1402         return false;
1403 }
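
/*
 * Illustrative sketch (not part of the checker): in terms of the C++11 atomic
 * API exercised by test programs, read_from() is what ties an acquire load to
 * the release store it observes, e.g.:
 *
 *   std::atomic<int> flag(0);
 *   // Thread 1:
 *   flag.store(1, std::memory_order_release);
 *   // Thread 2:
 *   if (flag.load(std::memory_order_acquire) == 1) {
 *           // read_from(load, store) locates the release head (the store) and
 *           // synchronizes the two actions, so it returns true here
 *   }
 */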
1404
1405 /**
1406  * Check promises and eliminate potentially-satisfying threads when a thread is
1407  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1408  * no longer satisfy a promise generated from that thread.
1409  *
1410  * @param blocker The thread on which a thread is waiting
1411  * @param waiting The waiting thread
1412  */
1413 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1414 {
1415         for (unsigned int i = 0; i < promises->size(); i++) {
1416                 Promise *promise = (*promises)[i];
1417                 if (!promise->thread_is_available(waiting->get_id()))
1418                         continue;
1419                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1420                         ModelAction *reader = promise->get_reader(j);
1421                         if (reader->get_tid() != blocker->get_id())
1422                                 continue;
1423                         if (promise->eliminate_thread(waiting->get_id())) {
1424                                 /* Promise has failed */
1425                                 priv->failed_promise = true;
1426                         } else {
1427                                 /* Only eliminate the 'waiting' thread once */
1428                                 return;
1429                         }
1430                 }
1431         }
1432 }
1433
1434 /**
1435  * @brief Check whether a model action is enabled.
1436  *
1437  * Checks whether a lock or join operation would be successful (i.e., the lock
1438  * is not already held, or the joined thread has already completed). If not,
1439  * put the action in a waiter list.
1440  *
1441  * @param curr The ModelAction to check
1442  * @return True if the action is enabled; false otherwise
1443  */
1444 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1445         if (curr->is_lock()) {
1446                 std::mutex *lock = (std::mutex *)curr->get_location();
1447                 struct std::mutex_state *state = lock->get_state();
1448                 if (state->locked) {
1449                         //Stick the action in the appropriate waiting queue
1450                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1451                         return false;
1452                 }
1453         } else if (curr->get_type() == THREAD_JOIN) {
1454                 Thread *blocking = (Thread *)curr->get_location();
1455                 if (!blocking->is_complete()) {
1456                         blocking->push_wait_list(curr);
1457                         thread_blocking_check_promises(blocking, get_thread(curr));
1458                         return false;
1459                 }
1460         }
1461
1462         return true;
1463 }
1464
1465 /**
1466  * This is the heart of the model checker routine. It performs model-checking
1467  * actions corresponding to a given "current action." Among other tasks, it
1468  * calculates reads-from relationships, updates synchronization clock vectors,
1469  * forms a memory_order constraints graph, and handles replay/backtrack
1470  * execution when running permutations of previously-observed executions.
1471  *
1472  * @param curr The current action to process
1473  * @return The ModelAction that is actually executed; may be different than
1474  * curr; may be NULL, if the current action is not enabled to run
1475  */
1476 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1477 {
1478         ASSERT(curr);
1479         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1480
1481         if (!check_action_enabled(curr)) {
1482                 /* Make the execution look like we chose to run this action
1483                  * much later, when a lock/join can succeed */
1484                 get_thread(curr)->set_pending(curr);
1485                 scheduler->sleep(get_thread(curr));
1486                 return NULL;
1487         }
1488
1489         bool newly_explored = initialize_curr_action(&curr);
1490
1491         DBG();
1492         if (DBG_ENABLED())
1493                 curr->print();
1494
1495         wake_up_sleeping_actions(curr);
1496
1497         /* Compute fairness information for CHESS yield algorithm */
1498         if (model->params.yieldon) {
1499                 curr->get_node()->update_yield(scheduler);
1500         }
1501
1502         /* Add the action to lists before any other model-checking tasks */
1503         if (!second_part_of_rmw)
1504                 add_action_to_lists(curr);
1505
1506         /* Build may_read_from set for newly-created actions */
1507         if (newly_explored && curr->is_read())
1508                 build_may_read_from(curr);
1509
1510         /* Initialize work_queue with the "current action" work */
1511         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1512         while (!work_queue.empty() && !has_asserted()) {
1513                 WorkQueueEntry work = work_queue.front();
1514                 work_queue.pop_front();
1515
1516                 switch (work.type) {
1517                 case WORK_CHECK_CURR_ACTION: {
1518                         ModelAction *act = work.action;
1519                         bool update = false; /* update this location's release seq's */
1520                         bool update_all = false; /* update all release seq's */
1521
1522                         if (process_thread_action(curr))
1523                                 update_all = true;
1524
1525                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1526                                 update = true;
1527
1528                         if (act->is_write() && process_write(act))
1529                                 update = true;
1530
1531                         if (act->is_fence() && process_fence(act))
1532                                 update_all = true;
1533
1534                         if (act->is_mutex_op() && process_mutex(act))
1535                                 update_all = true;
1536
1537                         if (act->is_relseq_fixup())
1538                                 process_relseq_fixup(curr, &work_queue);
1539
1540                         if (update_all)
1541                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1542                         else if (update)
1543                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1544                         break;
1545                 }
1546                 case WORK_CHECK_RELEASE_SEQ:
1547                         resolve_release_sequences(work.location, &work_queue);
1548                         break;
1549                 case WORK_CHECK_MO_EDGES: {
1550                         /** @todo Complete verification of work_queue */
1551                         ModelAction *act = work.action;
1552                         bool updated = false;
1553
1554                         if (act->is_read()) {
1555                                 const ModelAction *rf = act->get_reads_from();
1556                                 const Promise *promise = act->get_reads_from_promise();
1557                                 if (rf) {
1558                                         if (r_modification_order(act, rf))
1559                                                 updated = true;
1560                                 } else if (promise) {
1561                                         if (r_modification_order(act, promise))
1562                                                 updated = true;
1563                                 }
1564                         }
1565                         if (act->is_write()) {
1566                                 if (w_modification_order(act, NULL))
1567                                         updated = true;
1568                         }
1569                         mo_graph->commitChanges();
1570
1571                         if (updated)
1572                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1573                         break;
1574                 }
1575                 default:
1576                         ASSERT(false);
1577                         break;
1578                 }
1579         }
1580
1581         check_curr_backtracking(curr);
1582         set_backtracking(curr);
1583         return curr;
1584 }
1585
1586 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1587 {
1588         Node *currnode = curr->get_node();
1589         Node *parnode = currnode->get_parent();
1590
1591         if ((parnode && !parnode->backtrack_empty()) ||
1592                          !currnode->misc_empty() ||
1593                          !currnode->read_from_empty() ||
1594                          !currnode->promise_empty() ||
1595                          !currnode->relseq_break_empty()) {
1596                 set_latest_backtrack(curr);
1597         }
1598 }
1599
1600 bool ModelChecker::promises_expired() const
1601 {
1602         for (unsigned int i = 0; i < promises->size(); i++) {
1603                 Promise *promise = (*promises)[i];
1604                 if (promise->get_expiration() < priv->used_sequence_numbers)
1605                         return true;
1606         }
1607         return false;
1608 }
1609
1610 /**
1611  * This is the strongest feasibility check available.
1612  * @return whether the current trace (partial or complete) must be a prefix of
1613  * a feasible trace.
1614  */
1615 bool ModelChecker::isfeasibleprefix() const
1616 {
1617         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1618 }
1619
1620 /**
1621  * Print diagnostic information about an infeasible execution
1622  * @param prefix A string to prefix the output with; if NULL, then a default
1623  * message prefix will be provided
1624  */
1625 void ModelChecker::print_infeasibility(const char *prefix) const
1626 {
1627         char buf[100];
1628         char *ptr = buf;
1629         if (mo_graph->checkForCycles())
1630                 ptr += sprintf(ptr, "[mo cycle]");
1631         if (priv->failed_promise)
1632                 ptr += sprintf(ptr, "[failed promise]");
1633         if (priv->too_many_reads)
1634                 ptr += sprintf(ptr, "[too many reads]");
1635         if (priv->no_valid_reads)
1636                 ptr += sprintf(ptr, "[no valid reads-from]");
1637         if (priv->bad_synchronization)
1638                 ptr += sprintf(ptr, "[bad sw ordering]");
1639         if (promises_expired())
1640                 ptr += sprintf(ptr, "[promise expired]");
1641         if (promises->size() != 0)
1642                 ptr += sprintf(ptr, "[unresolved promise]");
1643         if (ptr != buf)
1644                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1645 }
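
/*
 * For example (hypothetical output), an execution that is infeasible because
 * of both a modification-order cycle and an expired promise would print:
 *
 *   Infeasible: [mo cycle][promise expired]
 */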
1646
1647 /**
1648  * Returns whether the current completed trace is feasible, except for pending
1649  * release sequences.
1650  */
1651 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1652 {
1653         return !is_infeasible() && promises->size() == 0;
1654 }
1655
1656 /**
1657  * Check if the current partial trace is infeasible. Does not check any
1658  * end-of-execution flags, which might rule out the execution. Thus, this is
1659  * useful only for ruling an execution as infeasible.
1660  * @return whether the current partial trace is infeasible.
1661  */
1662 bool ModelChecker::is_infeasible() const
1663 {
1664         return mo_graph->checkForCycles() ||
1665                 priv->no_valid_reads ||
1666                 priv->failed_promise ||
1667                 priv->too_many_reads ||
1668                 priv->bad_synchronization ||
1669                 promises_expired();
1670 }
1671
1672 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1673 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1674         ModelAction *lastread = get_last_action(act->get_tid());
1675         lastread->process_rmw(act);
1676         if (act->is_rmw()) {
1677                 if (lastread->get_reads_from())
1678                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1679                 else
1680                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1681                 mo_graph->commitChanges();
1682         }
1683         return lastread;
1684 }
1685
1686 /**
1687  * A helper function for ModelChecker::check_recency, to check if the current
1688  * thread is able to read from a different write/promise for 'params.maxreads'
1689  * steps and whether that write/promise should become visible (i.e., is
1690  * ordered later in the modification order). This helps model memory liveness.
1691  *
1692  * @param curr The current action. Must be a read.
1693  * @param rf The write/promise from which we plan to read
1694  * @param other_rf The write/promise from which we may read
1695  * @return True if we were able to read from other_rf for params.maxreads steps
1696  */
1697 template <typename T, typename U>
1698 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1699 {
1700         /* Need a different write/promise */
1701         if (other_rf->equals(rf))
1702                 return false;
1703
1704         /* Only look for "newer" writes/promises */
1705         if (!mo_graph->checkReachable(rf, other_rf))
1706                 return false;
1707
1708         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1709         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1710         action_list_t::reverse_iterator rit = list->rbegin();
1711         ASSERT((*rit) == curr);
1712         /* Skip past curr */
1713         rit++;
1714
1715         /* Does this write/promise work for everyone? */
1716         for (int i = 0; i < params.maxreads; i++, rit++) {
1717                 ModelAction *act = *rit;
1718                 if (!act->may_read_from(other_rf))
1719                         return false;
1720         }
1721         return true;
1722 }
1723
1724 /**
1725  * Checks whether a thread has read from the same write or Promise too many
1726  * times without seeing the effects of a later write/Promise.
1727  *
1728  * Basic idea:
1729  * 1) there must be a different write/promise that we could read from,
1730  * 2) we must have read from the same write/promise in excess of maxreads times,
1731  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1732  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1733  *
1734  * If so, we decide that the execution is no longer feasible.
1735  *
1736  * @param curr The current action. Must be a read.
1737  * @param rf The ModelAction/Promise from which we might read.
1738  * @return True if the read should succeed; false otherwise
1739  */
1740 template <typename T>
1741 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1742 {
1743         if (!params.maxreads)
1744                 return true;
1745
1746         //NOTE: The next check is just an optimization, not strictly necessary
1747         if (curr->get_node()->get_read_from_past_size() +
1748                         curr->get_node()->get_read_from_promise_size() <= 1)
1749                 return true;
1750
1751         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1752         int tid = id_to_int(curr->get_tid());
1753         ASSERT(tid < (int)thrd_lists->size());
1754         action_list_t *list = &(*thrd_lists)[tid];
1755         action_list_t::reverse_iterator rit = list->rbegin();
1756         ASSERT((*rit) == curr);
1757         /* Skip past curr */
1758         rit++;
1759
1760         action_list_t::reverse_iterator ritcopy = rit;
1761         /* See if we have enough reads from the same value */
1762         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1763                 if (ritcopy == list->rend())
1764                         return true;
1765                 ModelAction *act = *ritcopy;
1766                 if (!act->is_read())
1767                         return true;
1768                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1769                         return true;
1770                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1771                         return true;
1772                 if (act->get_node()->get_read_from_past_size() +
1773                                 act->get_node()->get_read_from_promise_size() <= 1)
1774                         return true;
1775         }
1776         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1777                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1778                 if (should_read_instead(curr, rf, write))
1779                         return false; /* liveness failure */
1780         }
1781         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1782                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1783                 if (should_read_instead(curr, rf, promise))
1784                         return false; /* liveness failure */
1785         }
1786         return true;
1787 }
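
/*
 * Illustrative sketch (not part of the checker): the typical target of this
 * liveness check is a spin loop that keeps reading a stale value, e.g., with
 * params.maxreads set:
 *
 *   std::atomic<bool> done(false);
 *   // Thread 1:
 *   done.store(true, std::memory_order_relaxed);
 *   // Thread 2:
 *   while (!done.load(std::memory_order_relaxed))
 *           ;       // after maxreads consecutive reads of 'false', with the
 *                   // store of 'true' available and mod-ordered later,
 *                   // check_recency() returns false and the execution is
 *                   // treated as infeasible (too many reads)
 */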
1788
1789 /**
1790  * Updates the mo_graph with the constraints imposed from the current
1791  * read.
1792  *
1793  * Basic idea is the following: Go through each other thread and find
1794  * the last action that happened before our read.  Two cases:
1795  *
1796  * (1) The action is a write => that write must either occur before
1797  * the write we read from or be the write we read from.
1798  *
1799  * (2) The action is a read => the write that that action read from
1800  * must occur before the write we read from or be the same write.
1801  *
1802  * @param curr The current action. Must be a read.
1803  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1804  * @return True if modification order edges were added; false otherwise
1805  */
1806 template <typename rf_type>
1807 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1808 {
1809         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1810         unsigned int i;
1811         bool added = false;
1812         ASSERT(curr->is_read());
1813
1814         /* Last SC fence in the current thread */
1815         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1816         ModelAction *last_sc_write = NULL;
1817         if (curr->is_seqcst())
1818                 last_sc_write = get_last_seq_cst_write(curr);
1819
1820         /* Iterate over all threads */
1821         for (i = 0; i < thrd_lists->size(); i++) {
1822                 /* Last SC fence in thread i */
1823                 ModelAction *last_sc_fence_thread_local = NULL;
1824                 if (int_to_id((int)i) != curr->get_tid())
1825                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1826
1827                 /* Last SC fence in thread i, before last SC fence in current thread */
1828                 ModelAction *last_sc_fence_thread_before = NULL;
1829                 if (last_sc_fence_local)
1830                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1831
1832                 /* Iterate over actions in thread, starting from most recent */
1833                 action_list_t *list = &(*thrd_lists)[i];
1834                 action_list_t::reverse_iterator rit;
1835                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1836                         ModelAction *act = *rit;
1837
1838                         /* Skip curr */
1839                         if (act == curr)
1840                                 continue;
1841                         /* Don't want to add reflexive edges on 'rf' */
1842                         if (act->equals(rf)) {
1843                                 if (act->happens_before(curr))
1844                                         break;
1845                                 else
1846                                         continue;
1847                         }
1848
1849                         if (act->is_write()) {
1850                                 /* C++, Section 29.3 statement 5 */
1851                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1852                                                 *act < *last_sc_fence_thread_local) {
1853                                         added = mo_graph->addEdge(act, rf) || added;
1854                                         break;
1855                                 }
1856                                 /* C++, Section 29.3 statement 4 */
1857                                 else if (act->is_seqcst() && last_sc_fence_local &&
1858                                                 *act < *last_sc_fence_local) {
1859                                         added = mo_graph->addEdge(act, rf) || added;
1860                                         break;
1861                                 }
1862                                 /* C++, Section 29.3 statement 6 */
1863                                 else if (last_sc_fence_thread_before &&
1864                                                 *act < *last_sc_fence_thread_before) {
1865                                         added = mo_graph->addEdge(act, rf) || added;
1866                                         break;
1867                                 }
1868                         }
1869
1870                         /* C++, Section 29.3 statement 3 (second subpoint) */
1871                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1872                                 added = mo_graph->addEdge(act, rf) || added;
1873                                 break;
1874                         }
1875
1876                         /*
1877                          * Include at most one act per-thread that "happens
1878                          * before" curr
1879                          */
1880                         if (act->happens_before(curr)) {
1881                                 if (act->is_write()) {
1882                                         added = mo_graph->addEdge(act, rf) || added;
1883                                 } else {
1884                                         const ModelAction *prevrf = act->get_reads_from();
1885                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1886                                         if (prevrf) {
1887                                                 if (!prevrf->equals(rf))
1888                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1889                                         } else if (!prevrf_promise->equals(rf)) {
1890                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1891                                         }
1892                                 }
1893                                 break;
1894                         }
1895                 }
1896         }
1897
1898         /*
1899          * All compatible, thread-exclusive promises must be ordered after any
1900          * concrete loads from the same thread
1901          */
1902         for (unsigned int i = 0; i < promises->size(); i++)
1903                 if ((*promises)[i]->is_compatible_exclusive(curr))
1904                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1905
1906         return added;
1907 }
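
/*
 * Illustrative sketch (not part of the checker): for case (1) above, a write
 * that happens before our read must be mod-ordered before the write we read
 * from, e.g.:
 *
 *   std::atomic<int> x(0);
 *   x.store(1, std::memory_order_relaxed);        // W
 *   int r = x.load(std::memory_order_relaxed);    // R; if R reads from some
 *                                                 // other write W' (say, from
 *                                                 // a concurrent thread),
 *                                                 // r_modification_order(R, W')
 *                                                 // adds the edge W --mo--> W'
 */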
1908
1909 /**
1910  * Updates the mo_graph with the constraints imposed from the current write.
1911  *
1912  * Basic idea is the following: Go through each other thread and find
1913  * the latest action that happened before our write.  Two cases:
1914  *
1915  * (1) The action is a write => that write must occur before
1916  * the current write
1917  *
1918  * (2) The action is a read => the write that that action read from
1919  * must occur before the current write.
1920  *
1921  * This method also handles two other issues:
1922  *
1923  * (I) Sequential Consistency: Making sure that if the current write is
1924  * seq_cst, it occurs after the previous seq_cst write.
1925  *
1926  * (II) Sending the write back to non-synchronizing reads.
1927  *
1928  * @param curr The current action. Must be a write.
1929  * @param send_fv A vector for stashing reads to which we may pass our future
1930  * value. If NULL, then don't record any future values.
1931  * @return True if modification order edges were added; false otherwise
1932  */
1933 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1934 {
1935         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1936         unsigned int i;
1937         bool added = false;
1938         ASSERT(curr->is_write());
1939
1940         if (curr->is_seqcst()) {
1941                 /* We have to at least see the last sequentially consistent write,
1942                  * so we are initialized. */
1943                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1944                 if (last_seq_cst != NULL) {
1945                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1946                 }
1947         }
1948
1949         /* Last SC fence in the current thread */
1950         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1951
1952         /* Iterate over all threads */
1953         for (i = 0; i < thrd_lists->size(); i++) {
1954                 /* Last SC fence in thread i, before last SC fence in current thread */
1955                 ModelAction *last_sc_fence_thread_before = NULL;
1956                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1957                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1958
1959                 /* Iterate over actions in thread, starting from most recent */
1960                 action_list_t *list = &(*thrd_lists)[i];
1961                 action_list_t::reverse_iterator rit;
1962                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1963                         ModelAction *act = *rit;
1964                         if (act == curr) {
1965                                 /*
1966                                  * 1) If RMW and it actually read from something, then we
1967                                  * already have all relevant edges, so just skip to next
1968                                  * thread.
1969                                  *
1970                                  * 2) If RMW and it didn't read from anything, we should take
1971                                  * whatever edge we can get to speed up convergence.
1972                                  *
1973                                  * 3) If normal write, we need to look at earlier actions, so
1974                                  * continue processing list.
1975                                  */
1976                                 if (curr->is_rmw()) {
1977                                         if (curr->get_reads_from() != NULL)
1978                                                 break;
1979                                         else
1980                                                 continue;
1981                                 } else
1982                                         continue;
1983                         }
1984
1985                         /* C++, Section 29.3 statement 7 */
1986                         if (last_sc_fence_thread_before && act->is_write() &&
1987                                         *act < *last_sc_fence_thread_before) {
1988                                 added = mo_graph->addEdge(act, curr) || added;
1989                                 break;
1990                         }
1991
1992                         /*
1993                          * Include at most one act per-thread that "happens
1994                          * before" curr
1995                          */
1996                         if (act->happens_before(curr)) {
1997                                 /*
1998                                  * Note: if act is RMW, just add edge:
1999                                  *   act --mo--> curr
2000                                  * The following edge should be handled elsewhere:
2001                                  *   readfrom(act) --mo--> act
2002                                  */
2003                                 if (act->is_write())
2004                                         added = mo_graph->addEdge(act, curr) || added;
2005                                 else if (act->is_read()) {
2006                                         //if the previous read did not read from anything, just keep going
2007                                         if (act->get_reads_from() == NULL)
2008                                                 continue;
2009                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2010                                 }
2011                                 break;
2012                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2013                                                      !act->same_thread(curr)) {
2014                                 /* We have an action that:
2015                                    (1) did not happen before us
2016                                    (2) is a read and we are a write
2017                                    (3) cannot synchronize with us
2018                                    (4) is in a different thread
2019                                    =>
2020                                    that read could potentially read from our write.  Note that
2021                                    these checks are overly conservative at this point; we'll
2022                                    do more checks before actually removing the
2023                                    pendingfuturevalue.
2024
2025                                  */
2026                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2027                                         if (!is_infeasible())
2028                                                 send_fv->push_back(act);
2029                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2030                                                 add_future_value(curr, act);
2031                                 }
2032                         }
2033                 }
2034         }
2035
2036         /*
2037          * All compatible, thread-exclusive promises must be ordered after any
2038          * concrete stores to the same thread, or else they can be merged with
2039          * this store later
2040          */
2041         for (unsigned int i = 0; i < promises->size(); i++)
2042                 if ((*promises)[i]->is_compatible_exclusive(curr))
2043                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2044
2045         return added;
2046 }
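
/*
 * Illustrative sketch (not part of the checker): for case (2) above, a read
 * that happens before the current write forces the write it read from to be
 * mod-ordered before the current write, e.g.:
 *
 *   std::atomic<int> x(0);
 *   int r = x.load(std::memory_order_relaxed);    // R, reads from some W'
 *   x.store(r + 1, std::memory_order_relaxed);    // W (sequenced after R)
 *                                                 // => edge W' --mo--> W
 */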
2047
2048 /** Arbitrary reads from the future are not allowed.  Section 29.3
2049  * part 9 places some constraints.  This method checks one consequence of that
2050  * constraint.  Others require compiler support. */
2051 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
2052 {
2053         if (!writer->is_rmw())
2054                 return true;
2055
2056         if (!reader->is_rmw())
2057                 return true;
2058
2059         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2060                 if (search == reader)
2061                         return false;
2062                 if (search->get_tid() == reader->get_tid() &&
2063                                 search->happens_before(reader))
2064                         break;
2065         }
2066
2067         return true;
2068 }
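
/*
 * Illustrative sketch (not part of the checker): the rejected pattern is a
 * pair of RMWs feeding each other's values out of thin air, e.g.:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int a = x.fetch_add(1, std::memory_order_relaxed);   // 'reader'
 *   // Thread 2:
 *   int b = x.fetch_add(1, std::memory_order_relaxed);   // 'writer'
 *
 * If the writer's reads-from chain already leads back to the reader, then
 * letting the reader also read from the writer would close a reads-from cycle
 * among RMWs, so the pending future value is disallowed.
 */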
2069
2070 /**
2071  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2072  * some constraints. This method checks the following constraint (others
2073  * require compiler support):
2074  *
2075  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2076  */
2077 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2078 {
2079         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2080         unsigned int i;
2081         /* Iterate over all threads */
2082         for (i = 0; i < thrd_lists->size(); i++) {
2083                 const ModelAction *write_after_read = NULL;
2084
2085                 /* Iterate over actions in thread, starting from most recent */
2086                 action_list_t *list = &(*thrd_lists)[i];
2087                 action_list_t::reverse_iterator rit;
2088                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2089                         ModelAction *act = *rit;
2090
2091                         /* Don't disallow due to act == reader */
2092                         if (!reader->happens_before(act) || reader == act)
2093                                 break;
2094                         else if (act->is_write())
2095                                 write_after_read = act;
2096                         else if (act->is_read() && act->get_reads_from() != NULL)
2097                                 write_after_read = act->get_reads_from();
2098                 }
2099
2100                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2101                         return false;
2102         }
2103         return true;
2104 }
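
/*
 * Illustrative sketch (not part of the checker): with X --hb--> Y --mo--> Z,
 * X must not read from Z, e.g.:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int a = x.load(std::memory_order_relaxed);    // X, considering a future value
 *   x.store(1, std::memory_order_relaxed);        // Y (sequenced after X)
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);        // Z, with Y --mo--> Z
 *
 * If Y is already mod-ordered before Z, mo_may_allow(Z, X) returns false and
 * X may not read the value 2 from Z.
 */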
2105
2106 /**
2107  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2108  * The ModelAction under consideration is expected to be taking part in
2109  * release/acquire synchronization as an object of the "reads from" relation.
2110  * Note that this can only provide release sequence support for RMW chains
2111  * which do not read from the future, as those actions cannot be traced until
2112  * their "promise" is fulfilled. Similarly, we may not even establish the
2113  * presence of a release sequence with certainty, as some modification order
2114  * constraints may be decided further in the future. Thus, this function
2115  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2116  * and a boolean representing certainty.
2117  *
2118  * @param rf The action that might be part of a release sequence. Must be a
2119  * write.
2120  * @param release_heads A pass-by-reference style return parameter. After
2121  * execution of this function, release_heads will contain the heads of all the
2122  * relevant release sequences, if any exist with certainty
2123  * @param pending A pass-by-reference style return parameter which is only used
2124  * when returning false (i.e., uncertain). Returns most information regarding
2125  * an uncertain release sequence, including any write operations that might
2126  * break the sequence.
2127  * @return true, if the ModelChecker is certain that release_heads is complete;
2128  * false otherwise
2129  */
2130 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2131                 rel_heads_list_t *release_heads,
2132                 struct release_seq *pending) const
2133 {
2134         /* Only check for release sequences if there are no cycles */
2135         if (mo_graph->checkForCycles())
2136                 return false;
2137
2138         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2139                 ASSERT(rf->is_write());
2140
2141                 if (rf->is_release())
2142                         release_heads->push_back(rf);
2143                 else if (rf->get_last_fence_release())
2144                         release_heads->push_back(rf->get_last_fence_release());
2145                 if (!rf->is_rmw())
2146                         break; /* End of RMW chain */
2147
2148                 /** @todo Need to be smarter here...  In the linux lock
2149                  * example, this will run to the beginning of the program for
2150                  * every acquire. */
2151                 /** @todo The way to be smarter here is to keep going until one
2152                  * thread has a release preceded by an acquire and you've seen
2153                  * both. */
2154
2155                 /* acq_rel RMW is a sufficient stopping condition */
2156                 if (rf->is_acquire() && rf->is_release())
2157                         return true; /* complete */
2158         }
2159         if (!rf) {
2160                 /* read from future: need to settle this later */
2161                 pending->rf = NULL;
2162                 return false; /* incomplete */
2163         }
2164
2165         if (rf->is_release())
2166                 return true; /* complete */
2167
2168         /* else relaxed write
2169          * - check for fence-release in the same thread (29.8, stmt. 3)
2170          * - check modification order for contiguous subsequence
2171          *   -> rf must be same thread as release */
2172
2173         const ModelAction *fence_release = rf->get_last_fence_release();
2174         /* Synchronize with a fence-release unconditionally; we don't need to
2175          * find any more "contiguous subsequence..." for it */
2176         if (fence_release)
2177                 release_heads->push_back(fence_release);
2178
2179         int tid = id_to_int(rf->get_tid());
2180         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2181         action_list_t *list = &(*thrd_lists)[tid];
2182         action_list_t::const_reverse_iterator rit;
2183
2184         /* Find rf in the thread list */
2185         rit = std::find(list->rbegin(), list->rend(), rf);
2186         ASSERT(rit != list->rend());
2187
2188         /* Find the last {write,fence}-release */
2189         for (; rit != list->rend(); rit++) {
2190                 if (fence_release && *(*rit) < *fence_release)
2191                         break;
2192                 if ((*rit)->is_release())
2193                         break;
2194         }
2195         if (rit == list->rend()) {
2196                 /* No write-release in this thread */
2197                 return true; /* complete */
2198         } else if (fence_release && *(*rit) < *fence_release) {
2199                 /* The fence-release is more recent (and so, "stronger") than
2200                  * the most recent write-release */
2201                 return true; /* complete */
2202         } /* else, need to establish contiguous release sequence */
2203         ModelAction *release = *rit;
2204
2205         ASSERT(rf->same_thread(release));
2206
2207         pending->writes.clear();
2208
2209         bool certain = true;
2210         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2211                 if (id_to_int(rf->get_tid()) == (int)i)
2212                         continue;
2213                 list = &(*thrd_lists)[i];
2214
2215                 /* Can we ensure no future writes from this thread may break
2216                  * the release seq? */
2217                 bool future_ordered = false;
2218
2219                 ModelAction *last = get_last_action(int_to_id(i));
2220                 Thread *th = get_thread(int_to_id(i));
2221                 if ((last && rf->happens_before(last)) ||
2222                                 !is_enabled(th) ||
2223                                 th->is_complete())
2224                         future_ordered = true;
2225
2226                 ASSERT(!th->is_model_thread() || future_ordered);
2227
2228                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2229                         const ModelAction *act = *rit;
2230                         /* Reach synchronization -> this thread is complete */
2231                         if (act->happens_before(release))
2232                                 break;
2233                         if (rf->happens_before(act)) {
2234                                 future_ordered = true;
2235                                 continue;
2236                         }
2237
2238                         /* Only non-RMW writes can break release sequences */
2239                         if (!act->is_write() || act->is_rmw())
2240                                 continue;
2241
2242                         /* Check modification order */
2243                         if (mo_graph->checkReachable(rf, act)) {
2244                                 /* rf --mo--> act */
2245                                 future_ordered = true;
2246                                 continue;
2247                         }
2248                         if (mo_graph->checkReachable(act, release))
2249                                 /* act --mo--> release */
2250                                 break;
2251                         if (mo_graph->checkReachable(release, act) &&
2252                                       mo_graph->checkReachable(act, rf)) {
2253                                 /* release --mo-> act --mo--> rf */
2254                                 return true; /* complete */
2255                         }
2256                         /* act may break release sequence */
2257                         pending->writes.push_back(act);
2258                         certain = false;
2259                 }
2260                 if (!future_ordered)
2261                         certain = false; /* This thread is uncertain */
2262         }
2263
2264         if (certain) {
2265                 release_heads->push_back(release);
2266                 pending->writes.clear();
2267         } else {
2268                 pending->release = release;
2269                 pending->rf = rf;
2270         }
2271         return certain;
2272 }
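
/*
 * Illustrative sketch (not part of the checker): a release store followed by
 * RMWs forms a release sequence headed by that store, e.g.:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);       // head of the sequence
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 *   // Thread 3:
 *   int v = x.load(std::memory_order_acquire);   // reading from the RMW still
 *                                                // synchronizes with Thread 1
 *
 * Walking the RMW's reads-from chain above reaches the release store, which
 * release_seq_heads() then pushes onto release_heads.
 */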
2273
2274 /**
2275  * An interface for getting the release sequence head(s) with which a
2276  * given ModelAction must synchronize. This function only returns a non-empty
2277  * result when it can locate a release sequence head with certainty. Otherwise,
2278  * it may mark the internal state of the ModelChecker so that it will handle
2279  * the release sequence at a later time, causing @a acquire to update its
2280  * synchronization at some later point in execution.
2281  *
2282  * @param acquire The 'acquire' action that may synchronize with a release
2283  * sequence
2284  * @param read The read action that may read from a release sequence; this may
2285  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2286  * when 'acquire' is a fence-acquire)
2287  * @param release_heads A pass-by-reference return parameter. Will be filled
2288  * with the head(s) of the release sequence(s), if they exist with certainty.
2289  * @see ModelChecker::release_seq_heads
2290  */
2291 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2292                 ModelAction *read, rel_heads_list_t *release_heads)
2293 {
2294         const ModelAction *rf = read->get_reads_from();
2295         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2296         sequence->acquire = acquire;
2297         sequence->read = read;
2298
2299         if (!release_seq_heads(rf, release_heads, sequence)) {
2300                 /* add act to 'lazy checking' list */
2301                 pending_rel_seqs->push_back(sequence);
2302         } else {
2303                 snapshot_free(sequence);
2304         }
2305 }
2306
2307 /**
2308  * Attempt to resolve all stashed operations that might synchronize with a
2309  * release sequence for a given location. This implements the "lazy" portion of
2310  * determining whether or not a release sequence was contiguous, since not all
2311  * modification order information is present at the time an action occurs.
2312  *
2313  * @param location The location/object that should be checked for release
2314  * sequence resolutions. A NULL value means to check all locations.
2315  * @param work_queue The work queue to which to add work items as they are
2316  * generated
2317  * @return True if any updates occurred (new synchronization, new mo_graph
2318  * edges)
2319  */
2320 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2321 {
2322         bool updated = false;
2323         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2324         while (it != pending_rel_seqs->end()) {
2325                 struct release_seq *pending = *it;
2326                 ModelAction *acquire = pending->acquire;
2327                 const ModelAction *read = pending->read;
2328
2329                 /* Only resolve sequences on the given location, if provided */
2330                 if (location && read->get_location() != location) {
2331                         it++;
2332                         continue;
2333                 }
2334
2335                 const ModelAction *rf = read->get_reads_from();
2336                 rel_heads_list_t release_heads;
2337                 bool complete;
2338                 complete = release_seq_heads(rf, &release_heads, pending);
2339                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2340                         if (!acquire->has_synchronized_with(release_heads[i])) {
2341                                 if (acquire->synchronize_with(release_heads[i]))
2342                                         updated = true;
2343                                 else
2344                                         set_bad_synchronization();
2345                         }
2346                 }
2347
2348                 if (updated) {
2349                         /* Re-check all pending release sequences */
2350                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2351                         /* Re-check read-acquire for mo_graph edges */
2352                         if (acquire->is_read())
2353                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2354
2355                         /* propagate synchronization to later actions */
2356                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2357                         for (; (*rit) != acquire; rit++) {
2358                                 ModelAction *propagate = *rit;
2359                                 if (acquire->happens_before(propagate)) {
2360                                         propagate->synchronize_with(acquire);
2361                                         /* Re-check 'propagate' for mo_graph edges */
2362                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2363                                 }
2364                         }
2365                 }
2366                 if (complete) {
2367                         it = pending_rel_seqs->erase(it);
2368                         snapshot_free(pending);
2369                 } else {
2370                         it++;
2371                 }
2372         }
2373
2374         // If we resolved any release sequences (added synchronization), see if we have realized a data race.
2375         checkDataRaces();
2376
2377         return updated;
2378 }
2379
2380 /**
2381  * Performs various bookkeeping operations for the current ModelAction. For
2382  * instance, adds action to the per-object, per-thread action vector and to the
2383  * action trace list of all thread actions.
2384  *
2385  * @param act is the ModelAction to add.
2386  */
2387 void ModelChecker::add_action_to_lists(ModelAction *act)
2388 {
2389         int tid = id_to_int(act->get_tid());
2390         ModelAction *uninit = NULL;
2391         int uninit_id = -1;
2392         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2393         if (list->empty() && act->is_atomic_var()) {
2394                 uninit = get_uninitialized_action(act);
2395                 uninit_id = id_to_int(uninit->get_tid());
2396                 list->push_front(uninit);
2397         }
2398         list->push_back(act);
2399
2400         action_trace->push_back(act);
2401         if (uninit)
2402                 action_trace->push_front(uninit);
2403
2404         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2405         if (tid >= (int)vec->size())
2406                 vec->resize(priv->next_thread_id);
2407         (*vec)[tid].push_back(act);
2408         if (uninit)
2409                 (*vec)[uninit_id].push_front(uninit);
2410
2411         if ((int)thrd_last_action->size() <= tid)
2412                 thrd_last_action->resize(get_num_threads());
2413         (*thrd_last_action)[tid] = act;
2414         if (uninit)
2415                 (*thrd_last_action)[uninit_id] = uninit;
2416
2417         if (act->is_fence() && act->is_release()) {
2418                 if ((int)thrd_last_fence_release->size() <= tid)
2419                         thrd_last_fence_release->resize(get_num_threads());
2420                 (*thrd_last_fence_release)[tid] = act;
2421         }
2422
2423         if (act->is_wait()) {
2424                 void *mutex_loc = (void *) act->get_value();
2425                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2426
2427                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2428                 if (tid >= (int)vec->size())
2429                         vec->resize(priv->next_thread_id);
2430                 (*vec)[tid].push_back(act);
2431         }
2432 }
2433
2434 /**
2435  * @brief Get the last action performed by a particular Thread
2436  * @param tid The thread ID of the Thread in question
2437  * @return The last action in the thread
2438  */
2439 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2440 {
2441         int threadid = id_to_int(tid);
2442         if (threadid < (int)thrd_last_action->size())
2443                 return (*thrd_last_action)[id_to_int(tid)];
2444         else
2445                 return NULL;
2446 }
2447
2448 /**
2449  * @brief Get the last fence release performed by a particular Thread
2450  * @param tid The thread ID of the Thread in question
2451  * @return The last fence release in the thread, if one exists; NULL otherwise
2452  */
2453 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2454 {
2455         int threadid = id_to_int(tid);
2456         if (threadid < (int)thrd_last_fence_release->size())
2457                 return (*thrd_last_fence_release)[id_to_int(tid)];
2458         else
2459                 return NULL;
2460 }
2461
2462 /**
2463  * Gets the last memory_order_seq_cst write (in the total global sequence)
2464  * performed on a particular object (i.e., memory location), not including the
2465  * current action.
2466  * @param curr The current ModelAction; also denotes the object location to
2467  * check
2468  * @return The last seq_cst write
2469  */
2470 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2471 {
2472         void *location = curr->get_location();
2473         action_list_t *list = get_safe_ptr_action(obj_map, location);
2474         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2475         action_list_t::reverse_iterator rit;
2476         for (rit = list->rbegin(); (*rit) != curr; rit++)
2477                 ;
2478         rit++; /* Skip past curr */
2479         for ( ; rit != list->rend(); rit++)
2480                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2481                         return *rit;
2482         return NULL;
2483 }
2484
2485 /**
2486  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2487  * performed in a particular thread, prior to a particular fence.
2488  * @param tid The ID of the thread to check
2489  * @param before_fence The fence from which to begin the search; if NULL, then
2490  * search for the most recent fence in the thread.
2491  * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
2492  */
2493 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2494 {
2495         /* All fences should have NULL location */
2496         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2497         action_list_t::reverse_iterator rit = list->rbegin();
2498
2499         if (before_fence) {
2500                 for (; rit != list->rend(); rit++)
2501                         if (*rit == before_fence)
2502                                 break;
2503
2504                 ASSERT(*rit == before_fence);
2505                 rit++;
2506         }
2507
2508         for (; rit != list->rend(); rit++)
2509                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2510                         return *rit;
2511         return NULL;
2512 }
2513
2514 /**
2515  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2516  * location). This function identifies the mutex according to the current
2517  * action, which is presumed to operate on the same mutex.
2518  * @param curr The current ModelAction; also denotes the object location to
2519  * check
2520  * @return The last unlock operation
2521  */
2522 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2523 {
2524         void *location = curr->get_location();
2525         action_list_t *list = get_safe_ptr_action(obj_map, location);
2526         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2527         action_list_t::reverse_iterator rit;
2528         for (rit = list->rbegin(); rit != list->rend(); rit++)
2529                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2530                         return *rit;
2531         return NULL;
2532 }
2533
2534 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2535 {
2536         ModelAction *parent = get_last_action(tid);
2537         if (!parent)
2538                 parent = get_thread(tid)->get_creation();
2539         return parent;
2540 }
2541
2542 /**
2543  * Returns the clock vector for a given thread.
2544  * @param tid The thread whose clock vector we want
2545  * @return Desired clock vector
2546  */
2547 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2548 {
2549         return get_parent_action(tid)->get_cv();
2550 }
2551
2552 /**
2553  * @brief Find the promise, if any, to resolve for the current action
2554  * @param curr The current ModelAction. Should be a write.
2555  * @return The (non-negative) index for the Promise to resolve, if any;
2556  * otherwise -1
2557  */
2558 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2559 {
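        /* The set of candidate promises for this write is computed in
         * compute_promises() and recorded in the Node */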
2560         for (unsigned int i = 0; i < promises->size(); i++)
2561                 if (curr->get_node()->get_promise(i))
2562                         return i;
2563         return -1;
2564 }
2565
2566 /**
2567  * Resolve a Promise with a current write.
2568  * @param write The ModelAction that is fulfilling Promises
2569  * @param promise_idx The index corresponding to the promise
2570  * @return True if the Promise was successfully resolved; false otherwise
2571  */
2572 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2573 {
2574         ModelVector<ModelAction *> actions_to_check;
2575         Promise *promise = (*promises)[promise_idx];
2576
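        /* Re-point every read that was waiting on this promise at the
         * concrete write that now fulfills it */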
2577         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2578                 ModelAction *read = promise->get_reader(i);
2579                 read_from(read, write);
2580                 actions_to_check.push_back(read);
2581         }
2582         /* Make sure the promise's value matches the write's value */
2583         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2584         if (!mo_graph->resolvePromise(promise, write))
2585                 priv->failed_promise = true;
2586
2587         promises->erase(promises->begin() + promise_idx);
2588         /**
2589          * @todo  It is possible to end up in an inconsistent state, where a
2590          * "resolved" promise may still be referenced if
2591          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2592          *
2593          * Note that the inconsistency only matters when dumping mo_graph to
2594          * file.
2595          *
2596          * delete promise;
2597          */
2598
2599         // Check whether reading these writes has made threads unable to
2600         // resolve promises
2601         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2602                 ModelAction *read = actions_to_check[i];
2603                 mo_check_promises(read, true);
2604         }
2605
2606         return true;
2607 }
2608
2609 /**
2610  * Compute the set of promises that could potentially be satisfied by this
2611  * action. Note that the set computation actually appears in the Node, not in
2612  * ModelChecker.
2613  * @param curr The ModelAction that may satisfy promises
2614  */
2615 void ModelChecker::compute_promises(ModelAction *curr)
2616 {
2617         for (unsigned int i = 0; i < promises->size(); i++) {
2618                 Promise *promise = (*promises)[i];
2619                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2620                         continue;
2621
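                /* curr cannot satisfy the promise if any of its promised
                 * readers already happens before curr or could synchronize
                 * with it */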
2622                 bool satisfy = true;
2623                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2624                         const ModelAction *act = promise->get_reader(j);
2625                         if (act->happens_before(curr) ||
2626                                         act->could_synchronize_with(curr)) {
2627                                 satisfy = false;
2628                                 break;
2629                         }
2630                 }
2631                 if (satisfy)
2632                         curr->get_node()->set_promise(i);
2633         }
2634 }
2635
2636 /** Checks promises in response to a change in a thread's ClockVector. */
2637 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2638 {
2639         for (unsigned int i = 0; i < promises->size(); i++) {
2640                 Promise *promise = (*promises)[i];
2641                 if (!promise->thread_is_available(tid))
2642                         continue;
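                /* If the merge makes tid newly synchronized with a promised
                 * reader, tid can no longer supply the promised write, so it
                 * is eliminated from this promise */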
2643                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2644                         const ModelAction *act = promise->get_reader(j);
2645                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2646                                         merge_cv->synchronized_since(act)) {
2647                                 if (promise->eliminate_thread(tid)) {
2648                                         /* Promise has failed */
2649                                         priv->failed_promise = true;
2650                                         return;
2651                                 }
2652                         }
2653                 }
2654         }
2655 }
2656
2657 void ModelChecker::check_promises_thread_disabled()
2658 {
2659         for (unsigned int i = 0; i < promises->size(); i++) {
2660                 Promise *promise = (*promises)[i];
2661                 if (promise->has_failed()) {
2662                         priv->failed_promise = true;
2663                         return;
2664                 }
2665         }
2666 }
2667
2668 /**
2669  * @brief Checks promises in response to an addition to the modification
2670  * order.
2671  *
2672  * We test whether threads are still available for satisfying promises after an
2673  * addition to our modification order constraints. Those that are unavailable
2674  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2675  * that promise has failed.
2676  *
2677  * @param act The ModelAction which updated the modification order
2678  * @param is_read_check Should be true if act is a read and we must check for
2679  * updates to the store from which it read (there is a distinction here for
2680  * RMWs, which are both a load and a store)
2681  */
2682 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2683 {
2684         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2685
2686         for (unsigned int i = 0; i < promises->size(); i++) {
2687                 Promise *promise = (*promises)[i];
2688
2689                 // Is this promise on the same location?
2690                 if (!promise->same_location(write))
2691                         continue;
2692
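                /* For a promised reader that happens before act, check whether
                 * the new modification-order position of 'write' makes the
                 * promise unsatisfiable */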
2693                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2694                         const ModelAction *pread = promise->get_reader(j);
2695                         if (!pread->happens_before(act))
2696                                 continue;
2697                         if (mo_graph->checkPromise(write, promise)) {
2698                                 priv->failed_promise = true;
2699                                 return;
2700                         }
2701                         break;
2702                 }
2703
2704                 // Don't do any lookups twice for the same thread
2705                 if (!promise->thread_is_available(act->get_tid()))
2706                         continue;
2707
2708                 if (mo_graph->checkReachable(promise, write)) {
2709                         if (mo_graph->checkPromise(write, promise)) {
2710                                 priv->failed_promise = true;
2711                                 return;
2712                         }
2713                 }
2714         }
2715 }
2716
2717 /**
2718  * Compute the set of writes that may break the current pending release
2719  * sequence. This information is extracted from previous release sequence
2720  * calculations.
2721  *
2722  * @param curr The current ModelAction. Must be a release sequence fixup
2723  * action.
2724  */
2725 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2726 {
2727         if (pending_rel_seqs->empty())
2728                 return;
2729
2730         struct release_seq *pending = pending_rel_seqs->back();
2731         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2732                 const ModelAction *write = pending->writes[i];
2733                 curr->get_node()->add_relseq_break(write);
2734         }
2735
2736         /* NULL means don't break the sequence; just synchronize */
2737         curr->get_node()->add_relseq_break(NULL);
2738 }
2739
2740 /**
2741  * Build up an initial set of all past writes that this 'read' action may read
2742  * from, as well as any previously-observed future values that must still be valid.
2743  *
2744  * @param curr is the current ModelAction that we are exploring; it must be a
2745  * 'read' operation.
2746  */
2747 void ModelChecker::build_may_read_from(ModelAction *curr)
2748 {
2749         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2750         unsigned int i;
2751         ASSERT(curr->is_read());
2752
2753         ModelAction *last_sc_write = NULL;
2754
2755         if (curr->is_seqcst())
2756                 last_sc_write = get_last_seq_cst_write(curr);
2757
2758         /* Iterate over all threads */
2759         for (i = 0; i < thrd_lists->size(); i++) {
2760                 /* Iterate over actions in thread, starting from most recent */
2761                 action_list_t *list = &(*thrd_lists)[i];
2762                 action_list_t::reverse_iterator rit;
2763                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2764                         ModelAction *act = *rit;
2765
2766                         /* Only consider 'write' actions */
2767                         if (!act->is_write() || act == curr)
2768                                 continue;
2769
2770                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2771                         bool allow_read = true;
2772
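                        /* A seq_cst read may not read from a seq_cst write
                         * other than the most recent one, nor from any write
                         * that happens before that most recent seq_cst write */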
2773                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2774                                 allow_read = false;
2775                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2776                                 allow_read = false;
2777
2778                         if (allow_read) {
2779                                 /* Only add feasible reads */
2780                                 mo_graph->startChanges();
2781                                 r_modification_order(curr, act);
2782                                 if (!is_infeasible())
2783                                         curr->get_node()->add_read_from_past(act);
2784                                 mo_graph->rollbackChanges();
2785                         }
2786
2787                         /* Include at most one act per thread that "happens before" curr */
2788                         if (act->happens_before(curr))
2789                                 break;
2790                 }
2791         }
2792
2793         /* Inherit existing, promised future values */
2794         for (i = 0; i < promises->size(); i++) {
2795                 const Promise *promise = (*promises)[i];
2796                 const ModelAction *promise_read = promise->get_reader(0);
2797                 if (promise_read->same_var(curr)) {
2798                         /* Only add feasible future-values */
2799                         mo_graph->startChanges();
2800                         r_modification_order(curr, promise);
2801                         if (!is_infeasible())
2802                                 curr->get_node()->add_read_from_promise(promise_read);
2803                         mo_graph->rollbackChanges();
2804                 }
2805         }
2806
2807         /* We may find no valid may-read-from only if the execution is doomed */
2808         if (!curr->get_node()->read_from_size()) {
2809                 priv->no_valid_reads = true;
2810                 set_assert();
2811         }
2812
2813         if (DBG_ENABLED()) {
2814                 model_print("Reached read action:\n");
2815                 curr->print();
2816                 model_print("Printing read_from_past\n");
2817                 curr->get_node()->print_read_from_past();
2818                 model_print("End printing read_from_past\n");
2819         }
2820 }
2821
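/**
 * @brief Determine whether a potentially-sleeping read may read from a write
 *
 * Walks backwards through the chain of RMWs feeding 'write': the read is
 * allowed if the chain reaches the uninitialized store, a release store that
 * executed while curr's thread was on the sleep set, or the end of the chain;
 * it is rejected as soon as it hits a non-RMW store that does not qualify.
 *
 * @param curr The read whose thread may be on the sleep set
 * @param write The candidate store for curr to read from
 * @return True if curr may read from write
 */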
2822 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2823 {
2824         for ( ; write != NULL; write = write->get_reads_from()) {
2825                 /* UNINIT actions don't have a Node, and they never sleep */
2826                 if (write->is_uninitialized())
2827                         return true;
2828                 Node *prevnode = write->get_node()->get_parent();
2829
2830                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2831                 if (write->is_release() && thread_sleep)
2832                         return true;
2833                 if (!write->is_rmw())
2834                         return false;
2835         }
2836         return true;
2837 }
2838
2839 /**
2840  * @brief Get an action representing an uninitialized atomic
2841  *
2842  * This function may create a new one or try to retrieve one from the NodeStack
2843  *
2844  * @param curr The current action, which prompts the creation of an UNINIT action
2845  * @return A pointer to the UNINIT ModelAction
2846  */
2847 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2848 {
2849         Node *node = curr->get_node();
2850         ModelAction *act = node->get_uninit_action();
2851         if (!act) {
2852                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2853                 node->set_uninit_action(act);
2854         }
2855         act->create_cv(NULL);
2856         return act;
2857 }
2858
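/** @brief Print a list of ModelActions along with a simple hash of the trace */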
2859 static void print_list(action_list_t *list)
2860 {
2861         action_list_t::iterator it;
2862
2863         model_print("---------------------------------------------------------------------\n");
2864
2865         unsigned int hash = 0;
2866
2867         for (it = list->begin(); it != list->end(); it++) {
2868                 const ModelAction *act = *it;
2869                 if (act->get_seq_number() > 0)
2870                         act->print();
2871                 hash = hash^(hash<<3)^((*it)->hash());
2872         }
2873         model_print("HASH %u\n", hash);
2874         model_print("---------------------------------------------------------------------\n");
2875 }
2876
2877 #if SUPPORT_MOD_ORDER_DUMP
2878 void ModelChecker::dumpGraph(char *filename) const
2879 {
2880         char buffer[200];
2881         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2882         FILE *file = fopen(buffer, "w");
2883         fprintf(file, "digraph %s {\n", filename);
2884         mo_graph->dumpNodes(file);
2885         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2886
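        /* thread_array tracks the previous action in each thread so that
         * consecutive actions can be linked with "sb" (sequenced-before)
         * edges */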
2887         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2888                 ModelAction *act = *it;
2889                 if (act->is_read()) {
2890                         mo_graph->dot_print_node(file, act);
2891                         if (act->get_reads_from())
2892                                 mo_graph->dot_print_edge(file,
2893                                                 act->get_reads_from(),
2894                                                 act,
2895                                                 "label=\"rf\", color=red, weight=2");
2896                         else
2897                                 mo_graph->dot_print_edge(file,
2898                                                 act->get_reads_from_promise(),
2899                                                 act,
2900                                                 "label=\"rf\", color=red");
2901                 }
2902                 if (thread_array[id_to_int(act->get_tid())]) {
2903                         mo_graph->dot_print_edge(file,
2904                                         thread_array[id_to_int(act->get_tid())],
2905                                         act,
2906                                         "label=\"sb\", color=blue, weight=400");
2907                 }
2908
2909                 thread_array[id_to_int(act->get_tid())] = act;
2910         }
2911         fprintf(file, "}\n");
2912         model_free(thread_array);
2913         fclose(file);
2914 }
2915 #endif
2916
2917 /** @brief Prints an execution trace summary. */
2918 void ModelChecker::print_summary() const
2919 {
2920 #if SUPPORT_MOD_ORDER_DUMP
2921         char buffername[100];
2922         snprintf(buffername, sizeof(buffername), "exec%04u", stats.num_total);
2923         mo_graph->dumpGraphToFile(buffername);
2924         snprintf(buffername, sizeof(buffername), "graph%04u", stats.num_total);
2925         dumpGraph(buffername);
2926 #endif
2927
2928         model_print("Execution %u:", stats.num_total);
2929         if (isfeasibleprefix()) {
2930                 if (scheduler->all_threads_sleeping())
2931                         model_print(" SLEEP-SET REDUNDANT");
2932                 model_print("\n");
2933         } else
2934                 print_infeasibility(" INFEASIBLE");
2935         print_list(action_trace);
2936         model_print("\n");
2937         if (!promises->empty()) {
2938                 model_print("Pending promises:\n");
2939                 for (unsigned int i = 0; i < promises->size(); i++) {
2940                         model_print(" [P%u] ", i);
2941                         (*promises)[i]->print();
2942                 }
2943                 model_print("\n");
2944         }
2945 }
2946
2947 /**
2948  * Add a Thread to the system for the first time. Should only be called once
2949  * per thread.
2950  * @param t The Thread to add
2951  */
2952 void ModelChecker::add_thread(Thread *t)
2953 {
2954         thread_map->put(id_to_int(t->get_id()), t);
2955         scheduler->add_thread(t);
2956 }
2957
2958 /**
2959  * @brief Get a Thread reference by its ID
2960  * @param tid The Thread's ID
2961  * @return A Thread reference
2962  */
2963 Thread * ModelChecker::get_thread(thread_id_t tid) const
2964 {
2965         return thread_map->get(id_to_int(tid));
2966 }
2967
2968 /**
2969  * @brief Get a reference to the Thread in which a ModelAction was executed
2970  * @param act The ModelAction
2971  * @return A Thread reference
2972  */
2973 Thread * ModelChecker::get_thread(const ModelAction *act) const
2974 {
2975         return get_thread(act->get_tid());
2976 }
2977
2978 /**
2979  * @brief Get a Promise's "promise number"
2980  *
2981  * A "promise number" is an index number that is unique to a promise, valid
2982  * only for a specific snapshot of an execution trace. Promises may come and go
2983  * as they are generated and resolved, so an index only retains meaning for the
2984  * current snapshot.
2985  *
2986  * @param promise The Promise to check
2987  * @return The promise index, if the promise is still valid; otherwise -1
2988  */
2989 int ModelChecker::get_promise_number(const Promise *promise) const
2990 {
2991         for (unsigned int i = 0; i < promises->size(); i++)
2992                 if ((*promises)[i] == promise)
2993                         return i;
2994         /* Not found */
2995         return -1;
2996 }
2997
2998 /**
2999  * @brief Check if a Thread is currently enabled
3000  * @param t The Thread to check
3001  * @return True if the Thread is currently enabled
3002  */
3003 bool ModelChecker::is_enabled(Thread *t) const
3004 {
3005         return scheduler->is_enabled(t);
3006 }
3007
3008 /**
3009  * @brief Check if a Thread is currently enabled
3010  * @param tid The ID of the Thread to check
3011  * @return True if the Thread is currently enabled
3012  */
3013 bool ModelChecker::is_enabled(thread_id_t tid) const
3014 {
3015         return scheduler->is_enabled(tid);
3016 }
3017
3018 /**
3019  * Switch from a model-checker context to a user-thread context. This is the
3020  * complement of ModelChecker::switch_to_master and must be called from the
3021  * model-checker context
3022  * model-checker context.
3023  * @param thread The user-thread to switch to
3024  */
3025 void ModelChecker::switch_from_master(Thread *thread)
3026 {
3027         scheduler->set_current_thread(thread);
3028         Thread::swap(&system_context, thread);
3029 }
3030
3031 /**
3032  * Switch from a user-context to the "master thread" context (a.k.a. system
3033  * context). This switch is made with the intention of exploring a particular
3034  * model-checking action (described by a ModelAction object). Must be called
3035  * from a user-thread context.
3036  *
3037  * @param act The current action that will be explored. May be NULL only if
3038  * trace is exiting via an assertion (see ModelChecker::set_assert and
3039  * ModelChecker::has_asserted).
3040  * @return The value returned by the current action
3041  */
3042 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3043 {
3044         DBG();
3045         Thread *old = thread_current();
3046         scheduler->set_current_thread(NULL);
3047         ASSERT(!old->get_pending());
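        /* Stash the action as "pending"; the model-checker loop in run() will
         * pick it up and feed it to take_step() */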
3048         old->set_pending(act);
3049         if (Thread::swap(old, &system_context) < 0) {
3050                 perror("swap threads");
3051                 exit(EXIT_FAILURE);
3052         }
3053         return old->get_return_value();
3054 }
3055
3056 /**
3057  * Takes the next step in the execution, if possible.
3058  * @param curr The current step to take
3059  * @return The next Thread to run, if any; NULL if this execution
3060  * should terminate
3061  */
3062 Thread * ModelChecker::take_step(ModelAction *curr)
3063 {
3064         Thread *curr_thrd = get_thread(curr);
3065         ASSERT(curr_thrd->get_state() == THREAD_READY);
3066
3067         curr = check_current_action(curr);
3068
3069         /* Infeasible -> don't take any more steps */
3070         if (is_infeasible())
3071                 return NULL;
3072         else if (isfeasibleprefix() && have_bug_reports()) {
3073                 set_assert();
3074                 return NULL;
3075         }
3076
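        /* Halt this execution if it has exceeded the user-specified step bound */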
3077         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3078                 return NULL;
3079
3080         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3081                 scheduler->remove_thread(curr_thrd);
3082
3083         Thread *next_thrd = NULL;
3084         if (curr)
3085                 next_thrd = action_select_next_thread(curr);
3086         if (!next_thrd)
3087                 next_thrd = get_next_thread();
3088
3089         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3090                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3091
3092         return next_thrd;
3093 }
3094
3095 /** Wrapper to run the user's main function, with appropriate arguments */
3096 void user_main_wrapper(void *)
3097 {
3098         user_main(model->params.argc, model->params.argv);
3099 }
3100
3101 /** @brief Run ModelChecker for the user program */
3102 void ModelChecker::run()
3103 {
3104         do {
3105                 thrd_t user_thread;
3106                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3107                 add_thread(t);
3108
3109                 do {
3110                         /*
3111                          * Stash next pending action(s) for thread(s). We should
3112                          * only need to stash one thread's action--the thread
3113                          * which just took a step--plus the first step for any
3114                          * newly-created thread
3115                          */
3116                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3117                                 thread_id_t tid = int_to_id(i);
3118                                 Thread *thr = get_thread(tid);
3119                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3120                                         switch_from_master(thr);
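                                        /* A cyclic wait among threads indicates deadlock */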
3121                                         if (is_circular_wait(thr))
3122                                                 assert_bug("Deadlock detected");
3123                                 }
3124                         }
3125
3126                         /* Catch assertions from prior take_step or from
3127                          * between-ModelAction bugs (e.g., data races) */
3128                         if (has_asserted())
3129                                 break;
3130
3131                         /* Consume the next action for a Thread */
3132                         ModelAction *curr = t->get_pending();
3133                         t->set_pending(NULL);
3134                         t = take_step(curr);
3135                 } while (t && !t->is_model_thread());
3136
3137                 /*
3138                  * Launch end-of-execution release sequence fixups only when
3139                  * the execution is otherwise feasible AND there are:
3140                  *
3141                  * (1) pending release sequences
3142                  * (2) pending assertions that could be invalidated by a change
3143                  * in clock vectors (i.e., data races)
3144                  * (3) no pending promises
3145                  */
3146                 while (!pending_rel_seqs->empty() &&
3147                                 is_feasible_prefix_ignore_relseq() &&
3148                                 !unrealizedraces.empty()) {
3149                         model_print("*** WARNING: release sequence fixup action "
3150                                         "(%zu pending release sequence(s)) ***\n",
3151                                         pending_rel_seqs->size());
3152                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3153                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3154                                         model_thread);
3155                         take_step(fixup);
3156                 }
3157         } while (next_execution());
3158
3159         model_print("******* Model-checking complete: *******\n");
3160         print_stats();
3161 }