model: bugfix - missing SC mo_graph edge
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
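                /* Sizing note: strlen(fmt) counts the "%s" placeholder, so this
                 * allocation covers the expanded message plus its NUL terminator. */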
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new SnapVector<Promise *>()),
90         futurevalues(new SnapVector<struct PendingFutureValue>()),
91         pending_rel_seqs(new SnapVector<struct release_seq *>()),
92         thrd_last_action(new SnapVector<ModelAction *>(1)),
93         thrd_last_fence_release(new SnapVector<ModelAction *>()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
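/* Helper: return the action list stored for 'ptr' in 'hash', lazily allocating
 * an empty list on first use. */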
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         SnapVector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new SnapVector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
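        /* Roll back all snapshotted memory to snapshot 0, i.e., the initial program state */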
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
211  * actions should be followed by the execution of their child thread. In either
212  * case, the current action should determine the next thread schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
228
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * we defer to the scheduler.
236  *
237  * @return The next chosen thread to run, if one exists; otherwise NULL,
238  * indicating that the current execution should terminate.
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_misc()) {
264                         /* The next node will try to satisfy a different misc_index value. */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else if (nextnode->increment_promise()) {
268                         /* The next node will try to satisfy a different set of promises. */
269                         tid = next->get_tid();
270                         node_stack->pop_restofstack(2);
271                 } else if (nextnode->increment_read_from()) {
272                         /* The next node will read from a different value. */
273                         tid = next->get_tid();
274                         node_stack->pop_restofstack(2);
275                 } else if (nextnode->increment_relseq_break()) {
276                         /* The next node will try to resolve a release sequence differently */
277                         tid = next->get_tid();
278                         node_stack->pop_restofstack(2);
279                 } else {
280                         ASSERT(prevnode);
281                         /* Make a different thread execute for next step */
282                         scheduler->add_sleep(get_thread(next->get_tid()));
283                         tid = prevnode->get_next_backtrack();
284                         /* Make sure the backtracked thread isn't sleeping. */
285                         node_stack->pop_restofstack(1);
286                         if (diverge == earliest_diverge) {
287                                 earliest_diverge = prevnode->get_action();
288                         }
289                 }
290                 /* Start the round robin scheduler from this thread id */
291                 scheduler->set_scheduler_thread(tid);
292                 /* The correct sleep set is in the parent node. */
293                 execute_sleep_set();
294
295                 DEBUG("*** Divergence point ***\n");
296
297                 diverge = NULL;
298         } else {
299                 tid = next->get_tid();
300         }
301         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
302         ASSERT(tid != THREAD_ID_T_NONE);
303         return thread_map->get(id_to_int(tid));
304 }
305
306 /**
307  * We need to know what the next actions of all threads in the sleep
308  * set will be.  This method computes them and stores the actions at
309  * the corresponding thread object's pending action.
310  */
311
312 void ModelChecker::execute_sleep_set()
313 {
314         for (unsigned int i = 0; i < get_num_threads(); i++) {
315                 thread_id_t tid = int_to_id(i);
316                 Thread *thr = get_thread(tid);
317                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
318                         thr->get_pending()->set_sleep_flag();
319                 }
320         }
321 }
322
323 /**
324  * @brief Should the current action wake up a given thread?
325  *
326  * @param curr The current action
327  * @param thread The thread that we might wake up
328  * @return True, if we should wake up the sleeping thread; false otherwise
329  */
330 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
331 {
332         const ModelAction *asleep = thread->get_pending();
333         /* Don't allow partial RMW to wake anyone up */
334         if (curr->is_rmwr())
335                 return false;
336         /* Synchronizing actions may have been backtracked */
337         if (asleep->could_synchronize_with(curr))
338                 return true;
339         /* All acquire/release fences and fence-acquire/store-release */
340         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
341                 return true;
342         /* Fence-release + store can awake load-acquire on the same location */
343         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
344                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
345                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
346                         return true;
347         }
348         return false;
349 }
350
351 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
352 {
353         for (unsigned int i = 0; i < get_num_threads(); i++) {
354                 Thread *thr = get_thread(int_to_id(i));
355                 if (scheduler->is_sleep_set(thr)) {
356                         if (should_wake_up(curr, thr))
357                                 /* Remove this thread from sleep set */
358                                 scheduler->remove_sleep(thr);
359                 }
360         }
361 }
362
363 /** @brief Alert the model-checker that an incorrectly-ordered
364  * synchronization was made */
365 void ModelChecker::set_bad_synchronization()
366 {
367         priv->bad_synchronization = true;
368 }
369
370 /**
371  * Check whether the current trace has triggered an assertion which should halt
372  * its execution.
373  *
374  * @return True, if the execution should be aborted; false otherwise
375  */
376 bool ModelChecker::has_asserted() const
377 {
378         return priv->asserted;
379 }
380
381 /**
382  * Trigger a trace assertion which should cause this execution to be halted.
383  * This can be due to a detected bug or due to an infeasibility that should
384  * halt ASAP.
385  */
386 void ModelChecker::set_assert()
387 {
388         priv->asserted = true;
389 }
390
391 /**
392  * Check if we are in a deadlock. Should only be called at the end of an
393  * execution, although it should not give false positives in the middle of an
394  * execution (there should be some ENABLED thread).
395  *
396  * @return True if program is in a deadlock; false otherwise
397  */
398 bool ModelChecker::is_deadlocked() const
399 {
400         bool blocking_threads = false;
401         for (unsigned int i = 0; i < get_num_threads(); i++) {
402                 thread_id_t tid = int_to_id(i);
403                 if (is_enabled(tid))
404                         return false;
405                 Thread *t = get_thread(tid);
406                 if (!t->is_model_thread() && t->get_pending())
407                         blocking_threads = true;
408         }
409         return blocking_threads;
410 }
411
412 /**
413  * Check if a Thread has entered a circular wait deadlock situation. This will
414  * not check other threads for potential deadlock situations, and may miss
415  * deadlocks involving WAIT.
416  *
417  * @param t The thread which may have entered a deadlock
418  * @return True if this Thread entered a deadlock; false otherwise
419  */
420 bool ModelChecker::is_circular_wait(const Thread *t) const
421 {
422         for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on())
423                 if (waiting == t)
424                         return true;
425         return false;
426 }
427
428 /**
429  * Check if this is a complete execution. That is, have all threads completed
430  * execution (rather than exiting because sleep sets have forced a redundant
431  * execution).
432  *
433  * @return True if the execution is complete.
434  */
435 bool ModelChecker::is_complete_execution() const
436 {
437         for (unsigned int i = 0; i < get_num_threads(); i++)
438                 if (is_enabled(int_to_id(i)))
439                         return false;
440         return true;
441 }
442
443 /**
444  * @brief Assert a bug in the executing program.
445  *
446  * Use this function to assert any sort of bug in the user program. If the
447  * current trace is feasible (actually, a prefix of some feasible execution),
448  * then this execution will be aborted, printing the appropriate message. If
449  * the current trace is not yet feasible, the error message will be stashed and
450  * printed if the execution ever becomes feasible.
451  *
452  * @param msg Descriptive message for the bug (do not include newline char)
453  * @return True if bug is immediately-feasible
454  */
455 bool ModelChecker::assert_bug(const char *msg)
456 {
457         priv->bugs.push_back(new bug_message(msg));
458
459         if (isfeasibleprefix()) {
460                 set_assert();
461                 return true;
462         }
463         return false;
464 }
465
466 /**
467  * @brief Assert a bug in the executing program, asserted by a user thread
468  * @see ModelChecker::assert_bug
469  * @param msg Descriptive message for the bug (do not include newline char)
470  */
471 void ModelChecker::assert_user_bug(const char *msg)
472 {
473         /* If feasible bug, bail out now */
474         if (assert_bug(msg))
475                 switch_to_master(NULL);
476 }
477
478 /** @return True, if any bugs have been reported for this execution */
479 bool ModelChecker::have_bug_reports() const
480 {
481         return priv->bugs.size() != 0;
482 }
483
484 /** @brief Print bug report listing for this execution (if any bugs exist) */
485 void ModelChecker::print_bugs() const
486 {
487         if (have_bug_reports()) {
488                 model_print("Bug report: %zu bug%s detected\n",
489                                 priv->bugs.size(),
490                                 priv->bugs.size() > 1 ? "s" : "");
491                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
492                         priv->bugs[i]->print();
493         }
494 }
495
496 /**
497  * @brief Record end-of-execution stats
498  *
499  * Must be run when exiting an execution. Records various stats.
500  * @see struct execution_stats
501  */
502 void ModelChecker::record_stats()
503 {
504         stats.num_total++;
505         if (!isfeasibleprefix())
506                 stats.num_infeasible++;
507         else if (have_bug_reports())
508                 stats.num_buggy_executions++;
509         else if (is_complete_execution())
510                 stats.num_complete++;
511         else {
512                 stats.num_redundant++;
513
514                 /**
515                  * @todo We can violate this ASSERT() when fairness/sleep sets
516                  * conflict to cause an execution to terminate, e.g. with:
517                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
518                  */
519                 //ASSERT(scheduler->all_threads_sleeping());
520         }
521 }
522
523 /** @brief Print execution stats */
524 void ModelChecker::print_stats() const
525 {
526         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
527         model_print("Number of redundant executions: %d\n", stats.num_redundant);
528         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
529         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
530         model_print("Total executions: %d\n", stats.num_total);
531         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
532 }
533
534 /**
535  * @brief End-of-execution print
536  * @param printbugs Should any existing bugs be printed?
537  */
538 void ModelChecker::print_execution(bool printbugs) const
539 {
540         print_program_output();
541
542         if (params.verbose) {
543                 model_print("Earliest divergence point since last feasible execution:\n");
544                 if (earliest_diverge)
545                         earliest_diverge->print();
546                 else
547                         model_print("(Not set)\n");
548
549                 model_print("\n");
550                 print_stats();
551         }
552
553         /* Don't print invalid bugs */
554         if (printbugs)
555                 print_bugs();
556
557         model_print("\n");
558         print_summary();
559 }
560
561 /**
562  * Queries the model-checker for more executions to explore and, if one
563  * exists, resets the model-checker state to execute a new execution.
564  *
565  * @return If there are more executions to explore, return true. Otherwise,
566  * return false.
567  */
568 bool ModelChecker::next_execution()
569 {
570         DBG();
571         /* Is this execution a feasible execution that's worth bug-checking? */
572         bool complete = isfeasibleprefix() && (is_complete_execution() ||
573                         have_bug_reports());
574
575         /* End-of-execution bug checks */
576         if (complete) {
577                 if (is_deadlocked())
578                         assert_bug("Deadlock detected");
579
580                 checkDataRaces();
581         }
582
583         record_stats();
584
585         /* Output */
586         if (params.verbose || (complete && have_bug_reports()))
587                 print_execution(complete);
588         else
589                 clear_program_output();
590
591         if (complete)
592                 earliest_diverge = NULL;
593
594         if ((diverge = get_next_backtrack()) == NULL)
595                 return false;
596
597         if (DBG_ENABLED()) {
598                 model_print("Next execution will diverge at:\n");
599                 diverge->print();
600         }
601
602         reset_to_initial_state();
603         return true;
604 }
605
606 /**
607  * @brief Find the last fence-related backtracking conflict for a ModelAction
608  *
609  * This function performs the search for the most recent conflicting action
610  * against which we should perform backtracking, as affected by fence
611  * operations. This includes pairs of potentially-synchronizing actions which
612  * occur due to fence-acquire or fence-release, and hence should be explored in
613  * the opposite execution order.
614  *
615  * @param act The current action
616  * @return The most recent action which conflicts with act due to fences
617  */
618 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
619 {
620         /* Only perform release/acquire fence backtracking for stores */
621         if (!act->is_write())
622                 return NULL;
623
624         /* Find a fence-release (or, act is a release) */
625         ModelAction *last_release;
626         if (act->is_release())
627                 last_release = act;
628         else
629                 last_release = get_last_fence_release(act->get_tid());
630         if (!last_release)
631                 return NULL;
632
633         /* Skip past the release */
634         action_list_t *list = action_trace;
635         action_list_t::reverse_iterator rit;
636         for (rit = list->rbegin(); rit != list->rend(); rit++)
637                 if (*rit == last_release)
638                         break;
639         ASSERT(rit != list->rend());
640
641         /* Find a prior:
642          *   load-acquire
643          * or
644          *   load --sb-> fence-acquire */
645         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
646         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
647         bool found_acquire_fences = false;
648         for ( ; rit != list->rend(); rit++) {
649                 ModelAction *prev = *rit;
650                 if (act->same_thread(prev))
651                         continue;
652
653                 int tid = id_to_int(prev->get_tid());
654
655                 if (prev->is_read() && act->same_var(prev)) {
656                         if (prev->is_acquire()) {
657                                 /* Found most recent load-acquire, don't need
658                                  * to search for more fences */
659                                 if (!found_acquire_fences)
660                                         return NULL;
661                         } else {
662                                 prior_loads[tid] = prev;
663                         }
664                 }
665                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
666                         found_acquire_fences = true;
667                         acquire_fences[tid] = prev;
668                 }
669         }
670
671         ModelAction *latest_backtrack = NULL;
672         for (unsigned int i = 0; i < acquire_fences.size(); i++)
673                 if (acquire_fences[i] && prior_loads[i])
674                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
675                                 latest_backtrack = acquire_fences[i];
676         return latest_backtrack;
677 }
678
679 /**
680  * @brief Find the last backtracking conflict for a ModelAction
681  *
682  * This function performs the search for the most recent conflicting action
683  * against which we should perform backtracking. This primarily includes pairs of
684  * synchronizing actions which should be explored in the opposite execution
685  * order.
686  *
687  * @param act The current action
688  * @return The most recent action which conflicts with act
689  */
690 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
691 {
692         switch (act->get_type()) {
693         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
694         case ATOMIC_READ:
695         case ATOMIC_WRITE:
696         case ATOMIC_RMW: {
697                 ModelAction *ret = NULL;
698
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (prev->could_synchronize_with(act)) {
705                                 ret = prev;
706                                 break;
707                         }
708                 }
709
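                /* Also check for fence-related conflicts; prefer whichever conflict is more recent */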
710                 ModelAction *ret2 = get_last_fence_conflict(act);
711                 if (!ret2)
712                         return ret;
713                 if (!ret)
714                         return ret2;
715                 if (*ret < *ret2)
716                         return ret2;
717                 return ret;
718         }
719         case ATOMIC_LOCK:
720         case ATOMIC_TRYLOCK: {
721                 /* linear search: from most recent to oldest */
722                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
723                 action_list_t::reverse_iterator rit;
724                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
725                         ModelAction *prev = *rit;
726                         if (act->is_conflicting_lock(prev))
727                                 return prev;
728                 }
729                 break;
730         }
731         case ATOMIC_UNLOCK: {
732                 /* linear search: from most recent to oldest */
733                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
734                 action_list_t::reverse_iterator rit;
735                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
736                         ModelAction *prev = *rit;
737                         if (!act->same_thread(prev) && prev->is_failed_trylock())
738                                 return prev;
739                 }
740                 break;
741         }
742         case ATOMIC_WAIT: {
743                 /* linear search: from most recent to oldest */
744                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
745                 action_list_t::reverse_iterator rit;
746                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
747                         ModelAction *prev = *rit;
748                         if (!act->same_thread(prev) && prev->is_failed_trylock())
749                                 return prev;
750                         if (!act->same_thread(prev) && prev->is_notify())
751                                 return prev;
752                 }
753                 break;
754         }
755
756         case ATOMIC_NOTIFY_ALL:
757         case ATOMIC_NOTIFY_ONE: {
758                 /* linear search: from most recent to oldest */
759                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
760                 action_list_t::reverse_iterator rit;
761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
762                         ModelAction *prev = *rit;
763                         if (!act->same_thread(prev) && prev->is_wait())
764                                 return prev;
765                 }
766                 break;
767         }
768         default:
769                 break;
770         }
771         return NULL;
772 }
773
774 /** This method finds backtracking points against which we should try to
775  * reorder the parameter ModelAction.
776  *
777  * @param act The ModelAction to find backtracking points for.
778  */
779 void ModelChecker::set_backtracking(ModelAction *act)
780 {
781         Thread *t = get_thread(act);
782         ModelAction *prev = get_last_conflict(act);
783         if (prev == NULL)
784                 return;
785
786         Node *node = prev->get_node()->get_parent();
787
788         int low_tid, high_tid;
789         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
790                 low_tid = id_to_int(act->get_tid());
791                 high_tid = low_tid + 1;
792         } else {
793                 low_tid = 0;
794                 high_tid = get_num_threads();
795         }
796
797         for (int i = low_tid; i < high_tid; i++) {
798                 thread_id_t tid = int_to_id(i);
799
800                 /* Make sure this thread can be enabled here. */
801                 if (i >= node->get_num_threads())
802                         break;
803
804                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
805                 if (node->enabled_status(tid) != THREAD_ENABLED)
806                         continue;
807
808                 /* Check if this has been explored already */
809                 if (node->has_been_explored(tid))
810                         continue;
811
812                 /* See if fairness allows */
813                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
814                         bool unfair = false;
815                         for (int t = 0; t < node->get_num_threads(); t++) {
816                                 thread_id_t tother = int_to_id(t);
817                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
818                                         unfair = true;
819                                         break;
820                                 }
821                         }
822                         if (unfair)
823                                 continue;
824                 }
825
826                 /* See if CHESS-like yield fairness allows */
827                 if (model->params.yieldon) {
828                         bool unfair = false;
829                         for (int t = 0; t < node->get_num_threads(); t++) {
830                                 thread_id_t tother = int_to_id(t);
831                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
832                                         unfair = true;
833                                         break;
834                                 }
835                         }
836                         if (unfair)
837                                 continue;
838                 }
839
840                 /* Cache the latest backtracking point */
841                 set_latest_backtrack(prev);
842
843                 /* If this is a new backtracking point, mark the tree */
844                 if (!node->set_backtrack(tid))
845                         continue;
846                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
847                                         id_to_int(prev->get_tid()),
848                                         id_to_int(t->get_id()));
849                 if (DBG_ENABLED()) {
850                         prev->print();
851                         act->print();
852                 }
853         }
854 }
855
856 /**
857  * @brief Cache a backtracking point as the "most recent", if eligible
858  *
859  * Note that this does not prepare the NodeStack for this backtracking
860  * operation; it only caches the action on a per-execution basis.
861  *
862  * @param act The operation at which we should explore a different next action
863  * (i.e., backtracking point)
864  * @return True, if this action is now the most recent backtracking point;
865  * false otherwise
866  */
867 bool ModelChecker::set_latest_backtrack(ModelAction *act)
868 {
869         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
870                 priv->next_backtrack = act;
871                 return true;
872         }
873         return false;
874 }
875
876 /**
877  * Returns last backtracking point. The model checker will explore a different
878  * path for this point in the next execution.
879  * @return The ModelAction at which the next execution should diverge.
880  */
881 ModelAction * ModelChecker::get_next_backtrack()
882 {
883         ModelAction *next = priv->next_backtrack;
884         priv->next_backtrack = NULL;
885         return next;
886 }
887
888 /**
889  * Processes a read model action.
890  * @param curr is the read model action to process.
891  * @return True if processing this read updates the mo_graph.
892  */
893 bool ModelChecker::process_read(ModelAction *curr)
894 {
895         Node *node = curr->get_node();
896         while (true) {
897                 bool updated = false;
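                /* A read may take its value from a write already in the trace
                 * (READ_FROM_PAST), from an outstanding Promise (READ_FROM_PROMISE),
                 * or speculatively from a future write (READ_FROM_FUTURE), which
                 * creates a new Promise. */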
898                 switch (node->get_read_from_status()) {
899                 case READ_FROM_PAST: {
900                         const ModelAction *rf = node->get_read_from_past();
901                         ASSERT(rf);
902
903                         mo_graph->startChanges();
904
905                         ASSERT(!is_infeasible());
906                         if (!check_recency(curr, rf)) {
907                                 if (node->increment_read_from()) {
908                                         mo_graph->rollbackChanges();
909                                         continue;
910                                 } else {
911                                         priv->too_many_reads = true;
912                                 }
913                         }
914
915                         updated = r_modification_order(curr, rf);
916                         read_from(curr, rf);
917                         mo_graph->commitChanges();
918                         mo_check_promises(curr, true);
919                         break;
920                 }
921                 case READ_FROM_PROMISE: {
922                         Promise *promise = curr->get_node()->get_read_from_promise();
923                         if (promise->add_reader(curr))
924                                 priv->failed_promise = true;
925                         curr->set_read_from_promise(promise);
926                         mo_graph->startChanges();
927                         if (!check_recency(curr, promise))
928                                 priv->too_many_reads = true;
929                         updated = r_modification_order(curr, promise);
930                         mo_graph->commitChanges();
931                         break;
932                 }
933                 case READ_FROM_FUTURE: {
934                         /* Read from future value */
935                         struct future_value fv = node->get_future_value();
936                         Promise *promise = new Promise(curr, fv);
937                         curr->set_read_from_promise(promise);
938                         promises->push_back(promise);
939                         mo_graph->startChanges();
940                         updated = r_modification_order(curr, promise);
941                         mo_graph->commitChanges();
942                         break;
943                 }
944                 default:
945                         ASSERT(false);
946                 }
947                 get_thread(curr)->set_return_value(curr->get_return_value());
948                 return updated;
949         }
950 }
951
952 /**
953  * Processes a lock, trylock, or unlock model action.  @param curr is
954  * the mutex model action to process.
955  *
956  * The trylock operation checks whether the lock is already held.  If not,
957  * it falls through to the normal lock case.  If so, the trylock fails and
958  * returns 0.
959  *
960  * The lock operation has already been checked that it is enabled, so
961  * it just grabs the lock and synchronizes with the previous unlock.
962  *
963  * The unlock operation has to re-enable all of the threads that are
964  * waiting on the lock.
965  *
966  * @return True if synchronization was updated; false otherwise
967  */
968 bool ModelChecker::process_mutex(ModelAction *curr)
969 {
970         std::mutex *mutex = curr->get_mutex();
971         struct std::mutex_state *state = NULL;
972
973         if (mutex)
974                 state = mutex->get_state();
975
976         switch (curr->get_type()) {
977         case ATOMIC_TRYLOCK: {
978                 bool success = !state->locked;
979                 curr->set_try_lock(success);
980                 if (!success) {
981                         get_thread(curr)->set_return_value(0);
982                         break;
983                 }
984                 get_thread(curr)->set_return_value(1);
985         }
986                 //otherwise fall through to the lock case
987         case ATOMIC_LOCK: {
988                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
989                         assert_bug("Lock access before initialization");
990                 state->locked = get_thread(curr);
991                 ModelAction *unlock = get_last_unlock(curr);
992                 //synchronize with the previous unlock statement
993                 if (unlock != NULL) {
994                         curr->synchronize_with(unlock);
995                         return true;
996                 }
997                 break;
998         }
999         case ATOMIC_UNLOCK: {
1000                 //unlock the lock
1001                 state->locked = NULL;
1002                 //wake up the other threads
1003                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
1004                 //activate all the waiting threads
1005                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1006                         scheduler->wake(get_thread(*rit));
1007                 }
1008                 waiters->clear();
1009                 break;
1010         }
1011         case ATOMIC_WAIT: {
1012                 //unlock the lock
1013                 state->locked = NULL;
1014                 //wake up the other threads
1015                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
1016                 //activate all the waiting threads
1017                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1018                         scheduler->wake(get_thread(*rit));
1019                 }
1020                 waiters->clear();
1021                 //check whether we should go to sleep or not...simulate spurious failures
1022                 if (curr->get_node()->get_misc() == 0) {
1023                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1024                         //disable us
1025                         scheduler->sleep(get_thread(curr));
1026                 }
1027                 break;
1028         }
1029         case ATOMIC_NOTIFY_ALL: {
1030                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1031                 //activate all the waiting threads
1032                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1033                         scheduler->wake(get_thread(*rit));
1034                 }
1035                 waiters->clear();
1036                 break;
1037         }
1038         case ATOMIC_NOTIFY_ONE: {
1039                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
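                /* The node's misc counter selects which waiting thread to wake,
                 * so different choices are explored across executions. */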
1040                 int wakeupthread = curr->get_node()->get_misc();
1041                 action_list_t::iterator it = waiters->begin();
1042                 advance(it, wakeupthread);
1043                 scheduler->wake(get_thread(*it));
1044                 waiters->erase(it);
1045                 break;
1046         }
1047
1048         default:
1049                 ASSERT(0);
1050         }
1051         return false;
1052 }
1053
1054 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1055 {
1056         /* Do more ambitious checks now that mo is more complete */
1057         if (mo_may_allow(writer, reader)) {
1058                 Node *node = reader->get_node();
1059
1060                 /* Find an ancestor thread which exists at the time of the reader */
1061                 Thread *write_thread = get_thread(writer);
1062                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1063                         write_thread = write_thread->get_parent();
1064
1065                 struct future_value fv = {
1066                         writer->get_write_value(),
1067                         writer->get_seq_number() + params.maxfuturedelay,
1068                         write_thread->get_id(),
1069                 };
1070                 if (node->add_future_value(fv))
1071                         set_latest_backtrack(reader);
1072         }
1073 }
1074
1075 /**
1076  * Process a write ModelAction
1077  * @param curr The ModelAction to process
1078  * @return True if the mo_graph was updated or promises were resolved
1079  */
1080 bool ModelChecker::process_write(ModelAction *curr)
1081 {
1082         /* Readers to which we may send our future value */
1083         ModelVector<ModelAction *> send_fv;
1084
1085         bool updated_mod_order = w_modification_order(curr, &send_fv);
1086         int promise_idx = get_promise_to_resolve(curr);
1087         const ModelAction *earliest_promise_reader;
1088         bool updated_promises = false;
1089
1090         if (promise_idx >= 0) {
1091                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1092                 updated_promises = resolve_promise(curr, promise_idx);
1093         } else
1094                 earliest_promise_reader = NULL;
1095
1096         /* Don't send future values to reads after the Promise we resolve */
1097         for (unsigned int i = 0; i < send_fv.size(); i++) {
1098                 ModelAction *read = send_fv[i];
1099                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1100                         futurevalues->push_back(PendingFutureValue(curr, read));
1101         }
1102
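        /* Once no promises remain outstanding, flush the queued future values to their readers */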
1103         if (promises->size() == 0) {
1104                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1105                         struct PendingFutureValue pfv = (*futurevalues)[i];
1106                         add_future_value(pfv.writer, pfv.act);
1107                 }
1108                 futurevalues->clear();
1109         }
1110
1111         mo_graph->commitChanges();
1112         mo_check_promises(curr, false);
1113
1114         get_thread(curr)->set_return_value(VALUE_NONE);
1115         return updated_mod_order || updated_promises;
1116 }
1117
1118 /**
1119  * Process a fence ModelAction
1120  * @param curr The ModelAction to process
1121  * @return True if synchronization was updated
1122  */
1123 bool ModelChecker::process_fence(ModelAction *curr)
1124 {
1125         /*
1126          * fence-relaxed: no-op
1127  * fence-release: only log the occurrence (not in this function), for
1128          *   use in later synchronization
1129          * fence-acquire (this function): search for hypothetical release
1130          *   sequences
1131          */
1132         bool updated = false;
1133         if (curr->is_acquire()) {
1134                 action_list_t *list = action_trace;
1135                 action_list_t::reverse_iterator rit;
1136                 /* Find X : is_read(X) && X --sb-> curr */
1137                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1138                         ModelAction *act = *rit;
1139                         if (act == curr)
1140                                 continue;
1141                         if (act->get_tid() != curr->get_tid())
1142                                 continue;
1143                         /* Stop at the beginning of the thread */
1144                         if (act->is_thread_start())
1145                                 break;
1146                         /* Stop once we reach a prior fence-acquire */
1147                         if (act->is_fence() && act->is_acquire())
1148                                 break;
1149                         if (!act->is_read())
1150                                 continue;
1151                         /* read-acquire will find its own release sequences */
1152                         if (act->is_acquire())
1153                                 continue;
1154
1155                         /* Establish hypothetical release sequences */
1156                         rel_heads_list_t release_heads;
1157                         get_release_seq_heads(curr, act, &release_heads);
1158                         for (unsigned int i = 0; i < release_heads.size(); i++)
1159                                 if (!curr->synchronize_with(release_heads[i]))
1160                                         set_bad_synchronization();
1161                         if (release_heads.size() != 0)
1162                                 updated = true;
1163                 }
1164         }
1165         return updated;
1166 }
1167
1168 /**
1169  * @brief Process the current action for thread-related activity
1170  *
1171  * Performs current-action processing for a THREAD_* ModelAction. Processing
1172  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1173  * synchronization, etc.  This function is a no-op for non-THREAD actions
1174  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1175  *
1176  * @param curr The current action
1177  * @return True if synchronization was updated or a thread completed
1178  */
1179 bool ModelChecker::process_thread_action(ModelAction *curr)
1180 {
1181         bool updated = false;
1182
1183         switch (curr->get_type()) {
1184         case THREAD_CREATE: {
1185                 thrd_t *thrd = (thrd_t *)curr->get_location();
1186                 struct thread_params *params = (struct thread_params *)curr->get_value();
1187                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1188                 add_thread(th);
1189                 th->set_creation(curr);
1190                 /* Promises can be satisfied by children */
1191                 for (unsigned int i = 0; i < promises->size(); i++) {
1192                         Promise *promise = (*promises)[i];
1193                         if (promise->thread_is_available(curr->get_tid()))
1194                                 promise->add_thread(th->get_id());
1195                 }
1196                 break;
1197         }
1198         case THREAD_JOIN: {
1199                 Thread *blocking = curr->get_thread_operand();
1200                 ModelAction *act = get_last_action(blocking->get_id());
1201                 curr->synchronize_with(act);
1202                 updated = true; /* trigger rel-seq checks */
1203                 break;
1204         }
1205         case THREAD_FINISH: {
1206                 Thread *th = get_thread(curr);
1207                 while (!th->wait_list_empty()) {
1208                         ModelAction *act = th->pop_wait_list();
1209                         scheduler->wake(get_thread(act));
1210                 }
1211                 th->complete();
1212                 /* Completed thread can't satisfy promises */
1213                 for (unsigned int i = 0; i < promises->size(); i++) {
1214                         Promise *promise = (*promises)[i];
1215                         if (promise->thread_is_available(th->get_id()))
1216                                 if (promise->eliminate_thread(th->get_id()))
1217                                         priv->failed_promise = true;
1218                 }
1219                 updated = true; /* trigger rel-seq checks */
1220                 break;
1221         }
1222         case THREAD_START: {
1223                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1224                 break;
1225         }
1226         default:
1227                 break;
1228         }
1229
1230         return updated;
1231 }
1232
1233 /**
1234  * @brief Process the current action for release sequence fixup activity
1235  *
1236  * Performs model-checker release sequence fixups for the current action,
1237  * forcing a single pending release sequence to break (with a given, potential
1238  * "loose" write) or to complete (i.e., synchronize). If a pending release
1239  * sequence forms a complete release sequence, then we must perform the fixup
1240  * synchronization, mo_graph additions, etc.
1241  *
1242  * @param curr The current action; must be a release sequence fixup action
1243  * @param work_queue The work queue to which to add work items as they are
1244  * generated
1245  */
1246 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1247 {
1248         const ModelAction *write = curr->get_node()->get_relseq_break();
1249         struct release_seq *sequence = pending_rel_seqs->back();
1250         pending_rel_seqs->pop_back();
1251         ASSERT(sequence);
1252         ModelAction *acquire = sequence->acquire;
1253         const ModelAction *rf = sequence->rf;
1254         const ModelAction *release = sequence->release;
1255         ASSERT(acquire);
1256         ASSERT(release);
1257         ASSERT(rf);
1258         ASSERT(release->same_thread(rf));
1259
1260         if (write == NULL) {
1261                 /**
1262                  * @todo Forcing a synchronization requires that we set
1263                  * modification order constraints. For instance, we can't allow
1264                  * a fixup sequence in which two separate read-acquire
1265                  * operations read from the same sequence, where the first one
1266                  * synchronizes and the other doesn't. Essentially, we can't
1267                  * allow any writes to insert themselves between 'release' and
1268                  * 'rf'
1269                  */
1270
1271                 /* Must synchronize */
1272                 if (!acquire->synchronize_with(release)) {
1273                         set_bad_synchronization();
1274                         return;
1275                 }
1276                 /* Re-check all pending release sequences */
1277                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1278                 /* Re-check act for mo_graph edges */
1279                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1280
1281                 /* propagate synchronization to later actions */
1282                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1283                 for (; (*rit) != acquire; rit++) {
1284                         ModelAction *propagate = *rit;
1285                         if (acquire->happens_before(propagate)) {
1286                                 propagate->synchronize_with(acquire);
1287                                 /* Re-check 'propagate' for mo_graph edges */
1288                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1289                         }
1290                 }
1291         } else {
1292                 /* Break release sequence with new edges:
1293                  *   release --mo--> write --mo--> rf */
1294                 mo_graph->addEdge(release, write);
1295                 mo_graph->addEdge(write, rf);
1296         }
1297
1298         /* See if we have realized a data race */
1299         checkDataRaces();
1300 }
1301
1302 /**
1303  * Initialize the current action by performing one or more of the following
1304  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1305  * in the NodeStack, manipulating backtracking sets, allocating and
1306  * initializing clock vectors, and computing the promises to fulfill.
1307  *
1308  * @param curr The current action, as passed from the user context; may be
1309  * freed/invalidated after the execution of this function, with a different
1310  * action "returned" in its place (pass-by-reference)
1311  * @return True if curr is a newly-explored action; false otherwise
1312  */
1313 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1314 {
1315         ModelAction *newcurr;
1316
1317         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1318                 newcurr = process_rmw(*curr);
1319                 delete *curr;
1320
1321                 if (newcurr->is_rmw())
1322                         compute_promises(newcurr);
1323
1324                 *curr = newcurr;
1325                 return false;
1326         }
1327
1328         (*curr)->set_seq_number(get_next_seq_num());
1329
1330         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
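        /* explore_action() yields the existing ModelAction when this step was
         * already visited in a previous execution (replay); it yields NULL when
         * this is a brand-new step. */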
1331         if (newcurr) {
1332                 /* First restore type and order in case of RMW operation */
1333                 if ((*curr)->is_rmwr())
1334                         newcurr->copy_typeandorder(*curr);
1335
1336                 ASSERT((*curr)->get_location() == newcurr->get_location());
1337                 newcurr->copy_from_new(*curr);
1338
1339                 /* Discard duplicate ModelAction; use action from NodeStack */
1340                 delete *curr;
1341
1342                 /* Always compute new clock vector */
1343                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1344
1345                 *curr = newcurr;
1346                 return false; /* Action was explored previously */
1347         } else {
1348                 newcurr = *curr;
1349
1350                 /* Always compute new clock vector */
1351                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1352
1353                 /* Assign most recent release fence */
1354                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1355
1356                 /*
1357                  * Perform one-time actions when pushing new ModelAction onto
1358                  * NodeStack
1359                  */
1360                 if (newcurr->is_write())
1361                         compute_promises(newcurr);
1362                 else if (newcurr->is_relseq_fixup())
1363                         compute_relseq_breakwrites(newcurr);
1364                 else if (newcurr->is_wait())
1365                         newcurr->get_node()->set_misc_max(2);
1366                 else if (newcurr->is_notify_one()) {
1367                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1368                 }
1369                 return true; /* This was a new ModelAction */
1370         }
1371 }
1372
1373 /**
1374  * @brief Establish reads-from relation between two actions
1375  *
1376  * Perform basic operations involved with establishing a concrete rf relation,
1377  * including setting the ModelAction data and checking for release sequences.
1378  *
1379  * @param act The action that is reading (must be a read)
1380  * @param rf The action from which we are reading (must be a write)
1381  *
1382  * @return True if this read established synchronization
1383  */
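/* Illustrative sketch (hypothetical user program, not part of the checker):
 * the pattern this function handles is an acquire load reading from a
 * release store, e.g.
 *
 *   int data;
 *   std::atomic<int> flag(0);
 *   // Thread 1:
 *   data = 42;
 *   flag.store(1, std::memory_order_release);
 *   // Thread 2:
 *   if (flag.load(std::memory_order_acquire) == 1)
 *           assert(data == 42);  // must hold once the load reads 1
 *
 * When the acquire load reads from the release store, read_from() locates the
 * release-sequence head(s) and calls synchronize_with(), establishing the
 * synchronizes-with (and hence happens-before) relation. */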
1384 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1385 {
1386         ASSERT(rf);
1387         ASSERT(rf->is_write());
1388
1389         act->set_read_from(rf);
1390         if (act->is_acquire()) {
1391                 rel_heads_list_t release_heads;
1392                 get_release_seq_heads(act, act, &release_heads);
1393                 int num_heads = release_heads.size();
1394                 for (unsigned int i = 0; i < release_heads.size(); i++)
1395                         if (!act->synchronize_with(release_heads[i])) {
1396                                 set_bad_synchronization();
1397                                 num_heads--;
1398                         }
1399                 return num_heads > 0;
1400         }
1401         return false;
1402 }
1403
1404 /**
1405  * Check promises and eliminate potentially-satisfying threads when a thread is
1406  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1407  * no longer satisfy a promise generated from that thread.
1408  *
1409  * @param blocker The thread on which a thread is waiting
1410  * @param waiting The waiting thread
1411  */
1412 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1413 {
1414         for (unsigned int i = 0; i < promises->size(); i++) {
1415                 Promise *promise = (*promises)[i];
1416                 if (!promise->thread_is_available(waiting->get_id()))
1417                         continue;
1418                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1419                         ModelAction *reader = promise->get_reader(j);
1420                         if (reader->get_tid() != blocker->get_id())
1421                                 continue;
1422                         if (promise->eliminate_thread(waiting->get_id())) {
1423                                 /* Promise has failed */
1424                                 priv->failed_promise = true;
1425                         } else {
1426                                 /* Only eliminate the 'waiting' thread once */
1427                                 return;
1428                         }
1429                 }
1430         }
1431 }
1432
1433 /**
1434  * @brief Check whether a model action is enabled.
1435  *
1436  * Checks whether a lock or join operation would succeed (i.e., whether the
1437  * lock is currently unlocked, or whether the joined thread has already
1438  * completed). If not, the action is placed in a waiter list.
1439  *
1440  * @param curr The ModelAction to check for enablement.
1441  * @return True if the action is enabled; false otherwise.
1442  */
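/* Illustrative sketch (hypothetical user program, not part of the checker):
 *
 *   std::mutex m;   // the std::mutex replacement provided by the checker's runtime
 *   // Thread 1:
 *   m.lock();
 *   // Thread 2:
 *   m.lock();       // not enabled while Thread 1 holds m
 *
 * Thread 2's lock action is parked in lock_waiters_map and its thread is put
 * to sleep (see check_current_action()) until a later unlock makes the action
 * enabled again. */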
1443 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1444         if (curr->is_lock()) {
1445                 std::mutex *lock = (std::mutex *)curr->get_location();
1446                 struct std::mutex_state *state = lock->get_state();
1447                 if (state->locked) {
1448                         //Stick the action in the appropriate waiting queue
1449                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1450                         return false;
1451                 }
1452         } else if (curr->get_type() == THREAD_JOIN) {
1453                 Thread *blocking = (Thread *)curr->get_location();
1454                 if (!blocking->is_complete()) {
1455                         blocking->push_wait_list(curr);
1456                         thread_blocking_check_promises(blocking, get_thread(curr));
1457                         return false;
1458                 }
1459         }
1460
1461         return true;
1462 }
1463
1464 /**
1465  * This is the heart of the model checker routine. It performs model-checking
1466  * actions corresponding to a given "current action." Among other tasks, it
1467  * calculates reads-from relationships, updates synchronization clock vectors,
1468  * forms a memory_order constraints graph, and handles replay/backtrack
1469  * execution when running permutations of previously-observed executions.
1470  *
1471  * @param curr The current action to process
1472  * @return The ModelAction that is actually executed; may be different than
1473  * curr; may be NULL, if the current action is not enabled to run
1474  */
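/* Rough shape of the work-queue fixpoint below (descriptive sketch only):
 *
 *   work_queue = { CHECK_CURR_ACTION(curr) }
 *   while work_queue is non-empty and no bug has been asserted:
 *     CHECK_CURR_ACTION  -> process thread/read/write/fence/mutex actions;
 *                           may enqueue CHECK_RELEASE_SEQ work
 *     CHECK_RELEASE_SEQ  -> resolve_release_sequences(); may enqueue
 *                           CHECK_MO_EDGES work for affected actions
 *     CHECK_MO_EDGES     -> recompute r/w modification order for an action;
 *                           may enqueue CHECK_RELEASE_SEQ work
 *
 * Each pass only enqueues more work when it adds new synchronization or new
 * mo_graph edges, both of which are bounded for a fixed trace. */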
1475 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1476 {
1477         ASSERT(curr);
1478         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1479
1480         if (!check_action_enabled(curr)) {
1481                 /* Make the execution look like we chose to run this action
1482                  * much later, when a lock/join can succeed */
1483                 get_thread(curr)->set_pending(curr);
1484                 scheduler->sleep(get_thread(curr));
1485                 return NULL;
1486         }
1487
1488         bool newly_explored = initialize_curr_action(&curr);
1489
1490         DBG();
1491         if (DBG_ENABLED())
1492                 curr->print();
1493
1494         wake_up_sleeping_actions(curr);
1495
1496         /* Compute fairness information for CHESS yield algorithm */
1497         if (model->params.yieldon) {
1498                 curr->get_node()->update_yield(scheduler);
1499         }
1500
1501         /* Add the action to lists before any other model-checking tasks */
1502         if (!second_part_of_rmw)
1503                 add_action_to_lists(curr);
1504
1505         /* Build may_read_from set for newly-created actions */
1506         if (newly_explored && curr->is_read())
1507                 build_may_read_from(curr);
1508
1509         /* Initialize work_queue with the "current action" work */
1510         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1511         while (!work_queue.empty() && !has_asserted()) {
1512                 WorkQueueEntry work = work_queue.front();
1513                 work_queue.pop_front();
1514
1515                 switch (work.type) {
1516                 case WORK_CHECK_CURR_ACTION: {
1517                         ModelAction *act = work.action;
1518                         bool update = false; /* update this location's release seq's */
1519                         bool update_all = false; /* update all release seq's */
1520
1521                         if (process_thread_action(curr))
1522                                 update_all = true;
1523
1524                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1525                                 update = true;
1526
1527                         if (act->is_write() && process_write(act))
1528                                 update = true;
1529
1530                         if (act->is_fence() && process_fence(act))
1531                                 update_all = true;
1532
1533                         if (act->is_mutex_op() && process_mutex(act))
1534                                 update_all = true;
1535
1536                         if (act->is_relseq_fixup())
1537                                 process_relseq_fixup(curr, &work_queue);
1538
1539                         if (update_all)
1540                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1541                         else if (update)
1542                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1543                         break;
1544                 }
1545                 case WORK_CHECK_RELEASE_SEQ:
1546                         resolve_release_sequences(work.location, &work_queue);
1547                         break;
1548                 case WORK_CHECK_MO_EDGES: {
1549                         /** @todo Complete verification of work_queue */
1550                         ModelAction *act = work.action;
1551                         bool updated = false;
1552
1553                         if (act->is_read()) {
1554                                 const ModelAction *rf = act->get_reads_from();
1555                                 const Promise *promise = act->get_reads_from_promise();
1556                                 if (rf) {
1557                                         if (r_modification_order(act, rf))
1558                                                 updated = true;
1559                                 } else if (promise) {
1560                                         if (r_modification_order(act, promise))
1561                                                 updated = true;
1562                                 }
1563                         }
1564                         if (act->is_write()) {
1565                                 if (w_modification_order(act, NULL))
1566                                         updated = true;
1567                         }
1568                         mo_graph->commitChanges();
1569
1570                         if (updated)
1571                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1572                         break;
1573                 }
1574                 default:
1575                         ASSERT(false);
1576                         break;
1577                 }
1578         }
1579
1580         check_curr_backtracking(curr);
1581         set_backtracking(curr);
1582         return curr;
1583 }
1584
1585 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1586 {
1587         Node *currnode = curr->get_node();
1588         Node *parnode = currnode->get_parent();
1589
1590         if ((parnode && !parnode->backtrack_empty()) ||
1591                          !currnode->misc_empty() ||
1592                          !currnode->read_from_empty() ||
1593                          !currnode->promise_empty() ||
1594                          !currnode->relseq_break_empty()) {
1595                 set_latest_backtrack(curr);
1596         }
1597 }
1598
1599 bool ModelChecker::promises_expired() const
1600 {
1601         for (unsigned int i = 0; i < promises->size(); i++) {
1602                 Promise *promise = (*promises)[i];
1603                 if (promise->get_expiration() < priv->used_sequence_numbers)
1604                         return true;
1605         }
1606         return false;
1607 }
1608
1609 /**
1610  * This is the strongest feasibility check available.
1611  * @return whether the current trace (partial or complete) must be a prefix of
1612  * a feasible trace.
1613  */
1614 bool ModelChecker::isfeasibleprefix() const
1615 {
1616         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1617 }
1618
1619 /**
1620  * Print diagnostic information about an infeasible execution
1621  * @param prefix A string to prefix the output with; if NULL, then a default
1622  * message prefix will be provided
1623  */
1624 void ModelChecker::print_infeasibility(const char *prefix) const
1625 {
1626	char buf[200];
1627         char *ptr = buf;
1628         if (mo_graph->checkForCycles())
1629                 ptr += sprintf(ptr, "[mo cycle]");
1630         if (priv->failed_promise)
1631                 ptr += sprintf(ptr, "[failed promise]");
1632         if (priv->too_many_reads)
1633                 ptr += sprintf(ptr, "[too many reads]");
1634         if (priv->no_valid_reads)
1635                 ptr += sprintf(ptr, "[no valid reads-from]");
1636         if (priv->bad_synchronization)
1637                 ptr += sprintf(ptr, "[bad sw ordering]");
1638         if (promises_expired())
1639                 ptr += sprintf(ptr, "[promise expired]");
1640         if (promises->size() != 0)
1641                 ptr += sprintf(ptr, "[unresolved promise]");
1642         if (ptr != buf)
1643                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1644 }
1645
1646 /**
1647  * Returns whether the current completed trace is feasible, except for pending
1648  * release sequences.
1649  */
1650 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1651 {
1652         return !is_infeasible() && promises->size() == 0;
1653 }
1654
1655 /**
1656  * Check if the current partial trace is infeasible. Does not check any
1657  * end-of-execution flags, which might rule out the execution. Thus, this is
1658  * useful only for ruling an execution as infeasible.
1659  * @return whether the current partial trace is infeasible.
1660  */
1661 bool ModelChecker::is_infeasible() const
1662 {
1663         return mo_graph->checkForCycles() ||
1664                 priv->no_valid_reads ||
1665                 priv->failed_promise ||
1666                 priv->too_many_reads ||
1667                 priv->bad_synchronization ||
1668                 promises_expired();
1669 }
1670
1671 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1672 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1673         ModelAction *lastread = get_last_action(act->get_tid());
1674         lastread->process_rmw(act);
1675         if (act->is_rmw()) {
1676                 if (lastread->get_reads_from())
1677                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1678                 else
1679                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1680                 mo_graph->commitChanges();
1681         }
1682         return lastread;
1683 }
1684
1685 /**
1686  * A helper function for ModelChecker::check_recency, to check if the current
1687  * thread is able to read from a different write/promise for 'params.maxreads'
1688  * number of steps and if that write/promise should become visible (i.e., is
1689  * ordered later in the modification order). This helps model memory liveness.
1690  *
1691  * @param curr The current action. Must be a read.
1692  * @param rf The write/promise from which we plan to read
1693  * @param other_rf The write/promise from which we may read
1694  * @return True if we were able to read from other_rf for params.maxreads steps
1695  */
1696 template <typename T, typename U>
1697 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1698 {
1699         /* Need a different write/promise */
1700         if (other_rf->equals(rf))
1701                 return false;
1702
1703         /* Only look for "newer" writes/promises */
1704         if (!mo_graph->checkReachable(rf, other_rf))
1705                 return false;
1706
1707         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1708         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1709         action_list_t::reverse_iterator rit = list->rbegin();
1710         ASSERT((*rit) == curr);
1711         /* Skip past curr */
1712         rit++;
1713
1714         /* Does this write/promise work for everyone? */
1715         for (int i = 0; i < params.maxreads; i++, rit++) {
1716                 ModelAction *act = *rit;
1717                 if (!act->may_read_from(other_rf))
1718                         return false;
1719         }
1720         return true;
1721 }
1722
1723 /**
1724  * Checks whether a thread has read from the same write or Promise for too many
1725  * times without seeing the effects of a later write/Promise.
1726  *
1727  * Basic idea:
1728  * 1) there must be a different write/promise that we could read from,
1729  * 2) we must have read from the same write/promise in excess of maxreads times,
1730  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1731  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1732  *
1733  * If so, we decide that the execution is no longer feasible.
1734  *
1735  * @param curr The current action. Must be a read.
1736  * @param rf The ModelAction/Promise from which we might read.
1737  * @return True if the read should succeed; false otherwise
1738  */
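/* Illustrative sketch (hypothetical user program, not part of the checker):
 * with params.maxreads enabled, a relaxed spin loop such as
 *
 *   std::atomic<int> flag(0);
 *   // Thread 1:
 *   while (flag.load(std::memory_order_relaxed) == 0)
 *           ;
 *   // Thread 2:
 *   flag.store(1, std::memory_order_relaxed);
 *
 * should not keep reading the initial value forever once the store becomes
 * visible (i.e., is mod-ordered after the write being read). After maxreads
 * identical reads with such a newer write available, check_recency() returns
 * false (a liveness failure). */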
1739 template <typename T>
1740 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1741 {
1742         if (!params.maxreads)
1743                 return true;
1744
1745	//NOTE: The next check is just an optimization, not strictly necessary
1746         if (curr->get_node()->get_read_from_past_size() +
1747                         curr->get_node()->get_read_from_promise_size() <= 1)
1748                 return true;
1749
1750         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1751         int tid = id_to_int(curr->get_tid());
1752         ASSERT(tid < (int)thrd_lists->size());
1753         action_list_t *list = &(*thrd_lists)[tid];
1754         action_list_t::reverse_iterator rit = list->rbegin();
1755         ASSERT((*rit) == curr);
1756         /* Skip past curr */
1757         rit++;
1758
1759         action_list_t::reverse_iterator ritcopy = rit;
1760         /* See if we have enough reads from the same value */
1761         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1762                 if (ritcopy == list->rend())
1763                         return true;
1764                 ModelAction *act = *ritcopy;
1765                 if (!act->is_read())
1766                         return true;
1767                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1768                         return true;
1769                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1770                         return true;
1771                 if (act->get_node()->get_read_from_past_size() +
1772                                 act->get_node()->get_read_from_promise_size() <= 1)
1773                         return true;
1774         }
1775         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1776                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1777                 if (should_read_instead(curr, rf, write))
1778                         return false; /* liveness failure */
1779         }
1780         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1781                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1782                 if (should_read_instead(curr, rf, promise))
1783                         return false; /* liveness failure */
1784         }
1785         return true;
1786 }
1787
1788 /**
1789  * Updates the mo_graph with the constraints imposed from the current
1790  * read.
1791  *
1792  * Basic idea is the following: Go through each other thread and find
1793  * the last action that happened before our read.  Two cases:
1794  *
1795  * (1) The action is a write => that write must either occur before
1796  * the write we read from or be the write we read from.
1797  *
1798  * (2) The action is a read => the write that that action read from
1799  * must occur before the write we read from or be the same write.
1800  *
1801  * @param curr The current action. Must be a read.
1802  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1803  * @return True if modification order edges were added; false otherwise
1804  */
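/* Illustrative sketch (hypothetical user program, not part of the checker)
 * for case (1) above:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);       // act
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);   // curr, reads from some write rf
 *
 * If the store happens before the load (via some other synchronization) and
 * the load reads from a different write rf, we must add  act --mo--> rf ;
 * otherwise the load would read a value "older" than one it is already
 * guaranteed to have seen (a coherence violation). */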
1805 template <typename rf_type>
1806 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1807 {
1808         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1809         unsigned int i;
1810         bool added = false;
1811         ASSERT(curr->is_read());
1812
1813         /* Last SC fence in the current thread */
1814         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1815         ModelAction *last_sc_write = NULL;
1816         if (curr->is_seqcst())
1817                 last_sc_write = get_last_seq_cst_write(curr);
1818
1819         /* Iterate over all threads */
1820         for (i = 0; i < thrd_lists->size(); i++) {
1821                 /* Last SC fence in thread i */
1822                 ModelAction *last_sc_fence_thread_local = NULL;
1823                 if (int_to_id((int)i) != curr->get_tid())
1824                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1825
1826                 /* Last SC fence in thread i, before last SC fence in current thread */
1827                 ModelAction *last_sc_fence_thread_before = NULL;
1828                 if (last_sc_fence_local)
1829                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1830
1831                 /* Iterate over actions in thread, starting from most recent */
1832                 action_list_t *list = &(*thrd_lists)[i];
1833                 action_list_t::reverse_iterator rit;
1834                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1835                         ModelAction *act = *rit;
1836
1837                         /* Skip curr */
1838                         if (act == curr)
1839                                 continue;
1840                         /* Don't want to add reflexive edges on 'rf' */
1841                         if (act->equals(rf)) {
1842                                 if (act->happens_before(curr))
1843                                         break;
1844                                 else
1845                                         continue;
1846                         }
1847
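			/* The next three cases implement the constraints that
			 * seq_cst operations and fences place on modification
			 * order (C++11 29.3, statements 4-6). Informal reading:
			 * when the write 'act' and the read 'curr' are separated
			 * by SC fences (and/or are themselves seq_cst in the
			 * required pattern), 'act' must be mod-ordered before
			 * the write 'curr' reads from, so we add act --mo--> rf. */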
1848                         if (act->is_write()) {
1849                                 /* C++, Section 29.3 statement 5 */
1850                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1851                                                 *act < *last_sc_fence_thread_local) {
1852                                         added = mo_graph->addEdge(act, rf) || added;
1853                                         break;
1854                                 }
1855                                 /* C++, Section 29.3 statement 4 */
1856                                 else if (act->is_seqcst() && last_sc_fence_local &&
1857                                                 *act < *last_sc_fence_local) {
1858                                         added = mo_graph->addEdge(act, rf) || added;
1859                                         break;
1860                                 }
1861                                 /* C++, Section 29.3 statement 6 */
1862                                 else if (last_sc_fence_thread_before &&
1863                                                 *act < *last_sc_fence_thread_before) {
1864                                         added = mo_graph->addEdge(act, rf) || added;
1865                                         break;
1866                                 }
1867                         }
1868
1869                         /* C++, Section 29.3 statement 3 (second subpoint) */
1870                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1871                                 added = mo_graph->addEdge(act, rf) || added;
1872                                 break;
1873                         }
1874
1875                         /*
1876                          * Include at most one act per-thread that "happens
1877                          * before" curr
1878                          */
1879                         if (act->happens_before(curr)) {
1880                                 if (act->is_write()) {
1881                                         added = mo_graph->addEdge(act, rf) || added;
1882                                 } else {
1883                                         const ModelAction *prevrf = act->get_reads_from();
1884                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1885                                         if (prevrf) {
1886                                                 if (!prevrf->equals(rf))
1887                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1888                                         } else if (!prevrf_promise->equals(rf)) {
1889                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1890                                         }
1891                                 }
1892                                 break;
1893                         }
1894                 }
1895         }
1896
1897         /*
1898          * All compatible, thread-exclusive promises must be ordered after any
1899          * concrete loads from the same thread
1900          */
1901         for (unsigned int i = 0; i < promises->size(); i++)
1902                 if ((*promises)[i]->is_compatible_exclusive(curr))
1903                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1904
1905         return added;
1906 }
1907
1908 /**
1909  * Updates the mo_graph with the constraints imposed from the current write.
1910  *
1911  * Basic idea is the following: Go through each other thread and find
1912  * the latest action that happened before our write.  Two cases:
1913  *
1914  * (1) The action is a write => that write must occur before
1915  * the current write
1916  *
1917  * (2) The action is a read => the write that that action read from
1918  * must occur before the current write.
1919  *
1920  * This method also handles two other issues:
1921  *
1922  * (I) Sequential Consistency: Making sure that if the current write is
1923  * seq_cst, that it occurs after the previous seq_cst write.
1924  *
1925  * (II) Sending the write back to non-synchronizing reads.
1926  *
1927  * @param curr The current action. Must be a write.
1928  * @param send_fv A vector for stashing reads to which we may pass our future
1929  * value. If NULL, then don't record any future values.
1930  * @return True if modification order edges were added; false otherwise
1931  */
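/* Illustrative sketch (hypothetical user program, not part of the checker)
 * for issue (I) above:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_seq_cst);
 *   // Thread 2:
 *   x.store(2, std::memory_order_seq_cst);
 *
 * Whichever store comes second in the seq_cst total order must also come
 * second in x's modification order, so the edge  last_seq_cst --mo--> curr
 * is added below before the per-thread scan. */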
1932 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1933 {
1934         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1935         unsigned int i;
1936         bool added = false;
1937         ASSERT(curr->is_write());
1938
1939         if (curr->is_seqcst()) {
1940		/* We have to at least see the last sequentially consistent
1941		 * write, so we are initialized. */
1942                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1943                 if (last_seq_cst != NULL) {
1944                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1945                 }
1946         }
1947
1948         /* Last SC fence in the current thread */
1949         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1950
1951         /* Iterate over all threads */
1952         for (i = 0; i < thrd_lists->size(); i++) {
1953                 /* Last SC fence in thread i, before last SC fence in current thread */
1954                 ModelAction *last_sc_fence_thread_before = NULL;
1955                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1956                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1957
1958                 /* Iterate over actions in thread, starting from most recent */
1959                 action_list_t *list = &(*thrd_lists)[i];
1960                 action_list_t::reverse_iterator rit;
1961                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1962                         ModelAction *act = *rit;
1963                         if (act == curr) {
1964                                 /*
1965                                  * 1) If RMW and it actually read from something, then we
1966                                  * already have all relevant edges, so just skip to next
1967                                  * thread.
1968                                  *
1969				 * 2) If RMW and it didn't read from anything, we should add
1970				 * whatever edge we can get to speed up convergence.
1971                                  *
1972                                  * 3) If normal write, we need to look at earlier actions, so
1973                                  * continue processing list.
1974                                  */
1975                                 if (curr->is_rmw()) {
1976                                         if (curr->get_reads_from() != NULL)
1977                                                 break;
1978                                         else
1979                                                 continue;
1980                                 } else
1981                                         continue;
1982                         }
1983
1984                         /* C++, Section 29.3 statement 7 */
1985                         if (last_sc_fence_thread_before && act->is_write() &&
1986                                         *act < *last_sc_fence_thread_before) {
1987                                 added = mo_graph->addEdge(act, curr) || added;
1988                                 break;
1989                         }
1990
1991                         /*
1992                          * Include at most one act per-thread that "happens
1993                          * before" curr
1994                          */
1995                         if (act->happens_before(curr)) {
1996                                 /*
1997                                  * Note: if act is RMW, just add edge:
1998                                  *   act --mo--> curr
1999                                  * The following edge should be handled elsewhere:
2000                                  *   readfrom(act) --mo--> act
2001                                  */
2002                                 if (act->is_write())
2003                                         added = mo_graph->addEdge(act, curr) || added;
2004                                 else if (act->is_read()) {
2005					//if the previous read has no reads-from yet (NULL), just keep going
2006                                         if (act->get_reads_from() == NULL)
2007                                                 continue;
2008                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2009                                 }
2010                                 break;
2011                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2012                                                      !act->same_thread(curr)) {
2013                                 /* We have an action that:
2014                                    (1) did not happen before us
2015                                    (2) is a read and we are a write
2016                                    (3) cannot synchronize with us
2017                                    (4) is in a different thread
2018                                    =>
2019                                    that read could potentially read from our write.  Note that
2020                                    these checks are overly conservative at this point, we'll
2021                                    do more checks before actually removing the
2022                                    pendingfuturevalue.
2023
2024                                  */
2025                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2026                                         if (!is_infeasible())
2027                                                 send_fv->push_back(act);
2028                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2029                                                 add_future_value(curr, act);
2030                                 }
2031                         }
2032                 }
2033         }
2034
2035         /*
2036          * All compatible, thread-exclusive promises must be ordered after any
2037          * concrete stores to the same thread, or else they can be merged with
2038          * this store later
2039          */
2040         for (unsigned int i = 0; i < promises->size(); i++)
2041                 if ((*promises)[i]->is_compatible_exclusive(curr))
2042                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2043
2044         return added;
2045 }
2046
2047 /** Arbitrary reads from the future are not allowed.  Section 29.3
2048  * part 9 places some constraints.  This method checks one result of that
2049  * constraint.  Others require compiler support. */
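/* Illustrative sketch (hypothetical user program, not part of the checker):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.exchange(1, std::memory_order_relaxed);   // RMW: reads and writes x
 *   // Thread 2:
 *   x.exchange(2, std::memory_order_relaxed);   // RMW: reads and writes x
 *
 * Letting each exchange read (via a future value) from the other would form
 * a reads-from cycle among RMWs; walking writer->get_reads_from() and
 * refusing once we reach 'reader' rules out that cycle. */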
2050 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
2051 {
2052         if (!writer->is_rmw())
2053                 return true;
2054
2055         if (!reader->is_rmw())
2056                 return true;
2057
2058         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2059                 if (search == reader)
2060                         return false;
2061                 if (search->get_tid() == reader->get_tid() &&
2062                                 search->happens_before(reader))
2063                         break;
2064         }
2065
2066         return true;
2067 }
2068
2069 /**
2070  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2071  * some constraints. This method checks the following constraint (others
2072  * require compiler support):
2073  *
2074  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2075  */
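/* Illustrative sketch (hypothetical user program, not part of the checker):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // X (reader)
 *   x.store(1, std::memory_order_relaxed);       // Y: X --hb--> Y
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);       // Z (candidate writer)
 *
 * If Y --mo--> Z, then X reading a future value from Z would require
 * Z --mo--> Y by read-write coherence, a contradiction, so the candidate is
 * rejected. */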
2076 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2077 {
2078         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2079         unsigned int i;
2080         /* Iterate over all threads */
2081         for (i = 0; i < thrd_lists->size(); i++) {
2082                 const ModelAction *write_after_read = NULL;
2083
2084                 /* Iterate over actions in thread, starting from most recent */
2085                 action_list_t *list = &(*thrd_lists)[i];
2086                 action_list_t::reverse_iterator rit;
2087                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2088                         ModelAction *act = *rit;
2089
2090                         /* Don't disallow due to act == reader */
2091                         if (!reader->happens_before(act) || reader == act)
2092                                 break;
2093                         else if (act->is_write())
2094                                 write_after_read = act;
2095                         else if (act->is_read() && act->get_reads_from() != NULL)
2096                                 write_after_read = act->get_reads_from();
2097                 }
2098
2099                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2100                         return false;
2101         }
2102         return true;
2103 }
2104
2105 /**
2106  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2107  * The ModelAction under consideration is expected to be taking part in
2108  * release/acquire synchronization as an object of the "reads from" relation.
2109  * Note that this can only provide release sequence support for RMW chains
2110  * which do not read from the future, as those actions cannot be traced until
2111  * their "promise" is fulfilled. Similarly, we may not even establish the
2112  * presence of a release sequence with certainty, as some modification order
2113  * constraints may be decided further in the future. Thus, this function
2114  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2115  * and a boolean representing certainty.
2116  *
2117  * @param rf The action that might be part of a release sequence. Must be a
2118  * write.
2119  * @param release_heads A pass-by-reference style return parameter. After
2120  * execution of this function, release_heads will contain the heads of all the
2121  * relevant release sequences, if any exist with certainty
2122  * @param pending A pass-by-reference style return parameter which is only used
2123  * when returning false (i.e., uncertain). Records the information regarding
2124  * an uncertain release sequence, including any write operations that might
2125  * break the sequence.
2126  * @return true, if the ModelChecker is certain that release_heads is complete;
2127  * false otherwise
2128  */
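/* Illustrative sketch (hypothetical user program, not part of the checker)
 * of the "contiguous subsequence" question answered here:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);      // candidate release head
 *   x.store(2, std::memory_order_relaxed);      // same thread: may extend the sequence
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);  // RMW: may extend the sequence
 *   // Thread 3:
 *   x.store(10, std::memory_order_relaxed);     // breaks the sequence if mod-ordered
 *                                               // between the release head and rf
 *
 * Whether Thread 3's store lands inside the sequence depends on mo_graph
 * edges that may not be known yet, hence the 'pending' bookkeeping and the
 * certain/uncertain return value. */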
2129 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2130                 rel_heads_list_t *release_heads,
2131                 struct release_seq *pending) const
2132 {
2133         /* Only check for release sequences if there are no cycles */
2134         if (mo_graph->checkForCycles())
2135                 return false;
2136
2137         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2138                 ASSERT(rf->is_write());
2139
2140                 if (rf->is_release())
2141                         release_heads->push_back(rf);
2142                 else if (rf->get_last_fence_release())
2143                         release_heads->push_back(rf->get_last_fence_release());
2144                 if (!rf->is_rmw())
2145                         break; /* End of RMW chain */
2146
2147                 /** @todo Need to be smarter here...  In the linux lock
2148                  * example, this will run to the beginning of the program for
2149                  * every acquire. */
2150                 /** @todo The way to be smarter here is to keep going until 1
2151                  * thread has a release preceded by an acquire and you've seen
2152		 * both. */
2153
2154                 /* acq_rel RMW is a sufficient stopping condition */
2155                 if (rf->is_acquire() && rf->is_release())
2156                         return true; /* complete */
2157	}
2158         if (!rf) {
2159                 /* read from future: need to settle this later */
2160                 pending->rf = NULL;
2161                 return false; /* incomplete */
2162         }
2163
2164         if (rf->is_release())
2165                 return true; /* complete */
2166
2167         /* else relaxed write
2168          * - check for fence-release in the same thread (29.8, stmt. 3)
2169          * - check modification order for contiguous subsequence
2170          *   -> rf must be same thread as release */
2171
2172         const ModelAction *fence_release = rf->get_last_fence_release();
2173         /* Synchronize with a fence-release unconditionally; we don't need to
2174          * find any more "contiguous subsequence..." for it */
2175         if (fence_release)
2176                 release_heads->push_back(fence_release);
2177
2178         int tid = id_to_int(rf->get_tid());
2179         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2180         action_list_t *list = &(*thrd_lists)[tid];
2181         action_list_t::const_reverse_iterator rit;
2182
2183         /* Find rf in the thread list */
2184         rit = std::find(list->rbegin(), list->rend(), rf);
2185         ASSERT(rit != list->rend());
2186
2187         /* Find the last {write,fence}-release */
2188         for (; rit != list->rend(); rit++) {
2189                 if (fence_release && *(*rit) < *fence_release)
2190                         break;
2191                 if ((*rit)->is_release())
2192                         break;
2193         }
2194         if (rit == list->rend()) {
2195                 /* No write-release in this thread */
2196                 return true; /* complete */
2197         } else if (fence_release && *(*rit) < *fence_release) {
2198                 /* The fence-release is more recent (and so, "stronger") than
2199                  * the most recent write-release */
2200                 return true; /* complete */
2201         } /* else, need to establish contiguous release sequence */
2202         ModelAction *release = *rit;
2203
2204         ASSERT(rf->same_thread(release));
2205
2206         pending->writes.clear();
2207
2208         bool certain = true;
2209         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2210                 if (id_to_int(rf->get_tid()) == (int)i)
2211                         continue;
2212                 list = &(*thrd_lists)[i];
2213
2214                 /* Can we ensure no future writes from this thread may break
2215                  * the release seq? */
2216                 bool future_ordered = false;
2217
2218                 ModelAction *last = get_last_action(int_to_id(i));
2219                 Thread *th = get_thread(int_to_id(i));
2220                 if ((last && rf->happens_before(last)) ||
2221                                 !is_enabled(th) ||
2222                                 th->is_complete())
2223                         future_ordered = true;
2224
2225                 ASSERT(!th->is_model_thread() || future_ordered);
2226
2227                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2228                         const ModelAction *act = *rit;
2229                         /* Reach synchronization -> this thread is complete */
2230                         if (act->happens_before(release))
2231                                 break;
2232                         if (rf->happens_before(act)) {
2233                                 future_ordered = true;
2234                                 continue;
2235                         }
2236
2237                         /* Only non-RMW writes can break release sequences */
2238                         if (!act->is_write() || act->is_rmw())
2239                                 continue;
2240
2241                         /* Check modification order */
2242                         if (mo_graph->checkReachable(rf, act)) {
2243                                 /* rf --mo--> act */
2244                                 future_ordered = true;
2245                                 continue;
2246                         }
2247                         if (mo_graph->checkReachable(act, release))
2248                                 /* act --mo--> release */
2249                                 break;
2250                         if (mo_graph->checkReachable(release, act) &&
2251                                       mo_graph->checkReachable(act, rf)) {
2252                                 /* release --mo-> act --mo--> rf */
2253                                 return true; /* complete */
2254                         }
2255                         /* act may break release sequence */
2256                         pending->writes.push_back(act);
2257                         certain = false;
2258                 }
2259                 if (!future_ordered)
2260                         certain = false; /* This thread is uncertain */
2261         }
2262
2263         if (certain) {
2264                 release_heads->push_back(release);
2265                 pending->writes.clear();
2266         } else {
2267                 pending->release = release;
2268                 pending->rf = rf;
2269         }
2270         return certain;
2271 }
2272
2273 /**
2274  * An interface for getting the release sequence head(s) with which a
2275  * given ModelAction must synchronize. This function only returns a non-empty
2276  * result when it can locate a release sequence head with certainty. Otherwise,
2277  * it may mark the internal state of the ModelChecker so that it will handle
2278  * the release sequence at a later time, causing @a acquire to update its
2279  * synchronization at some later point in execution.
2280  *
2281  * @param acquire The 'acquire' action that may synchronize with a release
2282  * sequence
2283  * @param read The read action that may read from a release sequence; this may
2284  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2285  * when 'acquire' is a fence-acquire)
2286  * @param release_heads A pass-by-reference return parameter. Will be filled
2287  * with the head(s) of the release sequence(s), if they exist with certainty.
2288  * @see ModelChecker::release_seq_heads
2289  */
2290 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2291                 ModelAction *read, rel_heads_list_t *release_heads)
2292 {
2293         const ModelAction *rf = read->get_reads_from();
2294         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2295         sequence->acquire = acquire;
2296         sequence->read = read;
2297
2298         if (!release_seq_heads(rf, release_heads, sequence)) {
2299                 /* add act to 'lazy checking' list */
2300                 pending_rel_seqs->push_back(sequence);
2301         } else {
2302                 snapshot_free(sequence);
2303         }
2304 }
2305
2306 /**
2307  * Attempt to resolve all stashed operations that might synchronize with a
2308  * release sequence for a given location. This implements the "lazy" portion of
2309  * determining whether or not a release sequence was contiguous, since not all
2310  * modification order information is present at the time an action occurs.
2311  *
2312  * @param location The location/object that should be checked for release
2313  * sequence resolutions. A NULL value means to check all locations.
2314  * @param work_queue The work queue to which to add work items as they are
2315  * generated
2316  * @return True if any updates occurred (new synchronization, new mo_graph
2317  * edges)
2318  */
2319 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2320 {
2321         bool updated = false;
2322         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2323         while (it != pending_rel_seqs->end()) {
2324                 struct release_seq *pending = *it;
2325                 ModelAction *acquire = pending->acquire;
2326                 const ModelAction *read = pending->read;
2327
2328                 /* Only resolve sequences on the given location, if provided */
2329                 if (location && read->get_location() != location) {
2330                         it++;
2331                         continue;
2332                 }
2333
2334                 const ModelAction *rf = read->get_reads_from();
2335                 rel_heads_list_t release_heads;
2336                 bool complete;
2337                 complete = release_seq_heads(rf, &release_heads, pending);
2338                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2339                         if (!acquire->has_synchronized_with(release_heads[i])) {
2340                                 if (acquire->synchronize_with(release_heads[i]))
2341                                         updated = true;
2342                                 else
2343                                         set_bad_synchronization();
2344                         }
2345                 }
2346
2347                 if (updated) {
2348                         /* Re-check all pending release sequences */
2349                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2350                         /* Re-check read-acquire for mo_graph edges */
2351                         if (acquire->is_read())
2352                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2353
2354                         /* propagate synchronization to later actions */
2355                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2356                         for (; (*rit) != acquire; rit++) {
2357                                 ModelAction *propagate = *rit;
2358                                 if (acquire->happens_before(propagate)) {
2359                                         propagate->synchronize_with(acquire);
2360                                         /* Re-check 'propagate' for mo_graph edges */
2361                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2362                                 }
2363                         }
2364                 }
2365                 if (complete) {
2366                         it = pending_rel_seqs->erase(it);
2367                         snapshot_free(pending);
2368                 } else {
2369                         it++;
2370                 }
2371         }
2372
2373	// If we resolved promises or added synchronization, see if we have realized a data race.
2374         checkDataRaces();
2375
2376         return updated;
2377 }
2378
2379 /**
2380  * Performs various bookkeeping operations for the current ModelAction. For
2381  * instance, adds action to the per-object, per-thread action vector and to the
2382  * action trace list of all thread actions.
2383  *
2384  * @param act is the ModelAction to add.
2385  */
2386 void ModelChecker::add_action_to_lists(ModelAction *act)
2387 {
2388         int tid = id_to_int(act->get_tid());
2389         ModelAction *uninit = NULL;
2390         int uninit_id = -1;
2391         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2392         if (list->empty() && act->is_atomic_var()) {
2393                 uninit = get_uninitialized_action(act);
2394                 uninit_id = id_to_int(uninit->get_tid());
2395                 list->push_front(uninit);
2396         }
2397         list->push_back(act);
2398
2399         action_trace->push_back(act);
2400         if (uninit)
2401                 action_trace->push_front(uninit);
2402
2403         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2404         if (tid >= (int)vec->size())
2405                 vec->resize(priv->next_thread_id);
2406         (*vec)[tid].push_back(act);
2407         if (uninit)
2408                 (*vec)[uninit_id].push_front(uninit);
2409
2410         if ((int)thrd_last_action->size() <= tid)
2411                 thrd_last_action->resize(get_num_threads());
2412         (*thrd_last_action)[tid] = act;
2413         if (uninit)
2414                 (*thrd_last_action)[uninit_id] = uninit;
2415
2416         if (act->is_fence() && act->is_release()) {
2417                 if ((int)thrd_last_fence_release->size() <= tid)
2418                         thrd_last_fence_release->resize(get_num_threads());
2419                 (*thrd_last_fence_release)[tid] = act;
2420         }
2421
2422         if (act->is_wait()) {
2423                 void *mutex_loc = (void *) act->get_value();
2424                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2425
2426                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2427                 if (tid >= (int)vec->size())
2428                         vec->resize(priv->next_thread_id);
2429                 (*vec)[tid].push_back(act);
2430         }
2431 }
2432
2433 /**
2434  * @brief Get the last action performed by a particular Thread
2435  * @param tid The thread ID of the Thread in question
2436  * @return The last action in the thread
2437  */
2438 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2439 {
2440         int threadid = id_to_int(tid);
2441         if (threadid < (int)thrd_last_action->size())
2442                 return (*thrd_last_action)[id_to_int(tid)];
2443         else
2444                 return NULL;
2445 }
2446
2447 /**
2448  * @brief Get the last fence release performed by a particular Thread
2449  * @param tid The thread ID of the Thread in question
2450  * @return The last fence release in the thread, if one exists; NULL otherwise
2451  */
2452 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2453 {
2454         int threadid = id_to_int(tid);
2455         if (threadid < (int)thrd_last_fence_release->size())
2456                 return (*thrd_last_fence_release)[id_to_int(tid)];
2457         else
2458                 return NULL;
2459 }
2460
2461 /**
2462  * Gets the last memory_order_seq_cst write (in the total global sequence)
2463  * performed on a particular object (i.e., memory location), not including the
2464  * current action.
2465  * @param curr The current ModelAction; also denotes the object location to
2466  * check
2467  * @return The last seq_cst write
2468  */
2469 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2470 {
2471         void *location = curr->get_location();
2472         action_list_t *list = get_safe_ptr_action(obj_map, location);
2473         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2474         action_list_t::reverse_iterator rit;
2475         for (rit = list->rbegin(); (*rit) != curr; rit++)
2476                 ;
2477         rit++; /* Skip past curr */
2478         for ( ; rit != list->rend(); rit++)
2479                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2480                         return *rit;
2481         return NULL;
2482 }
2483
2484 /**
2485  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2486  * performed in a particular thread, prior to a particular fence.
2487  * @param tid The ID of the thread to check
2488  * @param before_fence The fence from which to begin the search; if NULL, then
2489  * search for the most recent fence in the thread.
2490  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2491  */
2492 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2493 {
2494         /* All fences should have NULL location */
2495         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2496         action_list_t::reverse_iterator rit = list->rbegin();
2497
2498         if (before_fence) {
2499                 for (; rit != list->rend(); rit++)
2500                         if (*rit == before_fence)
2501                                 break;
2502
2503                 ASSERT(*rit == before_fence);
2504                 rit++;
2505         }
2506
2507         for (; rit != list->rend(); rit++)
2508                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2509                         return *rit;
2510         return NULL;
2511 }
2512
2513 /**
2514  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2515  * location). This function identifies the mutex according to the current
2516  * action, which is presumed to perform on the same mutex.
2517  * @param curr The current ModelAction; also denotes the object location to
2518  * check
2519  * @return The last unlock operation
2520  */
2521 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2522 {
2523         void *location = curr->get_location();
2524         action_list_t *list = get_safe_ptr_action(obj_map, location);
2525         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2526         action_list_t::reverse_iterator rit;
2527         for (rit = list->rbegin(); rit != list->rend(); rit++)
2528                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2529                         return *rit;
2530         return NULL;
2531 }
2532
2533 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2534 {
2535         ModelAction *parent = get_last_action(tid);
2536         if (!parent)
2537                 parent = get_thread(tid)->get_creation();
2538         return parent;
2539 }
2540
2541 /**
2542  * Returns the clock vector for a given thread.
2543  * @param tid The thread whose clock vector we want
2544  * @return Desired clock vector
2545  */
2546 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2547 {
2548         return get_parent_action(tid)->get_cv();
2549 }
2550
2551 /**
2552  * @brief Find the promise, if any, to resolve for the current action
2553  * @param curr The current ModelAction. Should be a write.
2554  * @return The (non-negative) index for the Promise to resolve, if any;
2555  * otherwise -1
2556  */
2557 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2558 {
2559         for (unsigned int i = 0; i < promises->size(); i++)
2560                 if (curr->get_node()->get_promise(i))
2561                         return i;
2562         return -1;
2563 }
2564
2565 /**
2566  * Resolve a Promise with a current write.
2567  * @param write The ModelAction that is fulfilling Promises
2568  * @param promise_idx The index corresponding to the promise
2569  * @return True if the Promise was successfully resolved; false otherwise
2570  */
2571 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2572 {
2573         ModelVector<ModelAction *> actions_to_check;
2574         Promise *promise = (*promises)[promise_idx];
2575
2576         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2577                 ModelAction *read = promise->get_reader(i);
2578                 read_from(read, write);
2579                 actions_to_check.push_back(read);
2580         }
2581         /* Make sure the promise's value matches the write's value */
2582         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2583         if (!mo_graph->resolvePromise(promise, write))
2584                 priv->failed_promise = true;
2585
2586         promises->erase(promises->begin() + promise_idx);
2587         /**
2588          * @todo  It is possible to end up in an inconsistent state, where a
2589          * "resolved" promise may still be referenced if
2590          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2591          *
2592          * Note that the inconsistency only matters when dumping mo_graph to
2593          * file.
2594          *
2595          * delete promise;
2596          */
2597
2598         //Check whether reading these writes has made threads unable to
2599         //resolve promises
2600         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2601                 ModelAction *read = actions_to_check[i];
2602                 mo_check_promises(read, true);
2603         }
2604
2605         return true;
2606 }
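
/*
 * Illustrative sketch (user-level C++11 code, not part of the checker).
 * A Promise stands in for a value that a load speculatively returns
 * before any matching store has been explored; the classic load-buffering
 * outcome requires it:
 *
 *   std::atomic<int> x{0}, y{0};
 *   // Thread 1:
 *   int r1 = y.load(std::memory_order_relaxed);
 *   x.store(1, std::memory_order_relaxed);
 *   // Thread 2:
 *   int r2 = x.load(std::memory_order_relaxed);
 *   y.store(1, std::memory_order_relaxed);
 *
 * r1 == 1 && r2 == 1 is allowed, yet when thread 1's load executes there
 * is no store of 1 to y in the trace. The load reads from a Promise, and
 * when thread 2 later performs y.store(1), resolve_promise() binds that
 * write to the promised read as implemented above.
 */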
2607
2608 /**
2609  * Compute the set of promises that could potentially be satisfied by this
2610  * action. Note that the set computation actually appears in the Node, not in
2611  * ModelChecker.
2612  * @param curr The ModelAction that may satisfy promises
2613  */
2614 void ModelChecker::compute_promises(ModelAction *curr)
2615 {
2616         for (unsigned int i = 0; i < promises->size(); i++) {
2617                 Promise *promise = (*promises)[i];
2618                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2619                         continue;
2620
2621                 bool satisfy = true;
2622                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2623                         const ModelAction *act = promise->get_reader(j);
2624                         if (act->happens_before(curr) ||
2625                                         act->could_synchronize_with(curr)) {
2626                                 satisfy = false;
2627                                 break;
2628                         }
2629                 }
2630                 if (satisfy)
2631                         curr->get_node()->set_promise(i);
2632         }
2633 }
2634
2635 /** Checks promises in response to a change in a thread's ClockVector. */
2636 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2637 {
2638         for (unsigned int i = 0; i < promises->size(); i++) {
2639                 Promise *promise = (*promises)[i];
2640                 if (!promise->thread_is_available(tid))
2641                         continue;
2642                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2643                         const ModelAction *act = promise->get_reader(j);
2644                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2645                                         merge_cv->synchronized_since(act)) {
2646                                 if (promise->eliminate_thread(tid)) {
2647                                         /* Promise has failed */
2648                                         priv->failed_promise = true;
2649                                         return;
2650                                 }
2651                         }
2652                 }
2653         }
2654 }
2655
2656 void ModelChecker::check_promises_thread_disabled()
2657 {
2658         for (unsigned int i = 0; i < promises->size(); i++) {
2659                 Promise *promise = (*promises)[i];
2660                 if (promise->has_failed()) {
2661                         priv->failed_promise = true;
2662                         return;
2663                 }
2664         }
2665 }
2666
2667 /**
2668  * @brief Checks promises in response to addition to modification order for
2669  * threads.
2670  *
2671  * We test whether threads are still available for satisfying promises after an
2672  * addition to our modification order constraints. Those that are unavailable
2673  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2674  * that promise has failed.
2675  *
2676  * @param act The ModelAction which updated the modification order
2677  * @param is_read_check Should be true if act is a read and we must check for
2678  * updates to the store from which it read (there is a distinction here for
2679  * RMW's, which are both a load and a store)
2680  */
2681 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2682 {
2683         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2684
2685         for (unsigned int i = 0; i < promises->size(); i++) {
2686                 Promise *promise = (*promises)[i];
2687
2688                 // Is this promise on the same location?
2689                 if (!promise->same_location(write))
2690                         continue;
2691
2692                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2693                         const ModelAction *pread = promise->get_reader(j);
2694                         if (!pread->happens_before(act))
2695                                continue;
2696                         if (mo_graph->checkPromise(write, promise)) {
2697                                 priv->failed_promise = true;
2698                                 return;
2699                         }
2700                         break;
2701                 }
2702
2703                 // Don't do any lookups twice for the same thread
2704                 if (!promise->thread_is_available(act->get_tid()))
2705                         continue;
2706
2707                 if (mo_graph->checkReachable(promise, write)) {
2708                         if (mo_graph->checkPromise(write, promise)) {
2709                                 priv->failed_promise = true;
2710                                 return;
2711                         }
2712                 }
2713         }
2714 }
2715
2716 /**
2717  * Compute the set of writes that may break the current pending release
2718  * sequence. This information is extracted from previous release sequence
2719  * calculations.
2720  *
2721  * @param curr The current ModelAction. Must be a release sequence fixup
2722  * action.
2723  */
2724 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2725 {
2726         if (pending_rel_seqs->empty())
2727                 return;
2728
2729         struct release_seq *pending = pending_rel_seqs->back();
2730         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2731                 const ModelAction *write = pending->writes[i];
2732                 curr->get_node()->add_relseq_break(write);
2733         }
2734
2735         /* NULL means don't break the sequence; just synchronize */
2736         curr->get_node()->add_relseq_break(NULL);
2737 }
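
/*
 * Illustrative sketch (user-level C++11 code, not part of the checker).
 * A release sequence headed by a release store is extended by RMWs from
 * any thread, but an ordinary store from another thread can break it;
 * those potentially-breaking writes are what get recorded as choices
 * above:
 *
 *   std::atomic<int> x{0};
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);     // heads the sequence
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed); // RMW: extends it
 *   // Thread 3:
 *   x.store(5, std::memory_order_relaxed);     // plain store: may break it
 *   // Thread 4:
 *   int r = x.load(std::memory_order_acquire);
 *
 * If the acquire load reads from the fetch_add, it synchronizes with
 * thread 1's release store only while the RMW is still part of the
 * release sequence; if thread 3's store falls between them in
 * modification order, the sequence is broken and no synchronization
 * occurs.
 */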
2738
2739 /**
2740  * Build up an initial set of all past writes that this 'read' action may read
2741  * from, as well as any previously-observed future values that must still be valid.
2742  *
2743  * @param curr is the current ModelAction that we are exploring; it must be a
2744  * 'read' operation.
2745  */
2746 void ModelChecker::build_may_read_from(ModelAction *curr)
2747 {
2748         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2749         unsigned int i;
2750         ASSERT(curr->is_read());
2751
2752         ModelAction *last_sc_write = NULL;
2753
2754         if (curr->is_seqcst())
2755                 last_sc_write = get_last_seq_cst_write(curr);
2756
2757         /* Iterate over all threads */
2758         for (i = 0; i < thrd_lists->size(); i++) {
2759                 /* Iterate over actions in thread, starting from most recent */
2760                 action_list_t *list = &(*thrd_lists)[i];
2761                 action_list_t::reverse_iterator rit;
2762                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2763                         ModelAction *act = *rit;
2764
2765                         /* Only consider 'write' actions */
2766                         if (!act->is_write() || act == curr)
2767                                 continue;
2768
2769                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2770                         bool allow_read = true;
2771
2772                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2773                                 allow_read = false;
2774                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2775                                 allow_read = false;
2776
2777                         if (allow_read) {
2778                                 /* Only add feasible reads */
2779                                 mo_graph->startChanges();
2780                                 r_modification_order(curr, act);
2781                                 if (!is_infeasible())
2782                                         curr->get_node()->add_read_from_past(act);
2783                                 mo_graph->rollbackChanges();
2784                         }
2785
2786                         /* Include at most one act per-thread that "happens before" curr */
2787                         if (act->happens_before(curr))
2788                                 break;
2789                 }
2790         }
2791
2792         /* Inherit existing, promised future values */
2793         for (i = 0; i < promises->size(); i++) {
2794                 const Promise *promise = (*promises)[i];
2795                 const ModelAction *promise_read = promise->get_reader(0);
2796                 if (promise_read->same_var(curr)) {
2797                         /* Only add feasible future-values */
2798                         mo_graph->startChanges();
2799                         r_modification_order(curr, promise);
2800                         if (!is_infeasible())
2801                                 curr->get_node()->add_read_from_promise(promise_read);
2802                         mo_graph->rollbackChanges();
2803                 }
2804         }
2805
2806         /* We may find no valid may-read-from only if the execution is doomed */
2807         if (!curr->get_node()->read_from_size()) {
2808                 priv->no_valid_reads = true;
2809                 set_assert();
2810         }
2811
2812         if (DBG_ENABLED()) {
2813                 model_print("Reached read action:\n");
2814                 curr->print();
2815                 model_print("Printing read_from_past\n");
2816                 curr->get_node()->print_read_from_past();
2817                 model_print("End printing read_from_past\n");
2818         }
2819 }
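
/*
 * Illustrative sketch (user-level C++11 code, not part of the checker) of
 * the per-thread cutoff applied above ("at most one act per-thread that
 * happens before curr"):
 *
 *   std::atomic<int> x{0};
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);
 *   std::thread t([&] { int r = x.load(std::memory_order_relaxed); });
 *
 * Both stores happen before the load (they are sequenced before the
 * thread creation), but only the store of 2 is a legal candidate:
 * reading the older store of 1 would violate coherence. The reverse walk
 * through each thread's action list can therefore stop at the first
 * happens-before write it meets.
 */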
2820
2821 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2822 {
2823         for ( ; write != NULL; write = write->get_reads_from()) {
2824                 /* UNINIT actions don't have a Node, and they never sleep */
2825                 if (write->is_uninitialized())
2826                         return true;
2827                 Node *prevnode = write->get_node()->get_parent();
2828
2829                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2830                 if (write->is_release() && thread_sleep)
2831                         return true;
2832                 if (!write->is_rmw())
2833                         return false;
2834         }
2835         return true;
2836 }
2837
2838 /**
2839  * @brief Get an action representing an uninitialized atomic
2840  *
2841  * This function may create a new one or try to retrieve one from the NodeStack
2842  *
2843  * @param curr The current action, which prompts the creation of an UNINIT action
2844  * @return A pointer to the UNINIT ModelAction
2845  */
2846 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2847 {
2848         Node *node = curr->get_node();
2849         ModelAction *act = node->get_uninit_action();
2850         if (!act) {
2851                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2852                 node->set_uninit_action(act);
2853         }
2854         act->create_cv(NULL);
2855         return act;
2856 }
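
/*
 * Illustrative sketch (user-level C++11 code, not part of the checker).
 * An atomic object that is read before any store to it has executed still
 * needs something to read from; the ATOMIC_UNINIT pseudo-action created
 * above plays that role and carries params.uninitvalue:
 *
 *   std::atomic<int> x;                         // never stored to
 *   int r = x.load(std::memory_order_relaxed);  // reads from the UNINIT action
 *
 * Modeling the missing initialization as an explicit write keeps the
 * reads-from and modification-order machinery uniform.
 */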
2857
2858 static void print_list(action_list_t *list)
2859 {
2860         action_list_t::iterator it;
2861
2862         model_print("---------------------------------------------------------------------\n");
2863
2864         unsigned int hash = 0;
2865
2866         for (it = list->begin(); it != list->end(); it++) {
2867                 const ModelAction *act = *it;
2868                 if (act->get_seq_number() > 0)
2869                         act->print();
2870                 hash = hash^(hash<<3)^((*it)->hash());
2871         }
2872         model_print("HASH %u\n", hash);
2873         model_print("---------------------------------------------------------------------\n");
2874 }
2875
2876 #if SUPPORT_MOD_ORDER_DUMP
2877 void ModelChecker::dumpGraph(char *filename) const
2878 {
2879         char buffer[200];
2880         sprintf(buffer, "%s.dot", filename);
2881         FILE *file = fopen(buffer, "w");
2882         fprintf(file, "digraph %s {\n", filename);
2883         mo_graph->dumpNodes(file);
2884         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2885
2886         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2887                 ModelAction *act = *it;
2888                 if (act->is_read()) {
2889                         mo_graph->dot_print_node(file, act);
2890                         if (act->get_reads_from())
2891                                 mo_graph->dot_print_edge(file,
2892                                                 act->get_reads_from(),
2893                                                 act,
2894                                                 "label=\"rf\", color=red, weight=2");
2895                         else
2896                                 mo_graph->dot_print_edge(file,
2897                                                 act->get_reads_from_promise(),
2898                                                 act,
2899                                                 "label=\"rf\", color=red");
2900                 }
2901                 if (thread_array[id_to_int(act->get_tid())]) {
2902                         mo_graph->dot_print_edge(file,
2903                                         thread_array[id_to_int(act->get_tid())],
2904                                         act,
2905                                         "label=\"sb\", color=blue, weight=400");
2906                 }
2907
2908                 thread_array[id_to_int(act->get_tid())] = act;
2909         }
2910         fprintf(file, "}\n");
2911         model_free(thread_array);
2912         fclose(file);
2913 }
2914 #endif
2915
2916 /** @brief Prints an execution trace summary. */
2917 void ModelChecker::print_summary() const
2918 {
2919 #if SUPPORT_MOD_ORDER_DUMP
2920         char buffername[100];
2921         sprintf(buffername, "exec%04u", stats.num_total);
2922         mo_graph->dumpGraphToFile(buffername);
2923         sprintf(buffername, "graph%04u", stats.num_total);
2924         dumpGraph(buffername);
2925 #endif
2926
2927         model_print("Execution %d:", stats.num_total);
2928         if (isfeasibleprefix()) {
2929                 if (scheduler->all_threads_sleeping())
2930                         model_print(" SLEEP-SET REDUNDANT");
2931                 model_print("\n");
2932         } else
2933                 print_infeasibility(" INFEASIBLE");
2934         print_list(action_trace);
2935         model_print("\n");
2936         if (!promises->empty()) {
2937                 model_print("Pending promises:\n");
2938                 for (unsigned int i = 0; i < promises->size(); i++) {
2939                         model_print(" [P%u] ", i);
2940                         (*promises)[i]->print();
2941                 }
2942                 model_print("\n");
2943         }
2944 }
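
/*
 * Usage note (assumes Graphviz is installed; the exact file names depend
 * on the execution count). With SUPPORT_MOD_ORDER_DUMP enabled, each
 * execution leaves files such as graph0000.dot (from dumpGraph() above),
 * which can be rendered with, e.g.:
 *
 *   dot -Tsvg graph0000.dot -o graph0000.svg
 *
 * rf edges are drawn in red and sb edges in blue, matching the attributes
 * emitted in dumpGraph().
 */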
2945
2946 /**
2947  * Add a Thread to the system for the first time. Should only be called once
2948  * per thread.
2949  * @param t The Thread to add
2950  */
2951 void ModelChecker::add_thread(Thread *t)
2952 {
2953         thread_map->put(id_to_int(t->get_id()), t);
2954         scheduler->add_thread(t);
2955 }
2956
2957 /**
2958  * @brief Get a Thread reference by its ID
2959  * @param tid The Thread's ID
2960  * @return A Thread reference
2961  */
2962 Thread * ModelChecker::get_thread(thread_id_t tid) const
2963 {
2964         return thread_map->get(id_to_int(tid));
2965 }
2966
2967 /**
2968  * @brief Get a reference to the Thread in which a ModelAction was executed
2969  * @param act The ModelAction
2970  * @return A Thread reference
2971  */
2972 Thread * ModelChecker::get_thread(const ModelAction *act) const
2973 {
2974         return get_thread(act->get_tid());
2975 }
2976
2977 /**
2978  * @brief Get a Promise's "promise number"
2979  *
2980  * A "promise number" is an index number that is unique to a promise, valid
2981  * only for a specific snapshot of an execution trace. Promises may come and go
2982  * as they are generated and resolved, so an index only retains meaning for the
2983  * current snapshot.
2984  *
2985  * @param promise The Promise to check
2986  * @return The promise index, if the promise still is valid; otherwise -1
2987  */
2988 int ModelChecker::get_promise_number(const Promise *promise) const
2989 {
2990         for (unsigned int i = 0; i < promises->size(); i++)
2991                 if ((*promises)[i] == promise)
2992                         return i;
2993         /* Not found */
2994         return -1;
2995 }
2996
2997 /**
2998  * @brief Check if a Thread is currently enabled
2999  * @param t The Thread to check
3000  * @return True if the Thread is currently enabled
3001  */
3002 bool ModelChecker::is_enabled(Thread *t) const
3003 {
3004         return scheduler->is_enabled(t);
3005 }
3006
3007 /**
3008  * @brief Check if a Thread is currently enabled
3009  * @param tid The ID of the Thread to check
3010  * @return True if the Thread is currently enabled
3011  */
3012 bool ModelChecker::is_enabled(thread_id_t tid) const
3013 {
3014         return scheduler->is_enabled(tid);
3015 }
3016
3017 /**
3018  * Switch from a model-checker context to a user-thread context. This is the
3019  * complement of ModelChecker::switch_to_master and must be called from the
3020  * model-checker context.
3021  *
3022  * @param thread The user-thread to switch to
3023  */
3024 void ModelChecker::switch_from_master(Thread *thread)
3025 {
3026         scheduler->set_current_thread(thread);
3027         Thread::swap(&system_context, thread);
3028 }
3029
3030 /**
3031  * Switch from a user-context to the "master thread" context (a.k.a. system
3032  * context). This switch is made with the intention of exploring a particular
3033  * model-checking action (described by a ModelAction object). Must be called
3034  * from a user-thread context.
3035  *
3036  * @param act The current action that will be explored. May be NULL only if
3037  * trace is exiting via an assertion (see ModelChecker::set_assert and
3038  * ModelChecker::has_asserted).
3039  * @return The value returned by the current action
3040  */
3041 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3042 {
3043         DBG();
3044         Thread *old = thread_current();
3045         scheduler->set_current_thread(NULL);
3046         ASSERT(!old->get_pending());
3047         old->set_pending(act);
3048         if (Thread::swap(old, &system_context) < 0) {
3049                 perror("swap threads");
3050                 exit(EXIT_FAILURE);
3051         }
3052         return old->get_return_value();
3053 }
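
/*
 * Hypothetical usage sketch (the function name below is illustrative and
 * not taken from this file): the interposition layer that intercepts a
 * user-level atomic operation packages it as a ModelAction and hands
 * control to the model checker roughly like this:
 *
 *   uint64_t interpose_atomic_load(void *obj, memory_order ord)
 *   {
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 *
 * switch_to_master() stashes the action as the thread's pending action,
 * swaps to the scheduler context, and eventually returns the action's
 * return value (for a read, the value chosen for it to read).
 */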
3054
3055 /**
3056  * Takes the next step in the execution, if possible.
3057  * @param curr The current step to take
3058  * @return Returns the next Thread to run, if any; NULL if this execution
3059  * should terminate
3060  */
3061 Thread * ModelChecker::take_step(ModelAction *curr)
3062 {
3063         Thread *curr_thrd = get_thread(curr);
3064         ASSERT(curr_thrd->get_state() == THREAD_READY);
3065
3066         curr = check_current_action(curr);
3067
3068         /* Infeasible -> don't take any more steps */
3069         if (is_infeasible())
3070                 return NULL;
3071         else if (isfeasibleprefix() && have_bug_reports()) {
3072                 set_assert();
3073                 return NULL;
3074         }
3075
3076         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3077                 return NULL;
3078
3079         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3080                 scheduler->remove_thread(curr_thrd);
3081
3082         Thread *next_thrd = NULL;
3083         if (curr)
3084                 next_thrd = action_select_next_thread(curr);
3085         if (!next_thrd)
3086                 next_thrd = get_next_thread();
3087
3088         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3089                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3090
3091         return next_thrd;
3092 }
3093
3094 /** Wrapper to run the user's main function, with appropriate arguments */
3095 void user_main_wrapper(void *)
3096 {
3097         user_main(model->params.argc, model->params.argv);
3098 }
3099
3100 /** @brief Run ModelChecker for the user program */
3101 void ModelChecker::run()
3102 {
3103         do {
3104                 thrd_t user_thread;
3105                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3106                 add_thread(t);
3107
3108                 do {
3109                         /*
3110                          * Stash next pending action(s) for thread(s). There
3111                          * should only need to stash one thread's action--the
3112                          * thread which just took a step--plus the first step
3113                          * for any newly-created thread
3114                          */
3115                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3116                                 thread_id_t tid = int_to_id(i);
3117                                 Thread *thr = get_thread(tid);
3118                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3119                                         switch_from_master(thr);
3120                                         if (is_circular_wait(thr))
3121                                                 assert_bug("Deadlock detected");
3122                                 }
3123                         }
3124
3125                         /* Catch assertions from prior take_step or from
3126                          * between-ModelAction bugs (e.g., data races) */
3127                         if (has_asserted())
3128                                 break;
3129
3130                         /* Consume the next action for a Thread */
3131                         ModelAction *curr = t->get_pending();
3132                         t->set_pending(NULL);
3133                         t = take_step(curr);
3134                 } while (t && !t->is_model_thread());
3135
3136                 /*
3137                  * Launch end-of-execution release sequence fixups only when
3138                  * the execution is otherwise feasible AND there are:
3139                  *
3140                  * (1) pending release sequences
3141                  * (2) pending assertions that could be invalidated by a change
3142                  * in clock vectors (i.e., data races)
3143                  * (3) no pending promises
3144                  */
3145                 while (!pending_rel_seqs->empty() &&
3146                                 is_feasible_prefix_ignore_relseq() &&
3147                                 !unrealizedraces.empty()) {
3148                         model_print("*** WARNING: release sequence fixup action "
3149                                         "(%zu pending release sequence(s)) ***\n",
3150                                         pending_rel_seqs->size());
3151                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3152                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3153                                         model_thread);
3154                         take_step(fixup);
3155                 }
3156         } while (next_execution());
3157
3158         model_print("******* Model-checking complete: *******\n");
3159         print_stats();
3160 }