[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "snapshot-interface.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "output.h"
19 #include "traceanalysis.h"
20 #include "bugmessage.h"
21
22 #define INITIAL_THREAD_ID       0
23
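/** @brief The global model-checker instance */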
24 ModelChecker *model;
25
26 /**
27  * Structure for holding small ModelChecker members that should be snapshotted
28  */
29 struct model_snapshot_members {
30         model_snapshot_members() :
31                 /* First thread created will have id INITIAL_THREAD_ID */
32                 next_thread_id(INITIAL_THREAD_ID),
33                 used_sequence_numbers(0),
34                 next_backtrack(NULL),
35                 bugs(),
36                 stats(),
37                 failed_promise(false),
38                 too_many_reads(false),
39                 no_valid_reads(false),
40                 bad_synchronization(false),
41                 asserted(false)
42         { }
43
44         ~model_snapshot_members() {
45                 for (unsigned int i = 0; i < bugs.size(); i++)
46                         delete bugs[i];
47                 bugs.clear();
48         }
49
50         unsigned int next_thread_id;
51         modelclock_t used_sequence_numbers;
52         ModelAction *next_backtrack;
53         SnapVector<bug_message *> bugs;
54         struct execution_stats stats;
55         bool failed_promise;
56         bool too_many_reads;
57         bool no_valid_reads;
58         /** @brief Incorrectly-ordered synchronization was made */
59         bool bad_synchronization;
60         bool asserted;
61
62         SNAPSHOTALLOC
63 };
64
65 /** @brief Constructor */
66 ModelChecker::ModelChecker(struct model_params params) :
67         /* Initialize default scheduler */
68         params(params),
69         scheduler(new Scheduler()),
70         diverge(NULL),
71         earliest_diverge(NULL),
72         action_trace(new action_list_t()),
73         thread_map(new HashTable<int, Thread *, int>()),
74         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
75         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
76         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
77         promises(new SnapVector<Promise *>()),
78         futurevalues(new SnapVector<struct PendingFutureValue>()),
79         pending_rel_seqs(new SnapVector<struct release_seq *>()),
80         thrd_last_action(new SnapVector<ModelAction *>(1)),
81         thrd_last_fence_release(new SnapVector<ModelAction *>()),
82         node_stack(new NodeStack()),
83         trace_analyses(new ModelVector<Trace_Analysis *>()),
84         priv(new struct model_snapshot_members()),
85         mo_graph(new CycleGraph())
86 {
87         /* Initialize a model-checker thread, for special ModelActions */
88         model_thread = new Thread(get_next_id());
89         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
90 }
91
92 /** @brief Destructor */
93 ModelChecker::~ModelChecker()
94 {
95         for (unsigned int i = 0; i < get_num_threads(); i++)
96                 delete thread_map->get(i);
97         delete thread_map;
98
99         delete obj_thrd_map;
100         delete obj_map;
101         delete condvar_waiters_map;
102         delete action_trace;
103
104         for (unsigned int i = 0; i < promises->size(); i++)
105                 delete (*promises)[i];
106         delete promises;
107
108         delete pending_rel_seqs;
109
110         delete thrd_last_action;
111         delete thrd_last_fence_release;
112         delete node_stack;
113         for (unsigned int i = 0; i < trace_analyses->size(); i++)
114                 delete (*trace_analyses)[i];
115         delete trace_analyses;
116         delete scheduler;
117         delete mo_graph;
118         delete priv;
119 }
120
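/**
 * @brief Look up the action list for a memory location, lazily creating and
 * registering an empty list on first access (a get-or-create helper).
 */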
121 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
122 {
123         action_list_t *tmp = hash->get(ptr);
124         if (tmp == NULL) {
125                 tmp = new action_list_t();
126                 hash->put(ptr, tmp);
127         }
128         return tmp;
129 }
130
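/**
 * @brief Look up the per-thread action-list vector for a memory location,
 * lazily creating an empty vector on first access.
 */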
131 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
132 {
133         SnapVector<action_list_t> *tmp = hash->get(ptr);
134         if (tmp == NULL) {
135                 tmp = new SnapVector<action_list_t>();
136                 hash->put(ptr, tmp);
137         }
138         return tmp;
139 }
140
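/**
 * @brief Get the list of actions a particular thread has performed on an object
 * @param obj The object (memory location) to query
 * @param tid The thread whose actions are requested
 * @return The per-thread action list, or NULL if there is none
 */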
141 action_list_t * ModelChecker::get_actions_on_obj(void * obj, thread_id_t tid) {
142         SnapVector<action_list_t> *wrv=obj_thrd_map->get(obj);
143         if (wrv==NULL)
144                 return NULL;
145         unsigned int thread=id_to_int(tid);
146         if (thread < wrv->size())
147                 return &(*wrv)[thread];
148         else
149                 return NULL;
150 }
151
152
153 /**
154  * Restores user program to initial state and resets all model-checker data
155  * structures.
156  */
157 void ModelChecker::reset_to_initial_state()
158 {
159         DEBUG("+++ Resetting to initial state +++\n");
160         node_stack->reset_execution();
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
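/** @return the current head Node of the NodeStack */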
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
211  * actions should be followed by the execution of their child thread. In either
212  * case, the current action should determine the next thread schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
228
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * defer to the scheduler.
236  *
237  * @return The next chosen thread to run, if any exists; otherwise NULL if the
238  * current execution should terminate.
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_behaviors()) {
264                         /* Execute the same thread with a new behavior */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else {
268                         ASSERT(prevnode);
269                         /* Make a different thread execute for next step */
270                         scheduler->add_sleep(get_thread(next->get_tid()));
271                         tid = prevnode->get_next_backtrack();
272                         /* Make sure the backtracked thread isn't sleeping. */
273                         node_stack->pop_restofstack(1);
274                         if (diverge == earliest_diverge) {
275                                 earliest_diverge = prevnode->get_action();
276                         }
277                 }
278                 /* Start the round robin scheduler from this thread id */
279                 scheduler->set_scheduler_thread(tid);
280                 /* The correct sleep set is in the parent node. */
281                 execute_sleep_set();
282
283                 DEBUG("*** Divergence point ***\n");
284
285                 diverge = NULL;
286         } else {
287                 tid = next->get_tid();
288         }
289         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
290         ASSERT(tid != THREAD_ID_T_NONE);
291         return get_thread(id_to_int(tid));
292 }
293
294 /**
295  * We need to know what the next actions of all threads in the sleep
296  * set will be. This method marks the pending action of each thread in
297  * the sleep set with the sleep flag.
298  */
299
300 void ModelChecker::execute_sleep_set()
301 {
302         for (unsigned int i = 0; i < get_num_threads(); i++) {
303                 thread_id_t tid = int_to_id(i);
304                 Thread *thr = get_thread(tid);
305                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
306                         thr->get_pending()->set_sleep_flag();
307                 }
308         }
309 }
310
311 /**
312  * @brief Should the current action wake up a given thread?
313  *
314  * @param curr The current action
315  * @param thread The thread that we might wake up
316  * @return True, if we should wake up the sleeping thread; false otherwise
317  */
318 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
319 {
320         const ModelAction *asleep = thread->get_pending();
321         /* Don't allow partial RMW to wake anyone up */
322         if (curr->is_rmwr())
323                 return false;
324         /* Synchronizing actions may have been backtracked */
325         if (asleep->could_synchronize_with(curr))
326                 return true;
327         /* All acquire/release fences and fence-acquire/store-release */
328         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
329                 return true;
330                 /* Fence-release + store can wake a load-acquire on the same location */
331         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
332                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
333                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
334                         return true;
335         }
336         return false;
337 }
338
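/**
 * @brief Remove from the sleep set any threads which the current action can wake
 * @param curr The current action
 */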
339 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
340 {
341         for (unsigned int i = 0; i < get_num_threads(); i++) {
342                 Thread *thr = get_thread(int_to_id(i));
343                 if (scheduler->is_sleep_set(thr)) {
344                         if (should_wake_up(curr, thr))
345                                 /* Remove this thread from sleep set */
346                                 scheduler->remove_sleep(thr);
347                 }
348         }
349 }
350
351 /** @brief Alert the model-checker that an incorrectly-ordered
352  * synchronization was made */
353 void ModelChecker::set_bad_synchronization()
354 {
355         priv->bad_synchronization = true;
356 }
357
358 /**
359  * Check whether the current trace has triggered an assertion which should halt
360  * its execution.
361  *
362  * @return True, if the execution should be aborted; false otherwise
363  */
364 bool ModelChecker::has_asserted() const
365 {
366         return priv->asserted;
367 }
368
369 /**
370  * Trigger a trace assertion which should cause this execution to be halted.
371  * This can be due to a detected bug or due to an infeasibility that should
372  * halt ASAP.
373  */
374 void ModelChecker::set_assert()
375 {
376         priv->asserted = true;
377 }
378
379 /**
380  * Check if we are in a deadlock. Should only be called at the end of an
381  * execution, although it should not give false positives in the middle of an
382  * execution (there should be some ENABLED thread).
383  *
384  * @return True if program is in a deadlock; false otherwise
385  */
386 bool ModelChecker::is_deadlocked() const
387 {
388         bool blocking_threads = false;
389         for (unsigned int i = 0; i < get_num_threads(); i++) {
390                 thread_id_t tid = int_to_id(i);
391                 if (is_enabled(tid))
392                         return false;
393                 Thread *t = get_thread(tid);
394                 if (!t->is_model_thread() && t->get_pending())
395                         blocking_threads = true;
396         }
397         return blocking_threads;
398 }
399
400 /**
401  * Check if this is a complete execution. That is, have all threads completed
402  * execution (rather than exiting because sleep sets have forced a redundant
403  * execution).
404  *
405  * @return True if the execution is complete.
406  */
407 bool ModelChecker::is_complete_execution() const
408 {
409         for (unsigned int i = 0; i < get_num_threads(); i++)
410                 if (is_enabled(int_to_id(i)))
411                         return false;
412         return true;
413 }
414
415 /**
416  * @brief Assert a bug in the executing program.
417  *
418  * Use this function to assert any sort of bug in the user program. If the
419  * current trace is feasible (actually, a prefix of some feasible execution),
420  * then this execution will be aborted, printing the appropriate message. If
421  * the current trace is not yet feasible, the error message will be stashed and
422  * printed if the execution ever becomes feasible.
423  *
424  * @param msg Descriptive message for the bug (do not include newline char)
425  * @return True if bug is immediately-feasible
426  */
427 bool ModelChecker::assert_bug(const char *msg, ...)
428 {
429         char str[800];
430
431         va_list ap;
432         va_start(ap, msg);
433         vsnprintf(str, sizeof(str), msg, ap);
434         va_end(ap);
435
436         priv->bugs.push_back(new bug_message(str));
437
438         if (isfeasibleprefix()) {
439                 set_assert();
440                 return true;
441         }
442         return false;
443 }
444
445 /**
446  * @brief Assert a bug in the executing program, asserted by a user thread
447  * @see ModelChecker::assert_bug
448  * @param msg Descriptive message for the bug (do not include newline char)
449  */
450 void ModelChecker::assert_user_bug(const char *msg)
451 {
452         /* If feasible bug, bail out now */
453         if (assert_bug(msg))
454                 switch_to_master(NULL);
455 }
456
457 /** @return True, if any bugs have been reported for this execution */
458 bool ModelChecker::have_bug_reports() const
459 {
460         return priv->bugs.size() != 0;
461 }
462
463 /** @brief Print bug report listing for this execution (if any bugs exist) */
464 void ModelChecker::print_bugs() const
465 {
466         if (have_bug_reports()) {
467                 model_print("Bug report: %zu bug%s detected\n",
468                                 priv->bugs.size(),
469                                 priv->bugs.size() > 1 ? "s" : "");
470                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
471                         priv->bugs[i]->print();
472         }
473 }
474
475 /**
476  * @brief Record end-of-execution stats
477  *
478  * Must be run when exiting an execution. Records various stats.
479  * @see struct execution_stats
480  */
481 void ModelChecker::record_stats()
482 {
483         stats.num_total++;
484         if (!isfeasibleprefix())
485                 stats.num_infeasible++;
486         else if (have_bug_reports())
487                 stats.num_buggy_executions++;
488         else if (is_complete_execution())
489                 stats.num_complete++;
490         else {
491                 stats.num_redundant++;
492
493                 /**
494                  * @todo We can violate this ASSERT() when fairness/sleep sets
495                  * conflict to cause an execution to terminate, e.g. with:
496                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
497                  */
498                 //ASSERT(scheduler->all_threads_sleeping());
499         }
500 }
501
502 /** @brief Print execution stats */
503 void ModelChecker::print_stats() const
504 {
505         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
506         model_print("Number of redundant executions: %d\n", stats.num_redundant);
507         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
508         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
509         model_print("Total executions: %d\n", stats.num_total);
510         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
511 }
512
513 /**
514  * @brief End-of-execution print
515  * @param printbugs Should any existing bugs be printed?
516  */
517 void ModelChecker::print_execution(bool printbugs) const
518 {
519         print_program_output();
520
521         if (params.verbose) {
522                 model_print("Earliest divergence point since last feasible execution:\n");
523                 if (earliest_diverge)
524                         earliest_diverge->print();
525                 else
526                         model_print("(Not set)\n");
527
528                 model_print("\n");
529                 print_stats();
530         }
531
532         /* Don't print invalid bugs */
533         if (printbugs)
534                 print_bugs();
535
536         model_print("\n");
537         print_summary();
538 }
539
540 /**
541  * Queries the model-checker for more executions to explore and, if one
542  * exists, resets the model-checker state to execute a new execution.
543  *
544  * @return If there are more executions to explore, return true. Otherwise,
545  * return false.
546  */
547 bool ModelChecker::next_execution()
548 {
549         DBG();
550         /* Is this execution a feasible execution that's worth bug-checking? */
551         bool complete = isfeasibleprefix() && (is_complete_execution() ||
552                         have_bug_reports());
553
554         /* End-of-execution bug checks */
555         if (complete) {
556                 if (is_deadlocked())
557                         assert_bug("Deadlock detected");
558
559                 checkDataRaces();
560                 run_trace_analyses();
561         }
562
563         record_stats();
564
565         /* Output */
566         if (params.verbose || (complete && have_bug_reports()))
567                 print_execution(complete);
568         else
569                 clear_program_output();
570
571         if (complete)
572                 earliest_diverge = NULL;
573
574         if ((diverge = get_next_backtrack()) == NULL)
575                 return false;
576
577         if (DBG_ENABLED()) {
578                 model_print("Next execution will diverge at:\n");
579                 diverge->print();
580         }
581
582         reset_to_initial_state();
583         return true;
584 }
585
586 /** @brief Run trace analyses on complete trace */
587 void ModelChecker::run_trace_analyses() {
588         for (unsigned int i = 0; i < trace_analyses->size(); i++)
589                 (*trace_analyses)[i]->analyze(action_trace);
590 }
591
592 /**
593  * @brief Find the last fence-related backtracking conflict for a ModelAction
594  *
595  * This function performs the search for the most recent conflicting action
596  * against which we should perform backtracking, as affected by fence
597  * operations. This includes pairs of potentially-synchronizing actions which
598  * occur due to fence-acquire or fence-release, and hence should be explored in
599  * the opposite execution order.
600  *
601  * @param act The current action
602  * @return The most recent action which conflicts with act due to fences
603  */
604 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
605 {
606         /* Only perform release/acquire fence backtracking for stores */
607         if (!act->is_write())
608                 return NULL;
609
610         /* Find a fence-release (or, act is a release) */
611         ModelAction *last_release;
612         if (act->is_release())
613                 last_release = act;
614         else
615                 last_release = get_last_fence_release(act->get_tid());
616         if (!last_release)
617                 return NULL;
618
619         /* Skip past the release */
620         action_list_t *list = action_trace;
621         action_list_t::reverse_iterator rit;
622         for (rit = list->rbegin(); rit != list->rend(); rit++)
623                 if (*rit == last_release)
624                         break;
625         ASSERT(rit != list->rend());
626
627         /* Find a prior:
628          *   load-acquire
629          * or
630          *   load --sb-> fence-acquire */
631         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
632         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
633         bool found_acquire_fences = false;
634         for ( ; rit != list->rend(); rit++) {
635                 ModelAction *prev = *rit;
636                 if (act->same_thread(prev))
637                         continue;
638
639                 int tid = id_to_int(prev->get_tid());
640
641                 if (prev->is_read() && act->same_var(prev)) {
642                         if (prev->is_acquire()) {
643                                 /* Found most recent load-acquire, don't need
644                                  * to search for more fences */
645                                 if (!found_acquire_fences)
646                                         return NULL;
647                         } else {
648                                 prior_loads[tid] = prev;
649                         }
650                 }
651                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
652                         found_acquire_fences = true;
653                         acquire_fences[tid] = prev;
654                 }
655         }
656
657         ModelAction *latest_backtrack = NULL;
658         for (unsigned int i = 0; i < acquire_fences.size(); i++)
659                 if (acquire_fences[i] && prior_loads[i])
660                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
661                                 latest_backtrack = acquire_fences[i];
662         return latest_backtrack;
663 }
664
665 /**
666  * @brief Find the last backtracking conflict for a ModelAction
667  *
668  * This function performs the search for the most recent conflicting action
669  * against which we should perform backtracking. This primarily includes pairs of
670  * synchronizing actions which should be explored in the opposite execution
671  * order.
672  *
673  * @param act The current action
674  * @return The most recent action which conflicts with act
675  */
676 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
677 {
678         switch (act->get_type()) {
679         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
680         case ATOMIC_READ:
681         case ATOMIC_WRITE:
682         case ATOMIC_RMW: {
683                 ModelAction *ret = NULL;
684
685                 /* linear search: from most recent to oldest */
686                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
687                 action_list_t::reverse_iterator rit;
688                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
689                         ModelAction *prev = *rit;
690                         if (prev->could_synchronize_with(act)) {
691                                 ret = prev;
692                                 break;
693                         }
694                 }
695
696                 ModelAction *ret2 = get_last_fence_conflict(act);
697                 if (!ret2)
698                         return ret;
699                 if (!ret)
700                         return ret2;
701                 if (*ret < *ret2)
702                         return ret2;
703                 return ret;
704         }
705         case ATOMIC_LOCK:
706         case ATOMIC_TRYLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (act->is_conflicting_lock(prev))
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_UNLOCK: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                 }
726                 break;
727         }
728         case ATOMIC_WAIT: {
729                 /* linear search: from most recent to oldest */
730                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
731                 action_list_t::reverse_iterator rit;
732                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
733                         ModelAction *prev = *rit;
734                         if (!act->same_thread(prev) && prev->is_failed_trylock())
735                                 return prev;
736                         if (!act->same_thread(prev) && prev->is_notify())
737                                 return prev;
738                 }
739                 break;
740         }
741
742         case ATOMIC_NOTIFY_ALL:
743         case ATOMIC_NOTIFY_ONE: {
744                 /* linear search: from most recent to oldest */
745                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
746                 action_list_t::reverse_iterator rit;
747                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
748                         ModelAction *prev = *rit;
749                         if (!act->same_thread(prev) && prev->is_wait())
750                                 return prev;
751                 }
752                 break;
753         }
754         default:
755                 break;
756         }
757         return NULL;
758 }
759
760 /** This method finds backtracking points against which we should try to
761  * reorder the parameter ModelAction.
762  *
763  * @param act The ModelAction to find backtracking points for.
764  */
765 void ModelChecker::set_backtracking(ModelAction *act)
766 {
767         Thread *t = get_thread(act);
768         ModelAction *prev = get_last_conflict(act);
769         if (prev == NULL)
770                 return;
771
772         Node *node = prev->get_node()->get_parent();
773
774         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
775         int low_tid, high_tid;
776         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
777                 low_tid = id_to_int(act->get_tid());
778                 high_tid = low_tid + 1;
779         } else {
780                 low_tid = 0;
781                 high_tid = get_num_threads();
782         }
783
784         for (int i = low_tid; i < high_tid; i++) {
785                 thread_id_t tid = int_to_id(i);
786
787                 /* Make sure this thread can be enabled here. */
788                 if (i >= node->get_num_threads())
789                         break;
790
791                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
792                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
793                 if (node->enabled_status(tid) != THREAD_ENABLED)
794                         continue;
795
796                 /* Check if this has been explored already */
797                 if (node->has_been_explored(tid))
798                         continue;
799
800                 /* See if fairness allows */
801                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
802                         bool unfair = false;
803                         for (int t = 0; t < node->get_num_threads(); t++) {
804                                 thread_id_t tother = int_to_id(t);
805                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
806                                         unfair = true;
807                                         break;
808                                 }
809                         }
810                         if (unfair)
811                                 continue;
812                 }
813
814                 /* See if CHESS-like yield fairness allows */
815                 if (model->params.yieldon) {
816                         bool unfair = false;
817                         for (int t = 0; t < node->get_num_threads(); t++) {
818                                 thread_id_t tother = int_to_id(t);
819                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
820                                         unfair = true;
821                                         break;
822                                 }
823                         }
824                         if (unfair)
825                                 continue;
826                 }
827
828                 /* Cache the latest backtracking point */
829                 set_latest_backtrack(prev);
830
831                 /* If this is a new backtracking point, mark the tree */
832                 if (!node->set_backtrack(tid))
833                         continue;
834                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
835                                         id_to_int(prev->get_tid()),
836                                         id_to_int(t->get_id()));
837                 if (DBG_ENABLED()) {
838                         prev->print();
839                         act->print();
840                 }
841         }
842 }
843
844 /**
845  * @brief Cache a backtracking point as the "most recent", if eligible
846  *
847  * Note that this does not prepare the NodeStack for this backtracking
848  * operation; it only caches the action on a per-execution basis
849  *
850  * @param act The operation at which we should explore a different next action
851  * (i.e., backtracking point)
852  * @return True, if this action is now the most recent backtracking point;
853  * false otherwise
854  */
855 bool ModelChecker::set_latest_backtrack(ModelAction *act)
856 {
857         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
858                 priv->next_backtrack = act;
859                 return true;
860         }
861         return false;
862 }
863
864 /**
865  * Returns last backtracking point. The model checker will explore a different
866  * path for this point in the next execution.
867  * @return The ModelAction at which the next execution should diverge.
868  */
869 ModelAction * ModelChecker::get_next_backtrack()
870 {
871         ModelAction *next = priv->next_backtrack;
872         priv->next_backtrack = NULL;
873         return next;
874 }
875
876 /**
877  * Processes a read model action.
878  * @param curr is the read model action to process.
879  * @return True if processing this read updates the mo_graph.
880  */
881 bool ModelChecker::process_read(ModelAction *curr)
882 {
883         Node *node = curr->get_node();
884         while (true) {
885                 bool updated = false;
886                 switch (node->get_read_from_status()) {
887                 case READ_FROM_PAST: {
888                         const ModelAction *rf = node->get_read_from_past();
889                         ASSERT(rf);
890
891                         mo_graph->startChanges();
892
893                         ASSERT(!is_infeasible());
894                         if (!check_recency(curr, rf)) {
895                                 if (node->increment_read_from()) {
896                                         mo_graph->rollbackChanges();
897                                         continue;
898                                 } else {
899                                         priv->too_many_reads = true;
900                                 }
901                         }
902
903                         updated = r_modification_order(curr, rf);
904                         read_from(curr, rf);
905                         mo_graph->commitChanges();
906                         mo_check_promises(curr, true);
907                         break;
908                 }
909                 case READ_FROM_PROMISE: {
910                         Promise *promise = curr->get_node()->get_read_from_promise();
911                         if (promise->add_reader(curr))
912                                 priv->failed_promise = true;
913                         curr->set_read_from_promise(promise);
914                         mo_graph->startChanges();
915                         if (!check_recency(curr, promise))
916                                 priv->too_many_reads = true;
917                         updated = r_modification_order(curr, promise);
918                         mo_graph->commitChanges();
919                         break;
920                 }
921                 case READ_FROM_FUTURE: {
922                         /* Read from future value */
923                         struct future_value fv = node->get_future_value();
924                         Promise *promise = new Promise(curr, fv);
925                         curr->set_read_from_promise(promise);
926                         promises->push_back(promise);
927                         mo_graph->startChanges();
928                         updated = r_modification_order(curr, promise);
929                         mo_graph->commitChanges();
930                         break;
931                 }
932                 default:
933                         ASSERT(false);
934                 }
935                 get_thread(curr)->set_return_value(curr->get_return_value());
936                 return updated;
937         }
938 }
939
940 /**
941  * Processes a lock, trylock, or unlock model action.  @param curr is
942  * the mutex model action to process.
943  *
944  * The trylock operation checks whether the lock is taken.  If not,
945  * it falls through to the normal lock operation case.  If so, it
946  * returns failure.
947  *
948  * The lock operation has already been checked that it is enabled, so
949  * it just grabs the lock and synchronizes with the previous unlock.
950  *
951  * The unlock operation has to re-enable all of the threads that are
952  * waiting on the lock.
953  *
954  * @return True if synchronization was updated; false otherwise
955  */
956 bool ModelChecker::process_mutex(ModelAction *curr)
957 {
958         std::mutex *mutex = curr->get_mutex();
959         struct std::mutex_state *state = NULL;
960
961         if (mutex)
962                 state = mutex->get_state();
963
964         switch (curr->get_type()) {
965         case ATOMIC_TRYLOCK: {
966                 bool success = !state->locked;
967                 curr->set_try_lock(success);
968                 if (!success) {
969                         get_thread(curr)->set_return_value(0);
970                         break;
971                 }
972                 get_thread(curr)->set_return_value(1);
973         }
974                 //otherwise, fall through to the lock case
975         case ATOMIC_LOCK: {
976                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
977                         assert_bug("Lock access before initialization");
978                 state->locked = get_thread(curr);
979                 ModelAction *unlock = get_last_unlock(curr);
980                 //synchronize with the previous unlock statement
981                 if (unlock != NULL) {
982                         curr->synchronize_with(unlock);
983                         return true;
984                 }
985                 break;
986         }
987         case ATOMIC_WAIT:
988         case ATOMIC_UNLOCK: {
989                 /* wake up the other threads */
990                 for (unsigned int i = 0; i < get_num_threads(); i++) {
991                         Thread *t = get_thread(int_to_id(i));
992                         Thread *curr_thrd = get_thread(curr);
993                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
994                                 scheduler->wake(t);
995                 }
996
997                 /* unlock the lock - after checking who was waiting on it */
998                 state->locked = NULL;
999
1000                 if (!curr->is_wait())
1001                         break; /* The rest is only for ATOMIC_WAIT */
1002
1003                 /* Should we go to sleep? (simulate spurious failures) */
1004                 if (curr->get_node()->get_misc() == 0) {
1005                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1006                         /* disable us */
1007                         scheduler->sleep(get_thread(curr));
1008                 }
1009                 break;
1010         }
1011         case ATOMIC_NOTIFY_ALL: {
1012                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1013                 //activate all the waiting threads
1014                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1015                         scheduler->wake(get_thread(*rit));
1016                 }
1017                 waiters->clear();
1018                 break;
1019         }
1020         case ATOMIC_NOTIFY_ONE: {
1021                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1022                 int wakeupthread = curr->get_node()->get_misc();
1023                 action_list_t::iterator it = waiters->begin();
1024                 advance(it, wakeupthread);
1025                 scheduler->wake(get_thread(*it));
1026                 waiters->erase(it);
1027                 break;
1028         }
1029
1030         default:
1031                 ASSERT(0);
1032         }
1033         return false;
1034 }
1035
1036 /**
1037  * @brief Check if the current pending promises allow a future value to be sent
1038  *
1039  * If one of the following is true:
1040  *  (a) there are no pending promises
1041  *  (b) the reader and writer do not cross any promises
1042  * Then, it is safe to pass a future value back now.
1043  *
1044  * Otherwise, we must save the pending future value until (a) or (b) is true
1045  *
1046  * @param writer The operation which sends the future value. Must be a write.
1047  * @param reader The operation which will observe the value. Must be a read.
1048  * @return True if the future value can be sent now; false if it must wait.
1049  */
1050 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1051                 const ModelAction *reader) const
1052 {
1053         if (promises->empty())
1054                 return true;
1055         for(int i=promises->size()-1;i>=0;i--) {
1056                 ModelAction *pr=(*promises)[i]->get_reader(0);
1057                 //reader is after promise...doesn't cross any promise
1058                 if (*reader > *pr)
1059                         return true;
1060                 //writer is after promise, reader before...bad...
1061                 if (*writer > *pr)
1062                         return false;
1063         }
1064         return true;
1065 }
1066
1067 /**
1068  * @brief Add a future value to a reader
1069  *
1070  * This function performs a few additional checks to ensure that the future
1071  * value can be feasibly observed by the reader
1072  *
1073  * @param writer The operation whose value is sent. Must be a write.
1074  * @param reader The read operation which may read the future value. Must be a read.
1075  */
1076 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1077 {
1078         /* Do more ambitious checks now that mo is more complete */
1079         if (!mo_may_allow(writer, reader))
1080                 return;
1081
1082         Node *node = reader->get_node();
1083
1084         /* Find an ancestor thread which exists at the time of the reader */
1085         Thread *write_thread = get_thread(writer);
1086         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1087                 write_thread = write_thread->get_parent();
1088
1089         struct future_value fv = {
1090                 writer->get_write_value(),
1091                 writer->get_seq_number() + params.maxfuturedelay,
1092                 write_thread->get_id(),
1093         };
1094         if (node->add_future_value(fv))
1095                 set_latest_backtrack(reader);
1096 }
1097
1098 /**
1099  * Process a write ModelAction
1100  * @param curr The ModelAction to process
1101  * @return True if the mo_graph was updated or promises were resolved
1102  */
1103 bool ModelChecker::process_write(ModelAction *curr)
1104 {
1105         /* Readers to which we may send our future value */
1106         ModelVector<ModelAction *> send_fv;
1107
1108         const ModelAction *earliest_promise_reader;
1109         bool updated_promises = false;
1110
1111         bool updated_mod_order = w_modification_order(curr, &send_fv);
1112         Promise *promise = pop_promise_to_resolve(curr);
1113
1114         if (promise) {
1115                 earliest_promise_reader = promise->get_reader(0);
1116                 updated_promises = resolve_promise(curr, promise);
1117         } else
1118                 earliest_promise_reader = NULL;
1119
1120         for (unsigned int i = 0; i < send_fv.size(); i++) {
1121                 ModelAction *read = send_fv[i];
1122
1123                 /* Don't send future values to reads after the Promise we resolve */
1124                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1125                         /* Check if future value can be sent immediately */
1126                         if (promises_may_allow(curr, read)) {
1127                                 add_future_value(curr, read);
1128                         } else {
1129                                 futurevalues->push_back(PendingFutureValue(curr, read));
1130                         }
1131                 }
1132         }
1133
1134         /* Check the pending future values */
1135         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1136                 struct PendingFutureValue pfv = (*futurevalues)[i];
1137                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1138                         add_future_value(pfv.writer, pfv.reader);
1139                         futurevalues->erase(futurevalues->begin() + i);
1140                 }
1141         }
1142
1143         mo_graph->commitChanges();
1144         mo_check_promises(curr, false);
1145
1146         get_thread(curr)->set_return_value(VALUE_NONE);
1147         return updated_mod_order || updated_promises;
1148 }
1149
1150 /**
1151  * Process a fence ModelAction
1152  * @param curr The ModelAction to process
1153  * @return True if synchronization was updated
1154  */
1155 bool ModelChecker::process_fence(ModelAction *curr)
1156 {
1157         /*
1158          * fence-relaxed: no-op
1159  * fence-release: only log the occurrence (not in this function), for
1160          *   use in later synchronization
1161          * fence-acquire (this function): search for hypothetical release
1162          *   sequences
1163          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1164          */
1165         bool updated = false;
1166         if (curr->is_acquire()) {
1167                 action_list_t *list = action_trace;
1168                 action_list_t::reverse_iterator rit;
1169                 /* Find X : is_read(X) && X --sb-> curr */
1170                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1171                         ModelAction *act = *rit;
1172                         if (act == curr)
1173                                 continue;
1174                         if (act->get_tid() != curr->get_tid())
1175                                 continue;
1176                         /* Stop at the beginning of the thread */
1177                         if (act->is_thread_start())
1178                                 break;
1179                         /* Stop once we reach a prior fence-acquire */
1180                         if (act->is_fence() && act->is_acquire())
1181                                 break;
1182                         if (!act->is_read())
1183                                 continue;
1184                         /* read-acquire will find its own release sequences */
1185                         if (act->is_acquire())
1186                                 continue;
1187
1188                         /* Establish hypothetical release sequences */
1189                         rel_heads_list_t release_heads;
1190                         get_release_seq_heads(curr, act, &release_heads);
1191                         for (unsigned int i = 0; i < release_heads.size(); i++)
1192                                 if (!curr->synchronize_with(release_heads[i]))
1193                                         set_bad_synchronization();
1194                         if (release_heads.size() != 0)
1195                                 updated = true;
1196                 }
1197         }
1198         return updated;
1199 }
1200
1201 /**
1202  * @brief Process the current action for thread-related activity
1203  *
1204  * Performs current-action processing for a THREAD_* ModelAction. Processing
1205  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1206  * synchronization, etc.  This function is a no-op for non-THREAD actions
1207  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1208  *
1209  * @param curr The current action
1210  * @return True if synchronization was updated or a thread completed
1211  */
1212 bool ModelChecker::process_thread_action(ModelAction *curr)
1213 {
1214         bool updated = false;
1215
1216         switch (curr->get_type()) {
1217         case THREAD_CREATE: {
1218                 thrd_t *thrd = (thrd_t *)curr->get_location();
1219                 struct thread_params *params = (struct thread_params *)curr->get_value();
1220                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1221                 add_thread(th);
1222                 th->set_creation(curr);
1223                 /* Promises can be satisfied by children */
1224                 for (unsigned int i = 0; i < promises->size(); i++) {
1225                         Promise *promise = (*promises)[i];
1226                         if (promise->thread_is_available(curr->get_tid()))
1227                                 promise->add_thread(th->get_id());
1228                 }
1229                 break;
1230         }
1231         case THREAD_JOIN: {
1232                 Thread *blocking = curr->get_thread_operand();
1233                 ModelAction *act = get_last_action(blocking->get_id());
1234                 curr->synchronize_with(act);
1235                 updated = true; /* trigger rel-seq checks */
1236                 break;
1237         }
1238         case THREAD_FINISH: {
1239                 Thread *th = get_thread(curr);
1240                 /* Wake up any joining threads */
1241                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1242                         Thread *waiting = get_thread(int_to_id(i));
1243                         if (waiting->waiting_on() == th &&
1244                                         waiting->get_pending()->is_thread_join())
1245                                 scheduler->wake(waiting);
1246                 }
1247                 th->complete();
1248                 /* Completed thread can't satisfy promises */
1249                 for (unsigned int i = 0; i < promises->size(); i++) {
1250                         Promise *promise = (*promises)[i];
1251                         if (promise->thread_is_available(th->get_id()))
1252                                 if (promise->eliminate_thread(th->get_id()))
1253                                         priv->failed_promise = true;
1254                 }
1255                 updated = true; /* trigger rel-seq checks */
1256                 break;
1257         }
1258         case THREAD_START: {
1259                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1260                 break;
1261         }
1262         default:
1263                 break;
1264         }
1265
1266         return updated;
1267 }
1268
1269 /**
1270  * @brief Process the current action for release sequence fixup activity
1271  *
1272  * Performs model-checker release sequence fixups for the current action,
1273  * forcing a single pending release sequence to break (with a given, potential
1274  * "loose" write) or to complete (i.e., synchronize). If a pending release
1275  * sequence forms a complete release sequence, then we must perform the fixup
1276  * synchronization, mo_graph additions, etc.
1277  *
1278  * @param curr The current action; must be a release sequence fixup action
1279  * @param work_queue The work queue to which to add work items as they are
1280  * generated
1281  */
1282 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1283 {
1284         const ModelAction *write = curr->get_node()->get_relseq_break();
1285         struct release_seq *sequence = pending_rel_seqs->back();
1286         pending_rel_seqs->pop_back();
1287         ASSERT(sequence);
1288         ModelAction *acquire = sequence->acquire;
1289         const ModelAction *rf = sequence->rf;
1290         const ModelAction *release = sequence->release;
1291         ASSERT(acquire);
1292         ASSERT(release);
1293         ASSERT(rf);
1294         ASSERT(release->same_thread(rf));
1295
1296         if (write == NULL) {
1297                 /**
1298                  * @todo Forcing a synchronization requires that we set
1299                  * modification order constraints. For instance, we can't allow
1300                  * a fixup sequence in which two separate read-acquire
1301                  * operations read from the same sequence, where the first one
1302                  * synchronizes and the other doesn't. Essentially, we can't
1303                  * allow any writes to insert themselves between 'release' and
1304                  * 'rf'
1305                  */
1306
1307                 /* Must synchronize */
1308                 if (!acquire->synchronize_with(release)) {
1309                         set_bad_synchronization();
1310                         return;
1311                 }
1312                 /* Re-check all pending release sequences */
1313                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1314                 /* Re-check act for mo_graph edges */
1315                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1316
1317                 /* propagate synchronization to later actions */
1318                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1319                 for (; (*rit) != acquire; rit++) {
1320                         ModelAction *propagate = *rit;
1321                         if (acquire->happens_before(propagate)) {
1322                                 propagate->synchronize_with(acquire);
1323                                 /* Re-check 'propagate' for mo_graph edges */
1324                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1325                         }
1326                 }
1327         } else {
1328                 /* Break release sequence with new edges:
1329                  *   release --mo--> write --mo--> rf */
1330                 mo_graph->addEdge(release, write);
1331                 mo_graph->addEdge(write, rf);
1332         }
1333
1334         /* See if we have realized a data race */
1335         checkDataRaces();
1336 }
1337
1338 /**
1339  * Initialize the current action by performing one or more of the following
1340  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1341  * in the NodeStack, manipulating backtracking sets, allocating and
1342  * initializing clock vectors, and computing the promises to fulfill.
1343  *
1344  * @param curr The current action, as passed from the user context; may be
1345  * freed/invalidated after the execution of this function, with a different
1346  * action "returned" in its place (pass-by-reference)
1347  * @return True if curr is a newly-explored action; false otherwise
1348  */
1349 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1350 {
1351         ModelAction *newcurr;
1352
1353         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1354                 newcurr = process_rmw(*curr);
1355                 delete *curr;
1356
1357                 if (newcurr->is_rmw())
1358                         compute_promises(newcurr);
1359
1360                 *curr = newcurr;
1361                 return false;
1362         }
1363
1364         (*curr)->set_seq_number(get_next_seq_num());
1365
1366         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1367         if (newcurr) {
1368                 /* First restore type and order in case of RMW operation */
1369                 if ((*curr)->is_rmwr())
1370                         newcurr->copy_typeandorder(*curr);
1371
1372                 ASSERT((*curr)->get_location() == newcurr->get_location());
1373                 newcurr->copy_from_new(*curr);
1374
1375                 /* Discard duplicate ModelAction; use action from NodeStack */
1376                 delete *curr;
1377
1378                 /* Always compute new clock vector */
1379                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1380
1381                 *curr = newcurr;
1382                 return false; /* Action was explored previously */
1383         } else {
1384                 newcurr = *curr;
1385
1386                 /* Always compute new clock vector */
1387                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1388
1389                 /* Assign most recent release fence */
1390                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1391
1392                 /*
1393                  * Perform one-time actions when pushing new ModelAction onto
1394                  * NodeStack
1395                  */
1396                 if (newcurr->is_write())
1397                         compute_promises(newcurr);
1398                 else if (newcurr->is_relseq_fixup())
1399                         compute_relseq_breakwrites(newcurr);
1400                 else if (newcurr->is_wait())
1401                         newcurr->get_node()->set_misc_max(2);
1402                 else if (newcurr->is_notify_one()) {
1403                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1404                 }
1405                 return true; /* This was a new ModelAction */
1406         }
1407 }
1408
1409 /**
1410  * @brief Establish reads-from relation between two actions
1411  *
1412  * Perform basic operations involved with establishing a concrete rf relation,
1413  * including setting the ModelAction data and checking for release sequences.
1414  *
1415  * @param act The action that is reading (must be a read)
1416  * @param rf The action from which we are reading (must be a write)
1417  *
1418  * @return True if this read established synchronization
1419  */
1420 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1421 {
1422         ASSERT(rf);
1423         ASSERT(rf->is_write());
1424
1425         act->set_read_from(rf);
1426         if (act->is_acquire()) {
1427                 rel_heads_list_t release_heads;
1428                 get_release_seq_heads(act, act, &release_heads);
1429                 int num_heads = release_heads.size();
1430                 for (unsigned int i = 0; i < release_heads.size(); i++)
1431                         if (!act->synchronize_with(release_heads[i])) {
1432                                 set_bad_synchronization();
1433                                 num_heads--;
1434                         }
1435                 return num_heads > 0;
1436         }
1437         return false;
1438 }
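
/*
 * Illustrative sketch (not part of the checker): the synchronization that
 * read_from() may establish corresponds to client code like the following,
 * where an acquire load that reads from a release store creates a
 * happens-before edge between the two threads.
 *
 *   std::atomic<int> data_ready(0);
 *   int payload = 0;
 *
 *   // Thread 1
 *   payload = 42;
 *   data_ready.store(1, std::memory_order_release);
 *
 *   // Thread 2
 *   if (data_ready.load(std::memory_order_acquire) == 1)
 *           assert(payload == 42);  // safe: the acquire read from the release
 */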
1439
1440 /**
1441  * Check promises and eliminate potentially-satisfying threads when a thread is
1442  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1443  * no longer satisfy a promise generated from that thread.
1444  *
1445  * @param blocker The thread on which a thread is waiting
1446  * @param waiting The waiting thread
1447  */
1448 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1449 {
1450         for (unsigned int i = 0; i < promises->size(); i++) {
1451                 Promise *promise = (*promises)[i];
1452                 if (!promise->thread_is_available(waiting->get_id()))
1453                         continue;
1454                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1455                         ModelAction *reader = promise->get_reader(j);
1456                         if (reader->get_tid() != blocker->get_id())
1457                                 continue;
1458                         if (promise->eliminate_thread(waiting->get_id())) {
1459                                 /* Promise has failed */
1460                                 priv->failed_promise = true;
1461                         } else {
1462                                 /* Only eliminate the 'waiting' thread once */
1463                                 return;
1464                         }
1465                 }
1466         }
1467 }
1468
1469 /**
1470  * @brief Check whether a model action is enabled.
1471  *
1472  * Checks whether a lock or join operation would be successful (i.e., is the
1473  * lock already locked, or is the joined thread already complete). If not, put
1474  * the action in a waiter list.
1475  *
1476  * @param curr The ModelAction to check.
1477  * @return True if the action is enabled; false otherwise.
1478  */
1479 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1480         if (curr->is_lock()) {
1481                 std::mutex *lock = curr->get_mutex();
1482                 struct std::mutex_state *state = lock->get_state();
1483                 if (state->locked)
1484                         return false;
1485         } else if (curr->is_thread_join()) {
1486                 Thread *blocking = curr->get_thread_operand();
1487                 if (!blocking->is_complete()) {
1488                         thread_blocking_check_promises(blocking, get_thread(curr));
1489                         return false;
1490                 }
1491         }
1492
1493         return true;
1494 }
1495
1496 /**
1497  * This is the heart of the model checker routine. It performs model-checking
1498  * actions corresponding to a given "current action." Among other processes, it
1499  * calculates reads-from relationships, updates synchronization clock vectors,
1500  * forms a memory_order constraints graph, and handles replay/backtrack
1501  * execution when running permutations of previously-observed executions.
1502  *
1503  * @param curr The current action to process
1504  * @return The ModelAction that is actually executed; may be different than
1505  * curr; may be NULL, if the current action is not enabled to run
1506  */
1507 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1508 {
1509         ASSERT(curr);
1510         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1511         bool newly_explored = initialize_curr_action(&curr);
1512
1513         DBG();
1514
1515         wake_up_sleeping_actions(curr);
1516
1517         /* Compute fairness information for CHESS yield algorithm */
1518         if (model->params.yieldon) {
1519                 curr->get_node()->update_yield(scheduler);
1520         }
1521
1522         /* Add the action to lists before any other model-checking tasks */
1523         if (!second_part_of_rmw)
1524                 add_action_to_lists(curr);
1525
1526         /* Build may_read_from set for newly-created actions */
1527         if (newly_explored && curr->is_read())
1528                 build_may_read_from(curr);
1529
1530         /* Initialize work_queue with the "current action" work */
1531         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1532         while (!work_queue.empty() && !has_asserted()) {
1533                 WorkQueueEntry work = work_queue.front();
1534                 work_queue.pop_front();
1535
1536                 switch (work.type) {
1537                 case WORK_CHECK_CURR_ACTION: {
1538                         ModelAction *act = work.action;
1539                         bool update = false; /* update this location's release seq's */
1540                         bool update_all = false; /* update all release seq's */
1541
1542                         if (process_thread_action(curr))
1543                                 update_all = true;
1544
1545                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1546                                 update = true;
1547
1548                         if (act->is_write() && process_write(act))
1549                                 update = true;
1550
1551                         if (act->is_fence() && process_fence(act))
1552                                 update_all = true;
1553
1554                         if (act->is_mutex_op() && process_mutex(act))
1555                                 update_all = true;
1556
1557                         if (act->is_relseq_fixup())
1558                                 process_relseq_fixup(curr, &work_queue);
1559
1560                         if (update_all)
1561                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1562                         else if (update)
1563                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1564                         break;
1565                 }
1566                 case WORK_CHECK_RELEASE_SEQ:
1567                         resolve_release_sequences(work.location, &work_queue);
1568                         break;
1569                 case WORK_CHECK_MO_EDGES: {
1570                         /** @todo Complete verification of work_queue */
1571                         ModelAction *act = work.action;
1572                         bool updated = false;
1573
1574                         if (act->is_read()) {
1575                                 const ModelAction *rf = act->get_reads_from();
1576                                 const Promise *promise = act->get_reads_from_promise();
1577                                 if (rf) {
1578                                         if (r_modification_order(act, rf))
1579                                                 updated = true;
1580                                 } else if (promise) {
1581                                         if (r_modification_order(act, promise))
1582                                                 updated = true;
1583                                 }
1584                         }
1585                         if (act->is_write()) {
1586                                 if (w_modification_order(act, NULL))
1587                                         updated = true;
1588                         }
1589                         mo_graph->commitChanges();
1590
1591                         if (updated)
1592                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1593                         break;
1594                 }
1595                 default:
1596                         ASSERT(false);
1597                         break;
1598                 }
1599         }
1600
1601         check_curr_backtracking(curr);
1602         set_backtracking(curr);
1603         return curr;
1604 }
1605
1606 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1607 {
1608         Node *currnode = curr->get_node();
1609         Node *parnode = currnode->get_parent();
1610
1611         if ((parnode && !parnode->backtrack_empty()) ||
1612                          !currnode->misc_empty() ||
1613                          !currnode->read_from_empty() ||
1614                          !currnode->promise_empty() ||
1615                          !currnode->relseq_break_empty()) {
1616                 set_latest_backtrack(curr);
1617         }
1618 }
1619
1620 bool ModelChecker::promises_expired() const
1621 {
1622         for (unsigned int i = 0; i < promises->size(); i++) {
1623                 Promise *promise = (*promises)[i];
1624                 if (promise->get_expiration() < priv->used_sequence_numbers)
1625                         return true;
1626         }
1627         return false;
1628 }
1629
1630 /**
1631  * This is the strongest feasibility check available.
1632  * @return whether the current trace (partial or complete) must be a prefix of
1633  * a feasible trace.
1634  */
1635 bool ModelChecker::isfeasibleprefix() const
1636 {
1637         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1638 }
1639
1640 /**
1641  * Print diagnostic information about an infeasible execution
1642  * @param prefix A string to prefix the output with; if NULL, then a default
1643  * message prefix will be provided
1644  */
1645 void ModelChecker::print_infeasibility(const char *prefix) const
1646 {
1647         char buf[128];  /* large enough to hold every infeasibility message at once */
1648         char *ptr = buf;
1649         if (mo_graph->checkForCycles())
1650                 ptr += sprintf(ptr, "[mo cycle]");
1651         if (priv->failed_promise)
1652                 ptr += sprintf(ptr, "[failed promise]");
1653         if (priv->too_many_reads)
1654                 ptr += sprintf(ptr, "[too many reads]");
1655         if (priv->no_valid_reads)
1656                 ptr += sprintf(ptr, "[no valid reads-from]");
1657         if (priv->bad_synchronization)
1658                 ptr += sprintf(ptr, "[bad sw ordering]");
1659         if (promises_expired())
1660                 ptr += sprintf(ptr, "[promise expired]");
1661         if (promises->size() != 0)
1662                 ptr += sprintf(ptr, "[unresolved promise]");
1663         if (ptr != buf)
1664                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1665 }
1666
1667 /**
1668  * Returns whether the current completed trace is feasible, except for pending
1669  * release sequences.
1670  */
1671 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1672 {
1673         return !is_infeasible() && promises->size() == 0;
1674 }
1675
1676 /**
1677  * Check if the current partial trace is infeasible. Does not check any
1678  * end-of-execution flags, which might rule out the execution. Thus, this is
1679  * useful only for ruling an execution as infeasible.
1680  * @return whether the current partial trace is infeasible.
1681  */
1682 bool ModelChecker::is_infeasible() const
1683 {
1684         return mo_graph->checkForCycles() ||
1685                 priv->no_valid_reads ||
1686                 priv->failed_promise ||
1687                 priv->too_many_reads ||
1688                 priv->bad_synchronization ||
1689                 promises_expired();
1690 }
1691
1692 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1693 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1694         ModelAction *lastread = get_last_action(act->get_tid());
1695         lastread->process_rmw(act);
1696         if (act->is_rmw()) {
1697                 if (lastread->get_reads_from())
1698                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1699                 else
1700                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1701                 mo_graph->commitChanges();
1702         }
1703         return lastread;
1704 }
1705
1706 /**
1707  * A helper function for ModelChecker::check_recency, to check if the current
1708  * thread is able to read from a different write/promise for 'params.maxreads'
1709  * number of steps and if that write/promise should become visible (i.e., is
1710  * ordered later in the modification order). This helps model memory liveness.
1711  *
1712  * @param curr The current action. Must be a read.
1713  * @param rf The write/promise from which we plan to read
1714  * @param other_rf The write/promise from which we may read
1715  * @return True if we were able to read from other_rf for params.maxreads steps
1716  */
1717 template <typename T, typename U>
1718 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1719 {
1720         /* Need a different write/promise */
1721         if (other_rf->equals(rf))
1722                 return false;
1723
1724         /* Only look for "newer" writes/promises */
1725         if (!mo_graph->checkReachable(rf, other_rf))
1726                 return false;
1727
1728         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1729         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1730         action_list_t::reverse_iterator rit = list->rbegin();
1731         ASSERT((*rit) == curr);
1732         /* Skip past curr */
1733         rit++;
1734
1735         /* Does this write/promise work for everyone? */
1736         for (int i = 0; i < params.maxreads; i++, rit++) {
1737                 ModelAction *act = *rit;
1738                 if (!act->may_read_from(other_rf))
1739                         return false;
1740         }
1741         return true;
1742 }
1743
1744 /**
1745  * Checks whether a thread has read from the same write or Promise too many
1746  * times without seeing the effects of a later write/Promise.
1747  *
1748  * Basic idea:
1749  * 1) there must be a different write/promise that we could read from,
1750  * 2) we must have read from the same write/promise in excess of maxreads times,
1751  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1752  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1753  *
1754  * If so, we decide that the execution is no longer feasible.
1755  *
1756  * @param curr The current action. Must be a read.
1757  * @param rf The ModelAction/Promise from which we might read.
1758  * @return True if the read should succeed; false otherwise
1759  */
1760 template <typename T>
1761 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1762 {
1763         if (!params.maxreads)
1764                 return true;
1765
1766         //NOTE: The next check is just an optimization, not strictly necessary....
1767         if (curr->get_node()->get_read_from_past_size() +
1768                         curr->get_node()->get_read_from_promise_size() <= 1)
1769                 return true;
1770
1771         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1772         int tid = id_to_int(curr->get_tid());
1773         ASSERT(tid < (int)thrd_lists->size());
1774         action_list_t *list = &(*thrd_lists)[tid];
1775         action_list_t::reverse_iterator rit = list->rbegin();
1776         ASSERT((*rit) == curr);
1777         /* Skip past curr */
1778         rit++;
1779
1780         action_list_t::reverse_iterator ritcopy = rit;
1781         /* See if we have enough reads from the same value */
1782         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1783                 if (ritcopy == list->rend())
1784                         return true;
1785                 ModelAction *act = *ritcopy;
1786                 if (!act->is_read())
1787                         return true;
1788                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1789                         return true;
1790                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1791                         return true;
1792                 if (act->get_node()->get_read_from_past_size() +
1793                                 act->get_node()->get_read_from_promise_size() <= 1)
1794                         return true;
1795         }
1796         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1797                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1798                 if (should_read_instead(curr, rf, write))
1799                         return false; /* liveness failure */
1800         }
1801         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1802                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1803                 if (should_read_instead(curr, rf, promise))
1804                         return false; /* liveness failure */
1805         }
1806         return true;
1807 }
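
/*
 * Illustrative sketch (not part of the checker): with params.maxreads set,
 * check_recency() models memory liveness for client code such as a spin
 * loop. Without it, the loop below could read the stale initial value of
 * 'flag' forever, even after the other thread's store.
 *
 *   std::atomic<bool> flag(false);
 *
 *   // Thread 1
 *   flag.store(true, std::memory_order_relaxed);
 *
 *   // Thread 2
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;  // must eventually observe the later store
 */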
1808
1809 /**
1810  * @brief Updates the mo_graph with the constraints imposed from the current
1811  * read.
1812  *
1813  * Basic idea is the following: Go through each other thread and find
1814  * the last action that happened before our read.  Two cases:
1815  *
1816  * -# The action is a write: that write must either occur before
1817  * the write we read from or be the write we read from.
1818  * -# The action is a read: the write that that action read from
1819  * must occur before the write we read from or be the same write.
1820  *
1821  * @param curr The current action. Must be a read.
1822  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1823  * @return True if modification order edges were added; false otherwise
1824  */
1825 template <typename rf_type>
1826 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1827 {
1828         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1829         unsigned int i;
1830         bool added = false;
1831         ASSERT(curr->is_read());
1832
1833         /* Last SC fence in the current thread */
1834         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1835         ModelAction *last_sc_write = NULL;
1836         if (curr->is_seqcst())
1837                 last_sc_write = get_last_seq_cst_write(curr);
1838
1839         /* Iterate over all threads */
1840         for (i = 0; i < thrd_lists->size(); i++) {
1841                 /* Last SC fence in thread i */
1842                 ModelAction *last_sc_fence_thread_local = NULL;
1843                 if (int_to_id((int)i) != curr->get_tid())
1844                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1845
1846                 /* Last SC fence in thread i, before last SC fence in current thread */
1847                 ModelAction *last_sc_fence_thread_before = NULL;
1848                 if (last_sc_fence_local)
1849                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1850
1851                 /* Iterate over actions in thread, starting from most recent */
1852                 action_list_t *list = &(*thrd_lists)[i];
1853                 action_list_t::reverse_iterator rit;
1854                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1855                         ModelAction *act = *rit;
1856
1857                         /* Skip curr */
1858                         if (act == curr)
1859                                 continue;
1860                         /* Don't want to add reflexive edges on 'rf' */
1861                         if (act->equals(rf)) {
1862                                 if (act->happens_before(curr))
1863                                         break;
1864                                 else
1865                                         continue;
1866                         }
1867
1868                         if (act->is_write()) {
1869                                 /* C++, Section 29.3 statement 5 */
1870                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1871                                                 *act < *last_sc_fence_thread_local) {
1872                                         added = mo_graph->addEdge(act, rf) || added;
1873                                         break;
1874                                 }
1875                                 /* C++, Section 29.3 statement 4 */
1876                                 else if (act->is_seqcst() && last_sc_fence_local &&
1877                                                 *act < *last_sc_fence_local) {
1878                                         added = mo_graph->addEdge(act, rf) || added;
1879                                         break;
1880                                 }
1881                                 /* C++, Section 29.3 statement 6 */
1882                                 else if (last_sc_fence_thread_before &&
1883                                                 *act < *last_sc_fence_thread_before) {
1884                                         added = mo_graph->addEdge(act, rf) || added;
1885                                         break;
1886                                 }
1887                         }
1888
1889                         /* C++, Section 29.3 statement 3 (second subpoint) */
1890                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1891                                 added = mo_graph->addEdge(act, rf) || added;
1892                                 break;
1893                         }
1894
1895                         /*
1896                          * Include at most one act per-thread that "happens
1897                          * before" curr
1898                          */
1899                         if (act->happens_before(curr)) {
1900                                 if (act->is_write()) {
1901                                         added = mo_graph->addEdge(act, rf) || added;
1902                                 } else {
1903                                         const ModelAction *prevrf = act->get_reads_from();
1904                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1905                                         if (prevrf) {
1906                                                 if (!prevrf->equals(rf))
1907                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1908                                         } else if (!prevrf_promise->equals(rf)) {
1909                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1910                                         }
1911                                 }
1912                                 break;
1913                         }
1914                 }
1915         }
1916
1917         /*
1918          * All compatible, thread-exclusive promises must be ordered after any
1919          * concrete loads from the same thread
1920          */
1921         for (unsigned int i = 0; i < promises->size(); i++)
1922                 if ((*promises)[i]->is_compatible_exclusive(curr))
1923                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1924
1925         return added;
1926 }
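
/*
 * Illustrative sketch (not part of the checker): the first case in the
 * comment above is write-read coherence. If a write W happens before our
 * read R, then W must be ordered no later than the write R reads from:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1
 *   x.store(1, std::memory_order_relaxed);       // W
 *   int r = x.load(std::memory_order_relaxed);   // R
 *
 * Whatever write R reads from, either it is W or W --mo--> rf, so R can
 * never observe the initial value 0 once W is sequenced before it.
 */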
1927
1928 /**
1929  * Updates the mo_graph with the constraints imposed from the current write.
1930  *
1931  * Basic idea is the following: Go through each other thread and find
1932  * the latest action that happened before our write.  Two cases:
1933  *
1934  * (1) The action is a write => that write must occur before
1935  * the current write
1936  *
1937  * (2) The action is a read => the write that that action read from
1938  * must occur before the current write.
1939  *
1940  * This method also handles two other issues:
1941  *
1942  * (I) Sequential Consistency: Making sure that if the current write is
1943  * seq_cst, it occurs after the previous seq_cst write.
1944  *
1945  * (II) Sending the write back to non-synchronizing reads.
1946  *
1947  * @param curr The current action. Must be a write.
1948  * @param send_fv A vector for stashing reads to which we may pass our future
1949  * value. If NULL, then don't record any future values.
1950  * @return True if modification order edges were added; false otherwise
1951  */
1952 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1953 {
1954         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1955         unsigned int i;
1956         bool added = false;
1957         ASSERT(curr->is_write());
1958
1959         if (curr->is_seqcst()) {
1960                 /* We have to at least see the last sequentially consistent write,
1961                          so we are initialized. */
1962                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1963                 if (last_seq_cst != NULL) {
1964                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1965                 }
1966         }
1967
1968         /* Last SC fence in the current thread */
1969         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1970
1971         /* Iterate over all threads */
1972         for (i = 0; i < thrd_lists->size(); i++) {
1973                 /* Last SC fence in thread i, before last SC fence in current thread */
1974                 ModelAction *last_sc_fence_thread_before = NULL;
1975                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1976                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1977
1978                 /* Iterate over actions in thread, starting from most recent */
1979                 action_list_t *list = &(*thrd_lists)[i];
1980                 action_list_t::reverse_iterator rit;
1981                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1982                         ModelAction *act = *rit;
1983                         if (act == curr) {
1984                                 /*
1985                                  * 1) If RMW and it actually read from something, then we
1986                                  * already have all relevant edges, so just skip to next
1987                                  * thread.
1988                                  *
1989                                  * 2) If RMW and it didn't read from anything, we should add
1990                                  * whatever edge we can get to speed up convergence.
1991                                  *
1992                                  * 3) If normal write, we need to look at earlier actions, so
1993                                  * continue processing list.
1994                                  */
1995                                 if (curr->is_rmw()) {
1996                                         if (curr->get_reads_from() != NULL)
1997                                                 break;
1998                                         else
1999                                                 continue;
2000                                 } else
2001                                         continue;
2002                         }
2003
2004                         /* C++, Section 29.3 statement 7 */
2005                         if (last_sc_fence_thread_before && act->is_write() &&
2006                                         *act < *last_sc_fence_thread_before) {
2007                                 added = mo_graph->addEdge(act, curr) || added;
2008                                 break;
2009                         }
2010
2011                         /*
2012                          * Include at most one act per-thread that "happens
2013                          * before" curr
2014                          */
2015                         if (act->happens_before(curr)) {
2016                                 /*
2017                                  * Note: if act is RMW, just add edge:
2018                                  *   act --mo--> curr
2019                                  * The following edge should be handled elsewhere:
2020                                  *   readfrom(act) --mo--> act
2021                                  */
2022                                 if (act->is_write())
2023                                         added = mo_graph->addEdge(act, curr) || added;
2024                                 else if (act->is_read()) {
2025                                         //if previous read accessed a null, just keep going
2026                                         if (act->get_reads_from() == NULL)
2027                                                 continue;
2028                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2029                                 }
2030                                 break;
2031                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2032                                                      !act->same_thread(curr)) {
2033                                 /* We have an action that:
2034                                    (1) did not happen before us
2035                                    (2) is a read and we are a write
2036                                    (3) cannot synchronize with us
2037                                    (4) is in a different thread
2038                                    =>
2039                                    that read could potentially read from our write.  Note that
2040                                    these checks are overly conservative at this point; we'll
2041                                    do more checks before actually removing the
2042                                    pendingfuturevalue.
2043
2044                                  */
2045                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2046                                         if (!is_infeasible())
2047                                                 send_fv->push_back(act);
2048                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2049                                                 add_future_value(curr, act);
2050                                 }
2051                         }
2052                 }
2053         }
2054
2055         /*
2056          * All compatible, thread-exclusive promises must be ordered after any
2057          * concrete stores to the same thread, or else they can be merged with
2058          * this store later
2059          */
2060         for (unsigned int i = 0; i < promises->size(); i++)
2061                 if ((*promises)[i]->is_compatible_exclusive(curr))
2062                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2063
2064         return added;
2065 }
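
/*
 * Illustrative sketch (not part of the checker): case (1) above is
 * write-write coherence, and issue (I) orders seq_cst writes. For example:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1
 *   x.store(1, std::memory_order_seq_cst);
 *   x.store(2, std::memory_order_seq_cst);
 *
 * The second store happens after the first, so it must also follow it in
 * modification order; no thread may observe the value 2 and then 1.
 */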
2066
2067 /** Arbitrary reads from the future are not allowed.  Section 29.3
2068  * part 9 places some constraints.  This method checks one result of that
2069  * constraint.  Others require compiler support. */
2070 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2071 {
2072         if (!writer->is_rmw())
2073                 return true;
2074
2075         if (!reader->is_rmw())
2076                 return true;
2077
2078         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2079                 if (search == reader)
2080                         return false;
2081                 if (search->get_tid() == reader->get_tid() &&
2082                                 search->happens_before(reader))
2083                         break;
2084         }
2085
2086         return true;
2087 }
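
/*
 * Illustrative sketch (not part of the checker): the reads-from walk above
 * prevents a cycle between two RMWs. For example, with two increments of
 * the same atomic:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 *   // Thread 2
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * If the writer RMW already (transitively) reads from the reader RMW, the
 * reader must not also read from the writer, since that would be an
 * out-of-thin-air cycle.
 */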
2088
2089 /**
2090  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2091  * some constraints. This method checks the following constraint (others
2092  * require compiler support):
2093  *
2094  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2095  */
2096 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2097 {
2098         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2099         unsigned int i;
2100         /* Iterate over all threads */
2101         for (i = 0; i < thrd_lists->size(); i++) {
2102                 const ModelAction *write_after_read = NULL;
2103
2104                 /* Iterate over actions in thread, starting from most recent */
2105                 action_list_t *list = &(*thrd_lists)[i];
2106                 action_list_t::reverse_iterator rit;
2107                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2108                         ModelAction *act = *rit;
2109
2110                         /* Don't disallow due to act == reader */
2111                         if (!reader->happens_before(act) || reader == act)
2112                                 break;
2113                         else if (act->is_write())
2114                                 write_after_read = act;
2115                         else if (act->is_read() && act->get_reads_from() != NULL)
2116                                 write_after_read = act->get_reads_from();
2117                 }
2118
2119                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2120                         return false;
2121         }
2122         return true;
2123 }
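
/*
 * Illustrative sketch (not part of the checker): the X --hb--> Y --mo--> Z
 * constraint above is read-write coherence. For example:
 *
 *   std::atomic<int> a(0);
 *
 *   // Thread 1
 *   int r = a.load(std::memory_order_relaxed);   // X
 *   a.store(1, std::memory_order_relaxed);       // Y
 *
 *   // Thread 2
 *   a.store(2, std::memory_order_relaxed);       // Z
 *
 * If Y is modification-ordered before Z, then X must not read the value 2
 * from Z.
 */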
2124
2125 /**
2126  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2127  * The ModelAction under consideration is expected to be taking part in
2128  * release/acquire synchronization as an object of the "reads from" relation.
2129  * Note that this can only provide release sequence support for RMW chains
2130  * which do not read from the future, as those actions cannot be traced until
2131  * their "promise" is fulfilled. Similarly, we may not even establish the
2132  * presence of a release sequence with certainty, as some modification order
2133  * constraints may be decided further in the future. Thus, this function
2134  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2135  * and a boolean representing certainty.
2136  *
2137  * @param rf The action that might be part of a release sequence. Must be a
2138  * write.
2139  * @param release_heads A pass-by-reference style return parameter. After
2140  * execution of this function, release_heads will contain the heads of all the
2141  * relevant release sequences, if any exist with certainty
2142  * @param pending A pass-by-reference style return parameter which is only used
2143  * when returning false (i.e., uncertain). Returns as much information as
2144  * possible regarding an uncertain release sequence, including any write
2145  * operations that might break the sequence.
2146  * @return true, if the ModelChecker is certain that release_heads is complete;
2147  * false otherwise
2148  */
2149 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2150                 rel_heads_list_t *release_heads,
2151                 struct release_seq *pending) const
2152 {
2153         /* Only check for release sequences if there are no cycles */
2154         if (mo_graph->checkForCycles())
2155                 return false;
2156
2157         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2158                 ASSERT(rf->is_write());
2159
2160                 if (rf->is_release())
2161                         release_heads->push_back(rf);
2162                 else if (rf->get_last_fence_release())
2163                         release_heads->push_back(rf->get_last_fence_release());
2164                 if (!rf->is_rmw())
2165                         break; /* End of RMW chain */
2166
2167                 /** @todo Need to be smarter here...  In the linux lock
2168                  * example, this will run to the beginning of the program for
2169                  * every acquire. */
2170                 /** @todo The way to be smarter here is to keep going until 1
2171                  * thread has a release preceded by an acquire and you've seen
2172                  *       both. */
2173
2174                 /* acq_rel RMW is a sufficient stopping condition */
2175                 if (rf->is_acquire() && rf->is_release())
2176                         return true; /* complete */
2177         }
2178         if (!rf) {
2179                 /* read from future: need to settle this later */
2180                 pending->rf = NULL;
2181                 return false; /* incomplete */
2182         }
2183
2184         if (rf->is_release())
2185                 return true; /* complete */
2186
2187         /* else relaxed write
2188          * - check for fence-release in the same thread (29.8, stmt. 3)
2189          * - check modification order for contiguous subsequence
2190          *   -> rf must be same thread as release */
2191
2192         const ModelAction *fence_release = rf->get_last_fence_release();
2193         /* Synchronize with a fence-release unconditionally; we don't need to
2194          * find any more "contiguous subsequence..." for it */
2195         if (fence_release)
2196                 release_heads->push_back(fence_release);
2197
2198         int tid = id_to_int(rf->get_tid());
2199         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2200         action_list_t *list = &(*thrd_lists)[tid];
2201         action_list_t::const_reverse_iterator rit;
2202
2203         /* Find rf in the thread list */
2204         rit = std::find(list->rbegin(), list->rend(), rf);
2205         ASSERT(rit != list->rend());
2206
2207         /* Find the last {write,fence}-release */
2208         for (; rit != list->rend(); rit++) {
2209                 if (fence_release && *(*rit) < *fence_release)
2210                         break;
2211                 if ((*rit)->is_release())
2212                         break;
2213         }
2214         if (rit == list->rend()) {
2215                 /* No write-release in this thread */
2216                 return true; /* complete */
2217         } else if (fence_release && *(*rit) < *fence_release) {
2218                 /* The fence-release is more recent (and so, "stronger") than
2219                  * the most recent write-release */
2220                 return true; /* complete */
2221         } /* else, need to establish contiguous release sequence */
2222         ModelAction *release = *rit;
2223
2224         ASSERT(rf->same_thread(release));
2225
2226         pending->writes.clear();
2227
2228         bool certain = true;
2229         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2230                 if (id_to_int(rf->get_tid()) == (int)i)
2231                         continue;
2232                 list = &(*thrd_lists)[i];
2233
2234                 /* Can we ensure no future writes from this thread may break
2235                  * the release seq? */
2236                 bool future_ordered = false;
2237
2238                 ModelAction *last = get_last_action(int_to_id(i));
2239                 Thread *th = get_thread(int_to_id(i));
2240                 if ((last && rf->happens_before(last)) ||
2241                                 !is_enabled(th) ||
2242                                 th->is_complete())
2243                         future_ordered = true;
2244
2245                 ASSERT(!th->is_model_thread() || future_ordered);
2246
2247                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2248                         const ModelAction *act = *rit;
2249                         /* Reach synchronization -> this thread is complete */
2250                         if (act->happens_before(release))
2251                                 break;
2252                         if (rf->happens_before(act)) {
2253                                 future_ordered = true;
2254                                 continue;
2255                         }
2256
2257                         /* Only non-RMW writes can break release sequences */
2258                         if (!act->is_write() || act->is_rmw())
2259                                 continue;
2260
2261                         /* Check modification order */
2262                         if (mo_graph->checkReachable(rf, act)) {
2263                                 /* rf --mo--> act */
2264                                 future_ordered = true;
2265                                 continue;
2266                         }
2267                         if (mo_graph->checkReachable(act, release))
2268                                 /* act --mo--> release */
2269                                 break;
2270                         if (mo_graph->checkReachable(release, act) &&
2271                                       mo_graph->checkReachable(act, rf)) {
2272                                 /* release --mo-> act --mo--> rf */
2273                                 return true; /* complete */
2274                         }
2275                         /* act may break release sequence */
2276                         pending->writes.push_back(act);
2277                         certain = false;
2278                 }
2279                 if (!future_ordered)
2280                         certain = false; /* This thread is uncertain */
2281         }
2282
2283         if (certain) {
2284                 release_heads->push_back(release);
2285                 pending->writes.clear();
2286         } else {
2287                 pending->release = release;
2288                 pending->rf = rf;
2289         }
2290         return certain;
2291 }
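
/*
 * Illustrative sketch (not part of the checker): under C++11 a release
 * sequence headed by a release store may be continued by RMW operations,
 * so an acquire load that reads from the tail of the chain still
 * synchronizes with the head:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1
 *   x.store(1, std::memory_order_release);       // head of release sequence
 *
 *   // Thread 2
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 *
 *   // Thread 3
 *   if (x.load(std::memory_order_acquire) == 2)
 *           ;  // synchronizes-with the release store in Thread 1
 */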
2292
2293 /**
2294  * An interface for getting the release sequence head(s) with which a
2295  * given ModelAction must synchronize. This function only returns a non-empty
2296  * result when it can locate a release sequence head with certainty. Otherwise,
2297  * it may mark the internal state of the ModelChecker so that it will handle
2298  * the release sequence at a later time, causing @a acquire to update its
2299  * synchronization at some later point in execution.
2300  *
2301  * @param acquire The 'acquire' action that may synchronize with a release
2302  * sequence
2303  * @param read The read action that may read from a release sequence; this may
2304  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2305  * when 'acquire' is a fence-acquire)
2306  * @param release_heads A pass-by-reference return parameter. Will be filled
2307  * with the head(s) of the release sequence(s), if they exist with certainty.
2308  * @see ModelChecker::release_seq_heads
2309  */
2310 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2311                 ModelAction *read, rel_heads_list_t *release_heads)
2312 {
2313         const ModelAction *rf = read->get_reads_from();
2314         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2315         sequence->acquire = acquire;
2316         sequence->read = read;
2317
2318         if (!release_seq_heads(rf, release_heads, sequence)) {
2319                 /* add act to 'lazy checking' list */
2320                 pending_rel_seqs->push_back(sequence);
2321         } else {
2322                 snapshot_free(sequence);
2323         }
2324 }
2325
2326 /**
2327  * Attempt to resolve all stashed operations that might synchronize with a
2328  * release sequence for a given location. This implements the "lazy" portion of
2329  * determining whether or not a release sequence was contiguous, since not all
2330  * modification order information is present at the time an action occurs.
2331  *
2332  * @param location The location/object that should be checked for release
2333  * sequence resolutions. A NULL value means to check all locations.
2334  * @param work_queue The work queue to which to add work items as they are
2335  * generated
2336  * @return True if any updates occurred (new synchronization, new mo_graph
2337  * edges)
2338  */
2339 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2340 {
2341         bool updated = false;
2342         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2343         while (it != pending_rel_seqs->end()) {
2344                 struct release_seq *pending = *it;
2345                 ModelAction *acquire = pending->acquire;
2346                 const ModelAction *read = pending->read;
2347
2348                 /* Only resolve sequences on the given location, if provided */
2349                 if (location && read->get_location() != location) {
2350                         it++;
2351                         continue;
2352                 }
2353
2354                 const ModelAction *rf = read->get_reads_from();
2355                 rel_heads_list_t release_heads;
2356                 bool complete;
2357                 complete = release_seq_heads(rf, &release_heads, pending);
2358                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2359                         if (!acquire->has_synchronized_with(release_heads[i])) {
2360                                 if (acquire->synchronize_with(release_heads[i]))
2361                                         updated = true;
2362                                 else
2363                                         set_bad_synchronization();
2364                         }
2365                 }
2366
2367                 if (updated) {
2368                         /* Re-check all pending release sequences */
2369                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2370                         /* Re-check read-acquire for mo_graph edges */
2371                         if (acquire->is_read())
2372                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2373
2374                         /* propagate synchronization to later actions */
2375                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2376                         for (; (*rit) != acquire; rit++) {
2377                                 ModelAction *propagate = *rit;
2378                                 if (acquire->happens_before(propagate)) {
2379                                         propagate->synchronize_with(acquire);
2380                                         /* Re-check 'propagate' for mo_graph edges */
2381                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2382                                 }
2383                         }
2384                 }
2385                 if (complete) {
2386                         it = pending_rel_seqs->erase(it);
2387                         snapshot_free(pending);
2388                 } else {
2389                         it++;
2390                 }
2391         }
2392
2393         // If we resolved promises, check whether we have now realized a data race.
2394         checkDataRaces();
2395
2396         return updated;
2397 }
2398
2399 /**
2400  * Performs various bookkeeping operations for the current ModelAction. For
2401  * instance, adds action to the per-object, per-thread action vector and to the
2402  * action trace list of all thread actions.
2403  *
2404  * @param act is the ModelAction to add.
2405  */
2406 void ModelChecker::add_action_to_lists(ModelAction *act)
2407 {
2408         int tid = id_to_int(act->get_tid());
2409         ModelAction *uninit = NULL;
2410         int uninit_id = -1;
2411         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2412         if (list->empty() && act->is_atomic_var()) {
2413                 uninit = get_uninitialized_action(act);
2414                 uninit_id = id_to_int(uninit->get_tid());
2415                 list->push_front(uninit);
2416         }
2417         list->push_back(act);
2418
2419         action_trace->push_back(act);
2420         if (uninit)
2421                 action_trace->push_front(uninit);
2422
2423         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2424         if (tid >= (int)vec->size())
2425                 vec->resize(priv->next_thread_id);
2426         (*vec)[tid].push_back(act);
2427         if (uninit)
2428                 (*vec)[uninit_id].push_front(uninit);
2429
2430         if ((int)thrd_last_action->size() <= tid)
2431                 thrd_last_action->resize(get_num_threads());
2432         (*thrd_last_action)[tid] = act;
2433         if (uninit)
2434                 (*thrd_last_action)[uninit_id] = uninit;
2435
2436         if (act->is_fence() && act->is_release()) {
2437                 if ((int)thrd_last_fence_release->size() <= tid)
2438                         thrd_last_fence_release->resize(get_num_threads());
2439                 (*thrd_last_fence_release)[tid] = act;
2440         }
2441
2442         if (act->is_wait()) {
2443                 void *mutex_loc = (void *) act->get_value();
2444                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2445
2446                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2447                 if (tid >= (int)vec->size())
2448                         vec->resize(priv->next_thread_id);
2449                 (*vec)[tid].push_back(act);
2450         }
2451 }
2452
2453 /**
2454  * @brief Get the last action performed by a particular Thread
2455  * @param tid The thread ID of the Thread in question
2456  * @return The last action in the thread
2457  */
2458 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2459 {
2460         int threadid = id_to_int(tid);
2461         if (threadid < (int)thrd_last_action->size())
2462                 return (*thrd_last_action)[id_to_int(tid)];
2463         else
2464                 return NULL;
2465 }
2466
2467 /**
2468  * @brief Get the last fence release performed by a particular Thread
2469  * @param tid The thread ID of the Thread in question
2470  * @return The last fence release in the thread, if one exists; NULL otherwise
2471  */
2472 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2473 {
2474         int threadid = id_to_int(tid);
2475         if (threadid < (int)thrd_last_fence_release->size())
2476                 return (*thrd_last_fence_release)[id_to_int(tid)];
2477         else
2478                 return NULL;
2479 }
2480
2481 /**
2482  * Gets the last memory_order_seq_cst write (in the total global sequence)
2483  * performed on a particular object (i.e., memory location), not including the
2484  * current action.
2485  * @param curr The current ModelAction; also denotes the object location to
2486  * check
2487  * @return The last seq_cst write
2488  */
2489 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2490 {
2491         void *location = curr->get_location();
2492         action_list_t *list = get_safe_ptr_action(obj_map, location);
2493         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2494         action_list_t::reverse_iterator rit;
2495         for (rit = list->rbegin(); (*rit) != curr; rit++)
2496                 ;
2497         rit++; /* Skip past curr */
2498         for ( ; rit != list->rend(); rit++)
2499                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2500                         return *rit;
2501         return NULL;
2502 }
2503
2504 /**
2505  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2506  * performed in a particular thread, prior to a particular fence.
2507  * @param tid The ID of the thread to check
2508  * @param before_fence The fence from which to begin the search; if NULL, then
2509  * search for the most recent fence in the thread.
2510  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2511  */
2512 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2513 {
2514         /* All fences should have NULL location */
2515         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2516         action_list_t::reverse_iterator rit = list->rbegin();
2517
2518         if (before_fence) {
2519                 for (; rit != list->rend(); rit++)
2520                         if (*rit == before_fence)
2521                                 break;
2522
2523                 ASSERT(*rit == before_fence);
2524                 rit++;
2525         }
2526
2527         for (; rit != list->rend(); rit++)
2528                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2529                         return *rit;
2530         return NULL;
2531 }
2532
2533 /**
2534  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2535  * location). This function identifies the mutex according to the current
2536  * action, which is presumed to operate on the same mutex.
2537  * @param curr The current ModelAction; also denotes the object location to
2538  * check
2539  * @return The last unlock operation
2540  */
2541 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2542 {
2543         void *location = curr->get_location();
2544         action_list_t *list = get_safe_ptr_action(obj_map, location);
2545         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2546         action_list_t::reverse_iterator rit;
2547         for (rit = list->rbegin(); rit != list->rend(); rit++)
2548                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2549                         return *rit;
2550         return NULL;
2551 }
2552
2553 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2554 {
2555         ModelAction *parent = get_last_action(tid);
2556         if (!parent)
2557                 parent = get_thread(tid)->get_creation();
2558         return parent;
2559 }
2560
2561 /**
2562  * Returns the clock vector for a given thread.
2563  * @param tid The thread whose clock vector we want
2564  * @return Desired clock vector
2565  */
2566 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2567 {
2568         return get_parent_action(tid)->get_cv();
2569 }
2570
2571 /**
2572  * @brief Find the promise (if any) to resolve for the current action and
2573  * remove it from the pending promise vector
2574  * @param curr The current ModelAction. Should be a write.
2575  * @return The Promise to resolve, if any; otherwise NULL
2576  */
2577 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2578 {
2579         for (unsigned int i = 0; i < promises->size(); i++)
2580                 if (curr->get_node()->get_promise(i)) {
2581                         Promise *ret = (*promises)[i];
2582                         promises->erase(promises->begin() + i);
2583                         return ret;
2584                 }
2585         return NULL;
2586 }
2587
2588 /**
2589  * Resolve a Promise with a current write.
2590  * @param write The ModelAction that is fulfilling Promises
2591  * @param promise The Promise to resolve
2592  * @return True if the Promise was successfully resolved; false otherwise
2593  */
2594 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2595 {
2596         ModelVector<ModelAction *> actions_to_check;
2597
2598         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2599                 ModelAction *read = promise->get_reader(i);
2600                 read_from(read, write);
2601                 actions_to_check.push_back(read);
2602         }
2603         /* Make sure the promise's value matches the write's value */
2604         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2605         if (!mo_graph->resolvePromise(promise, write))
2606                 priv->failed_promise = true;
2607
2608         /**
2609          * @todo  It is possible to end up in an inconsistent state, where a
2610          * "resolved" promise may still be referenced if
2611          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2612          *
2613          * Note that the inconsistency only matters when dumping mo_graph to
2614          * file.
2615          *
2616          * delete promise;
2617          */
2618
2619         // Check whether reading these writes has made threads unable to
2620         // resolve promises
2621         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2622                 ModelAction *read = actions_to_check[i];
2623                 mo_check_promises(read, true);
2624         }
2625
2626         return true;
2627 }
2628
2629 /**
2630  * Compute the set of promises that could potentially be satisfied by this
2631  * action. Note that the set computation actually appears in the Node, not in
2632  * ModelChecker.
2633  * @param curr The ModelAction that may satisfy promises
2634  */
2635 void ModelChecker::compute_promises(ModelAction *curr)
2636 {
2637         for (unsigned int i = 0; i < promises->size(); i++) {
2638                 Promise *promise = (*promises)[i];
2639                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2640                         continue;
2641
2642                 bool satisfy = true;
2643                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2644                         const ModelAction *act = promise->get_reader(j);
2645                         if (act->happens_before(curr) ||
2646                                         act->could_synchronize_with(curr)) {
2647                                 satisfy = false;
2648                                 break;
2649                         }
2650                 }
2651                 if (satisfy)
2652                         curr->get_node()->set_promise(i);
2653         }
2654 }
2655
2656 /** Checks promises in response to a change in a thread's ClockVector. */
2657 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2658 {
2659         for (unsigned int i = 0; i < promises->size(); i++) {
2660                 Promise *promise = (*promises)[i];
2661                 if (!promise->thread_is_available(tid))
2662                         continue;
2663                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2664                         const ModelAction *act = promise->get_reader(j);
2665                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2666                                         merge_cv->synchronized_since(act)) {
2667                                 if (promise->eliminate_thread(tid)) {
2668                                         /* Promise has failed */
2669                                         priv->failed_promise = true;
2670                                         return;
2671                                 }
2672                         }
2673                 }
2674         }
2675 }
2676
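/**
 * @brief Check all pending promises for failure (e.g., after a thread becomes
 * disabled) and record any promise that can no longer be satisfied
 */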
2677 void ModelChecker::check_promises_thread_disabled()
2678 {
2679         for (unsigned int i = 0; i < promises->size(); i++) {
2680                 Promise *promise = (*promises)[i];
2681                 if (promise->has_failed()) {
2682                         priv->failed_promise = true;
2683                         return;
2684                 }
2685         }
2686 }
2687
2688 /**
2689  * @brief Checks promises in response to addition to modification order for
2690  * threads.
2691  *
2692  * We test whether threads are still available for satisfying promises after an
2693  * addition to our modification order constraints. Those that are unavailable
2694  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2695  * that promise has failed.
2696  *
2697  * @param act The ModelAction which updated the modification order
2698  * @param is_read_check Should be true if act is a read and we must check for
2699  * updates to the store from which it read (there is a distinction here for
2700  * RMWs, which are both a load and a store)
2701  */
2702 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2703 {
2704         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2705
2706         for (unsigned int i = 0; i < promises->size(); i++) {
2707                 Promise *promise = (*promises)[i];
2708
2709                 // Is this promise on the same location?
2710                 if (!promise->same_location(write))
2711                         continue;
2712
2713                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2714                         const ModelAction *pread = promise->get_reader(j);
2715                         if (!pread->happens_before(act))
2716                                continue;
2717                         if (mo_graph->checkPromise(write, promise)) {
2718                                 priv->failed_promise = true;
2719                                 return;
2720                         }
2721                         break;
2722                 }
2723
2724                 // Don't do any lookups twice for the same thread
2725                 if (!promise->thread_is_available(act->get_tid()))
2726                         continue;
2727
2728                 if (mo_graph->checkReachable(promise, write)) {
2729                         if (mo_graph->checkPromise(write, promise)) {
2730                                 priv->failed_promise = true;
2731                                 return;
2732                         }
2733                 }
2734         }
2735 }
2736
2737 /**
2738  * Compute the set of writes that may break the current pending release
2739  * sequence. This information is extracted from previous release sequence
2740  * calculations.
2741  *
2742  * @param curr The current ModelAction. Must be a release sequence fixup
2743  * action.
2744  */
2745 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2746 {
2747         if (pending_rel_seqs->empty())
2748                 return;
2749
2750         struct release_seq *pending = pending_rel_seqs->back();
2751         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2752                 const ModelAction *write = pending->writes[i];
2753                 curr->get_node()->add_relseq_break(write);
2754         }
2755
2756         /* NULL means don't break the sequence; just synchronize */
2757         curr->get_node()->add_relseq_break(NULL);
2758 }
2759
2760 /**
2761  * Build up an initial set of all past writes that this 'read' action may read
2762  * from, as well as any previously-observed future values that must still be valid.
2763  *
2764  * @param curr is the current ModelAction that we are exploring; it must be a
2765  * 'read' operation.
2766  */
2767 void ModelChecker::build_may_read_from(ModelAction *curr)
2768 {
2769         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2770         unsigned int i;
2771         ASSERT(curr->is_read());
2772
2773         ModelAction *last_sc_write = NULL;
2774
2775         if (curr->is_seqcst())
2776                 last_sc_write = get_last_seq_cst_write(curr);
2777
2778         /* Iterate over all threads */
2779         for (i = 0; i < thrd_lists->size(); i++) {
2780                 /* Iterate over actions in thread, starting from most recent */
2781                 action_list_t *list = &(*thrd_lists)[i];
2782                 action_list_t::reverse_iterator rit;
2783                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2784                         ModelAction *act = *rit;
2785
2786                         /* Only consider 'write' actions */
2787                         if (!act->is_write() || act == curr)
2788                                 continue;
2789
2790                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2791                         bool allow_read = true;
2792
2793                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2794                                 allow_read = false;
2795                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2796                                 allow_read = false;
2797
2798                         if (allow_read) {
2799                                 /* Only add feasible reads */
2800                                 mo_graph->startChanges();
2801                                 r_modification_order(curr, act);
2802                                 if (!is_infeasible())
2803                                         curr->get_node()->add_read_from_past(act);
2804                                 mo_graph->rollbackChanges();
2805                         }
2806
2807                         /* Include at most one act per thread that "happens before" curr */
2808                         if (act->happens_before(curr))
2809                                 break;
2810                 }
2811         }
2812
2813         /* Inherit existing, promised future values */
2814         for (i = 0; i < promises->size(); i++) {
2815                 const Promise *promise = (*promises)[i];
2816                 const ModelAction *promise_read = promise->get_reader(0);
2817                 if (promise_read->same_var(curr)) {
2818                         /* Only add feasible future-values */
2819                         mo_graph->startChanges();
2820                         r_modification_order(curr, promise);
2821                         if (!is_infeasible())
2822                                 curr->get_node()->add_read_from_promise(promise_read);
2823                         mo_graph->rollbackChanges();
2824                 }
2825         }
2826
2827         /* We may find no valid may-read-from only if the execution is doomed */
2828         if (!curr->get_node()->read_from_size()) {
2829                 priv->no_valid_reads = true;
2830                 set_assert();
2831         }
2832
2833         if (DBG_ENABLED()) {
2834                 model_print("Reached read action:\n");
2835                 curr->print();
2836                 model_print("Printing read_from_past\n");
2837                 curr->get_node()->print_read_from_past();
2838                 model_print("End printing read_from_past\n");
2839         }
2840 }
2841
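/**
 * @brief Determine whether a sleeping thread's read may read from a given write
 *
 * Walks backward along the chain of RMW writes (via their reads-from
 * relations), permitting the read if the chain reaches the uninitialized store
 * or a release write that was performed while the reading thread was on the
 * sleep set; the walk stops at the first non-RMW write.
 *
 * @param curr The read action under consideration
 * @param write The candidate write
 * @return True if the read may read from the write
 */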
2842 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2843 {
2844         for ( ; write != NULL; write = write->get_reads_from()) {
2845                 /* UNINIT actions don't have a Node, and they never sleep */
2846                 if (write->is_uninitialized())
2847                         return true;
2848                 Node *prevnode = write->get_node()->get_parent();
2849
2850                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2851                 if (write->is_release() && thread_sleep)
2852                         return true;
2853                 if (!write->is_rmw())
2854                         return false;
2855         }
2856         return true;
2857 }
2858
2859 /**
2860  * @brief Get an action representing an uninitialized atomic
2861  *
2862  * This function may create a new one or try to retrieve one from the NodeStack
2863  *
2864  * @param curr The current action, which prompts the creation of an UNINIT action
2865  * @return A pointer to the UNINIT ModelAction
2866  */
2867 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2868 {
2869         Node *node = curr->get_node();
2870         ModelAction *act = node->get_uninit_action();
2871         if (!act) {
2872                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2873                 node->set_uninit_action(act);
2874         }
2875         act->create_cv(NULL);
2876         return act;
2877 }
2878
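/** @brief Print each action in a list, along with a hash summarizing the trace */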
2879 static void print_list(action_list_t *list)
2880 {
2881         action_list_t::iterator it;
2882
2883         model_print("---------------------------------------------------------------------\n");
2884
2885         unsigned int hash = 0;
2886
2887         for (it = list->begin(); it != list->end(); it++) {
2888                 const ModelAction *act = *it;
2889                 if (act->get_seq_number() > 0)
2890                         act->print();
2891                 hash = hash^(hash<<3)^((*it)->hash());
2892         }
2893         model_print("HASH %u\n", hash);
2894         model_print("---------------------------------------------------------------------\n");
2895 }
2896
2897 #if SUPPORT_MOD_ORDER_DUMP
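/**
 * @brief Dump the modification order graph, reads-from edges (red), and
 * sequenced-before edges (blue) in Graphviz DOT format
 * @param filename Base name for the output file; ".dot" is appended
 */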
2898 void ModelChecker::dumpGraph(char *filename) const
2899 {
2900         char buffer[200];
2901         sprintf(buffer, "%s.dot", filename);
2902         FILE *file = fopen(buffer, "w");
2903         fprintf(file, "digraph %s {\n", filename);
2904         mo_graph->dumpNodes(file);
2905         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2906
2907         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2908                 ModelAction *act = *it;
2909                 if (act->is_read()) {
2910                         mo_graph->dot_print_node(file, act);
2911                         if (act->get_reads_from())
2912                                 mo_graph->dot_print_edge(file,
2913                                                 act->get_reads_from(),
2914                                                 act,
2915                                                 "label=\"rf\", color=red, weight=2");
2916                         else
2917                                 mo_graph->dot_print_edge(file,
2918                                                 act->get_reads_from_promise(),
2919                                                 act,
2920                                                 "label=\"rf\", color=red");
2921                 }
2922                 if (thread_array[id_to_int(act->get_tid())]) {
2923                         mo_graph->dot_print_edge(file,
2924                                         thread_array[id_to_int(act->get_tid())],
2925                                         act,
2926                                         "label=\"sb\", color=blue, weight=400");
2927                 }
2928
2929                 thread_array[id_to_int(act->get_tid())] = act;
2930         }
2931         fprintf(file, "}\n");
2932         model_free(thread_array);
2933         fclose(file);
2934 }
2935 #endif
2936
2937 /** @brief Prints an execution trace summary. */
2938 void ModelChecker::print_summary() const
2939 {
2940 #if SUPPORT_MOD_ORDER_DUMP
2941         char buffername[100];
2942         sprintf(buffername, "exec%04u", stats.num_total);
2943         mo_graph->dumpGraphToFile(buffername);
2944         sprintf(buffername, "graph%04u", stats.num_total);
2945         dumpGraph(buffername);
2946 #endif
2947
2948         model_print("Execution %u:", stats.num_total);
2949         if (isfeasibleprefix()) {
2950                 if (scheduler->all_threads_sleeping())
2951                         model_print(" SLEEP-SET REDUNDANT");
2952                 model_print("\n");
2953         } else
2954                 print_infeasibility(" INFEASIBLE");
2955         print_list(action_trace);
2956         model_print("\n");
2957         if (!promises->empty()) {
2958                 model_print("Pending promises:\n");
2959                 for (unsigned int i = 0; i < promises->size(); i++) {
2960                         model_print(" [P%u] ", i);
2961                         (*promises)[i]->print();
2962                 }
2963                 model_print("\n");
2964         }
2965 }
2966
2967 /**
2968  * Add a Thread to the system for the first time. Should only be called once
2969  * per thread.
2970  * @param t The Thread to add
2971  */
2972 void ModelChecker::add_thread(Thread *t)
2973 {
2974         thread_map->put(id_to_int(t->get_id()), t);
2975         scheduler->add_thread(t);
2976 }
2977
2978 /**
2979  * @brief Get a Thread reference by its ID
2980  * @param tid The Thread's ID
2981  * @return A Thread reference
2982  */
2983 Thread * ModelChecker::get_thread(thread_id_t tid) const
2984 {
2985         return thread_map->get(id_to_int(tid));
2986 }
2987
2988 /**
2989  * @brief Get a reference to the Thread in which a ModelAction was executed
2990  * @param act The ModelAction
2991  * @return A Thread reference
2992  */
2993 Thread * ModelChecker::get_thread(const ModelAction *act) const
2994 {
2995         return get_thread(act->get_tid());
2996 }
2997
2998 /**
2999  * @brief Get a Promise's "promise number"
3000  *
3001  * A "promise number" is an index number that is unique to a promise, valid
3002  * only for a specific snapshot of an execution trace. Promises may come and go
3003  * as they are generated and resolved, so an index only retains meaning for the
3004  * current snapshot.
3005  *
3006  * @param promise The Promise to check
3007  * @return The promise index, if the promise still is valid; otherwise -1
3008  */
3009 int ModelChecker::get_promise_number(const Promise *promise) const
3010 {
3011         for (unsigned int i = 0; i < promises->size(); i++)
3012                 if ((*promises)[i] == promise)
3013                         return i;
3014         /* Not found */
3015         return -1;
3016 }
3017
3018 /**
3019  * @brief Check if a Thread is currently enabled
3020  * @param t The Thread to check
3021  * @return True if the Thread is currently enabled
3022  */
3023 bool ModelChecker::is_enabled(Thread *t) const
3024 {
3025         return scheduler->is_enabled(t);
3026 }
3027
3028 /**
3029  * @brief Check if a Thread is currently enabled
3030  * @param tid The ID of the Thread to check
3031  * @return True if the Thread is currently enabled
3032  */
3033 bool ModelChecker::is_enabled(thread_id_t tid) const
3034 {
3035         return scheduler->is_enabled(tid);
3036 }
3037
3038 /**
3039  * Switch from a model-checker context to a user-thread context. This is the
3040  * complement of ModelChecker::switch_to_master and must be called from the
3041  * model-checker context.
3042  *
3043  * @param thread The user-thread to switch to
3044  */
3045 void ModelChecker::switch_from_master(Thread *thread)
3046 {
3047         scheduler->set_current_thread(thread);
3048         Thread::swap(&system_context, thread);
3049 }
3050
3051 /**
3052  * Switch from a user-context to the "master thread" context (a.k.a. system
3053  * context). This switch is made with the intention of exploring a particular
3054  * model-checking action (described by a ModelAction object). Must be called
3055  * from a user-thread context.
3056  *
3057  * @param act The current action that will be explored. May be NULL only if
3058  * trace is exiting via an assertion (see ModelChecker::set_assert and
3059  * ModelChecker::has_asserted).
3060  * @return The value returned by the current action
3061  */
3062 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3063 {
3064         DBG();
3065         Thread *old = thread_current();
3066         scheduler->set_current_thread(NULL);
3067         ASSERT(!old->get_pending());
3068         old->set_pending(act);
3069         if (Thread::swap(old, &system_context) < 0) {
3070                 perror("swap threads");
3071                 exit(EXIT_FAILURE);
3072         }
3073         return old->get_return_value();
3074 }
3075
3076 /**
3077  * Takes the next step in the execution, if possible.
3078  * @param curr The current step to take
3079  * @return Returns the next Thread to run, if any; NULL if this execution
3080  * should terminate
3081  */
3082 Thread * ModelChecker::take_step(ModelAction *curr)
3083 {
3084         Thread *curr_thrd = get_thread(curr);
3085         ASSERT(curr_thrd->get_state() == THREAD_READY);
3086
3087         ASSERT(check_action_enabled(curr)); /* May have side effects? */
3088         curr = check_current_action(curr);
3089         ASSERT(curr);
3090
3091         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3092                 scheduler->remove_thread(curr_thrd);
3093
3094         return action_select_next_thread(curr);
3095 }
3096
3097 /** Wrapper to run the user's main function, with appropriate arguments */
3098 void user_main_wrapper(void *)
3099 {
3100         user_main(model->params.argc, model->params.argv);
3101 }
3102
3103 /** @return True if the execution has taken too many steps */
3104 bool ModelChecker::too_many_steps() const
3105 {
3106         return params.bound != 0 && priv->used_sequence_numbers > params.bound;
3107 }
3108
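/**
 * @brief Check whether the current execution should stop taking steps
 * @return True if the execution is infeasible, if it has bug reports on a
 * feasible prefix, or if it has taken too many steps
 */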
3109 bool ModelChecker::should_terminate_execution()
3110 {
3111         /* Infeasible -> don't take any more steps */
3112         if (is_infeasible())
3113                 return true;
3114         else if (isfeasibleprefix() && have_bug_reports()) {
3115                 set_assert();
3116                 return true;
3117         }
3118
3119         if (too_many_steps())
3120                 return true;
3121         return false;
3122 }
3123
3124 /** @brief Run ModelChecker for the user program */
3125 void ModelChecker::run()
3126 {
3127         do {
3128                 thrd_t user_thread;
3129                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3130                 add_thread(t);
3131
3132                 do {
3133                         /*
3134                          * Stash next pending action(s) for thread(s). There
3135                          * should only need to stash one thread's action--the
3136                          * thread which just took a step--plus the first step
3137                          * for any newly-created thread
3138                          */
3139                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3140                                 thread_id_t tid = int_to_id(i);
3141                                 Thread *thr = get_thread(tid);
3142                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3143                                         switch_from_master(thr);
3144                                         if (thr->is_waiting_on(thr))
3145                                                 assert_bug("Deadlock detected (thread %u)", i);
3146                                 }
3147                         }
3148
3149                         /* Don't schedule threads which should be disabled */
3150                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3151                                 Thread *th = get_thread(int_to_id(i));
3152                                 ModelAction *act = th->get_pending();
3153                                 if (act && is_enabled(th) && !check_action_enabled(act)) {
3154                                         scheduler->sleep(th);
3155                                 }
3156                         }
3157
3158                         /* Catch assertions from prior take_step or from
3159                          * between-ModelAction bugs (e.g., data races) */
3160                         if (has_asserted())
3161                                 break;
3162
3163                         if (!t)
3164                                 t = get_next_thread();
3165                         if (!t || t->is_model_thread())
3166                                 break;
3167
3168                         /* Consume the next action for a Thread */
3169                         ModelAction *curr = t->get_pending();
3170                         t->set_pending(NULL);
3171                         t = take_step(curr);
3172                 } while (!should_terminate_execution());
3173
3174                 /*
3175                  * Launch end-of-execution release sequence fixups only when
3176                  * the execution is otherwise feasible AND there are:
3177                  *
3178                  * (1) pending release sequences
3179                  * (2) pending assertions that could be invalidated by a change
3180                  * in clock vectors (i.e., data races)
3181                  * (3) no pending promises
3182                  */
3183                 while (!pending_rel_seqs->empty() &&
3184                                 is_feasible_prefix_ignore_relseq() &&
3185                                 !unrealizedraces.empty()) {
3186                         model_print("*** WARNING: release sequence fixup action "
3187                                         "(%zu pending release sequence(s)) ***\n",
3188                                         pending_rel_seqs->size());
3189                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3190                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3191                                         model_thread);
3192                         take_step(fixup);
3193                 }
3194         } while (next_execution());
3195
3196         model_print("******* Model-checking complete: *******\n");
3197         print_stats();
3198 }