model: merge duplicated code for WAIT and UNLOCK
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
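/** @brief The global ModelChecker instance */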
21 ModelChecker *model;
22
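/** @brief Holds one formatted bug-report message, allocated on the snapshotting heap */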
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new SnapVector<Promise *>()),
90         futurevalues(new SnapVector<struct PendingFutureValue>()),
91         pending_rel_seqs(new SnapVector<struct release_seq *>()),
92         thrd_last_action(new SnapVector<ModelAction *>(1)),
93         thrd_last_fence_release(new SnapVector<ModelAction *>()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
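/**
 * @brief Get the list of actions recorded for @a ptr in @a hash, creating an
 * empty list on first lookup so callers never receive NULL
 */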
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
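/**
 * @brief Like get_safe_ptr_action(), but for tables whose values are vectors
 * of action lists (e.g., obj_thrd_map): get, or lazily create, the vector
 * stored for @a ptr
 */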
140 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         SnapVector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new SnapVector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
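/** @return The Node at the head of the NodeStack */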
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
211  * actions should be followed by the execution of their child thread. In either
212  * case, the current action should determine the next thread schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
228
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * defer to the scheduler.
236  *
237  * @return The next chosen thread to run, if any exists; NULL if the current
238  * execution should terminate.
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_behaviors()) {
264                         /* Execute the same thread with a new behavior */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else {
268                         ASSERT(prevnode);
269                         /* Make a different thread execute for next step */
270                         scheduler->add_sleep(get_thread(next->get_tid()));
271                         tid = prevnode->get_next_backtrack();
272                         /* Make sure the backtracked thread isn't sleeping. */
273                         node_stack->pop_restofstack(1);
274                         if (diverge == earliest_diverge) {
275                                 earliest_diverge = prevnode->get_action();
276                         }
277                 }
278                 /* Start the round robin scheduler from this thread id */
279                 scheduler->set_scheduler_thread(tid);
280                 /* The correct sleep set is in the parent node. */
281                 execute_sleep_set();
282
283                 DEBUG("*** Divergence point ***\n");
284
285                 diverge = NULL;
286         } else {
287                 tid = next->get_tid();
288         }
289         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
290         ASSERT(tid != THREAD_ID_T_NONE);
291         return thread_map->get(id_to_int(tid));
292 }
293
294 /**
295  * We need to know what the next actions of all threads in the sleep
296  * set will be.  This method marks the pending action of each sleeping
297  * thread with the sleep flag.
298  */
299
300 void ModelChecker::execute_sleep_set()
301 {
302         for (unsigned int i = 0; i < get_num_threads(); i++) {
303                 thread_id_t tid = int_to_id(i);
304                 Thread *thr = get_thread(tid);
305                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
306                         thr->get_pending()->set_sleep_flag();
307                 }
308         }
309 }
310
311 /**
312  * @brief Should the current action wake up a given thread?
313  *
314  * @param curr The current action
315  * @param thread The thread that we might wake up
316  * @return True, if we should wake up the sleeping thread; false otherwise
317  */
318 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
319 {
320         const ModelAction *asleep = thread->get_pending();
321         /* Don't allow partial RMW to wake anyone up */
322         if (curr->is_rmwr())
323                 return false;
324         /* Synchronizing actions may have been backtracked */
325         if (asleep->could_synchronize_with(curr))
326                 return true;
327         /* All acquire/release fences and fence-acquire/store-release */
328         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
329                 return true;
330         /* Fence-release + store can awake load-acquire on the same location */
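        /* (illustration: a relaxed store in curr's thread that follows a
         *  fence-release may wake a thread sleeping on a load-acquire of the
         *  same location, provided the sleeper's last action precedes that
         *  fence-release) */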
331         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
332                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
333                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
334                         return true;
335         }
336         return false;
337 }
338
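/**
 * @brief Remove from the sleep set any thread that the current action may
 * wake up, as judged by should_wake_up()
 * @param curr The current action
 */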
339 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
340 {
341         for (unsigned int i = 0; i < get_num_threads(); i++) {
342                 Thread *thr = get_thread(int_to_id(i));
343                 if (scheduler->is_sleep_set(thr)) {
344                         if (should_wake_up(curr, thr))
345                                 /* Remove this thread from sleep set */
346                                 scheduler->remove_sleep(thr);
347                 }
348         }
349 }
350
351 /** @brief Alert the model-checker that an incorrectly-ordered
352  * synchronization was made */
353 void ModelChecker::set_bad_synchronization()
354 {
355         priv->bad_synchronization = true;
356 }
357
358 /**
359  * Check whether the current trace has triggered an assertion which should halt
360  * its execution.
361  *
362  * @return True, if the execution should be aborted; false otherwise
363  */
364 bool ModelChecker::has_asserted() const
365 {
366         return priv->asserted;
367 }
368
369 /**
370  * Trigger a trace assertion which should cause this execution to be halted.
371  * This can be due to a detected bug or due to an infeasibility that should
372  * halt ASAP.
373  */
374 void ModelChecker::set_assert()
375 {
376         priv->asserted = true;
377 }
378
379 /**
380  * Check if we are in a deadlock. Should only be called at the end of an
381  * execution, although it should not give false positives in the middle of an
382  * execution (there should be some ENABLED thread).
383  *
384  * @return True if program is in a deadlock; false otherwise
385  */
386 bool ModelChecker::is_deadlocked() const
387 {
388         bool blocking_threads = false;
389         for (unsigned int i = 0; i < get_num_threads(); i++) {
390                 thread_id_t tid = int_to_id(i);
391                 if (is_enabled(tid))
392                         return false;
393                 Thread *t = get_thread(tid);
394                 if (!t->is_model_thread() && t->get_pending())
395                         blocking_threads = true;
396         }
397         return blocking_threads;
398 }
399
400 /**
401  * Check if this is a complete execution. That is, have all threads completed
402  * execution (rather than exiting because sleep sets have forced a redundant
403  * execution).
404  *
405  * @return True if the execution is complete.
406  */
407 bool ModelChecker::is_complete_execution() const
408 {
409         for (unsigned int i = 0; i < get_num_threads(); i++)
410                 if (is_enabled(int_to_id(i)))
411                         return false;
412         return true;
413 }
414
415 /**
416  * @brief Assert a bug in the executing program.
417  *
418  * Use this function to assert any sort of bug in the user program. If the
419  * current trace is feasible (actually, a prefix of some feasible execution),
420  * then this execution will be aborted, printing the appropriate message. If
421  * the current trace is not yet feasible, the error message will be stashed and
422  * printed if the execution ever becomes feasible.
423  *
424  * @param msg Descriptive message for the bug (do not include newline char)
425  * @return True if bug is immediately-feasible
426  */
427 bool ModelChecker::assert_bug(const char *msg)
428 {
429         priv->bugs.push_back(new bug_message(msg));
430
431         if (isfeasibleprefix()) {
432                 set_assert();
433                 return true;
434         }
435         return false;
436 }
437
438 /**
439  * @brief Assert a bug in the executing program, asserted by a user thread
440  * @see ModelChecker::assert_bug
441  * @param msg Descriptive message for the bug (do not include newline char)
442  */
443 void ModelChecker::assert_user_bug(const char *msg)
444 {
445         /* If feasible bug, bail out now */
446         if (assert_bug(msg))
447                 switch_to_master(NULL);
448 }
449
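/*
 * Illustrative (hypothetical) call site: instrumented user code reports a
 * violated invariant through the global checker, e.g.
 *
 *   if (shared_count < 0)
 *           model->assert_user_bug("negative reference count");
 */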
450 /** @return True, if any bugs have been reported for this execution */
451 bool ModelChecker::have_bug_reports() const
452 {
453         return priv->bugs.size() != 0;
454 }
455
456 /** @brief Print bug report listing for this execution (if any bugs exist) */
457 void ModelChecker::print_bugs() const
458 {
459         if (have_bug_reports()) {
460                 model_print("Bug report: %zu bug%s detected\n",
461                                 priv->bugs.size(),
462                                 priv->bugs.size() > 1 ? "s" : "");
463                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
464                         priv->bugs[i]->print();
465         }
466 }
467
468 /**
469  * @brief Record end-of-execution stats
470  *
471  * Must be run when exiting an execution. Records various stats.
472  * @see struct execution_stats
473  */
474 void ModelChecker::record_stats()
475 {
476         stats.num_total++;
477         if (!isfeasibleprefix())
478                 stats.num_infeasible++;
479         else if (have_bug_reports())
480                 stats.num_buggy_executions++;
481         else if (is_complete_execution())
482                 stats.num_complete++;
483         else {
484                 stats.num_redundant++;
485
486                 /**
487                  * @todo We can violate this ASSERT() when fairness/sleep sets
488                  * conflict to cause an execution to terminate, e.g. with:
489                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
490                  */
491                 //ASSERT(scheduler->all_threads_sleeping());
492         }
493 }
494
495 /** @brief Print execution stats */
496 void ModelChecker::print_stats() const
497 {
498         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
499         model_print("Number of redundant executions: %d\n", stats.num_redundant);
500         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
501         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
502         model_print("Total executions: %d\n", stats.num_total);
503         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
504 }
505
506 /**
507  * @brief End-of-execution print
508  * @param printbugs Should any existing bugs be printed?
509  */
510 void ModelChecker::print_execution(bool printbugs) const
511 {
512         print_program_output();
513
514         if (params.verbose) {
515                 model_print("Earliest divergence point since last feasible execution:\n");
516                 if (earliest_diverge)
517                         earliest_diverge->print();
518                 else
519                         model_print("(Not set)\n");
520
521                 model_print("\n");
522                 print_stats();
523         }
524
525         /* Don't print invalid bugs */
526         if (printbugs)
527                 print_bugs();
528
529         model_print("\n");
530         print_summary();
531 }
532
533 /**
534  * Queries the model-checker for more executions to explore and, if one
535  * exists, resets the model-checker state to execute a new execution.
536  *
537  * @return If there are more executions to explore, return true. Otherwise,
538  * return false.
539  */
540 bool ModelChecker::next_execution()
541 {
542         DBG();
543         /* Is this execution a feasible execution that's worth bug-checking? */
544         bool complete = isfeasibleprefix() && (is_complete_execution() ||
545                         have_bug_reports());
546
547         /* End-of-execution bug checks */
548         if (complete) {
549                 if (is_deadlocked())
550                         assert_bug("Deadlock detected");
551
552                 checkDataRaces();
553         }
554
555         record_stats();
556
557         /* Output */
558         if (params.verbose || (complete && have_bug_reports()))
559                 print_execution(complete);
560         else
561                 clear_program_output();
562
563         if (complete)
564                 earliest_diverge = NULL;
565
566         if ((diverge = get_next_backtrack()) == NULL)
567                 return false;
568
569         if (DBG_ENABLED()) {
570                 model_print("Next execution will diverge at:\n");
571                 diverge->print();
572         }
573
574         reset_to_initial_state();
575         return true;
576 }
577
578 /**
579  * @brief Find the last fence-related backtracking conflict for a ModelAction
580  *
581  * This function performs the search for the most recent conflicting action
582  * against which we should perform backtracking, as affected by fence
583  * operations. This includes pairs of potentially-synchronizing actions which
584  * occur due to fence-acquire or fence-release, and hence should be explored in
585  * the opposite execution order.
586  *
587  * @param act The current action
588  * @return The most recent action which conflicts with act due to fences
589  */
590 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
591 {
592         /* Only perform release/acquire fence backtracking for stores */
593         if (!act->is_write())
594                 return NULL;
595
596         /* Find a fence-release (or, act is a release) */
597         ModelAction *last_release;
598         if (act->is_release())
599                 last_release = act;
600         else
601                 last_release = get_last_fence_release(act->get_tid());
602         if (!last_release)
603                 return NULL;
604
605         /* Skip past the release */
606         action_list_t *list = action_trace;
607         action_list_t::reverse_iterator rit;
608         for (rit = list->rbegin(); rit != list->rend(); rit++)
609                 if (*rit == last_release)
610                         break;
611         ASSERT(rit != list->rend());
612
613         /* Find a prior:
614          *   load-acquire
615          * or
616          *   load --sb-> fence-acquire */
617         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
618         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
619         bool found_acquire_fences = false;
620         for ( ; rit != list->rend(); rit++) {
621                 ModelAction *prev = *rit;
622                 if (act->same_thread(prev))
623                         continue;
624
625                 int tid = id_to_int(prev->get_tid());
626
627                 if (prev->is_read() && act->same_var(prev)) {
628                         if (prev->is_acquire()) {
629                                 /* Found most recent load-acquire, don't need
630                                  * to search for more fences */
631                                 if (!found_acquire_fences)
632                                         return NULL;
633                         } else {
634                                 prior_loads[tid] = prev;
635                         }
636                 }
637                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
638                         found_acquire_fences = true;
639                         acquire_fences[tid] = prev;
640                 }
641         }
642
643         ModelAction *latest_backtrack = NULL;
644         for (unsigned int i = 0; i < acquire_fences.size(); i++)
645                 if (acquire_fences[i] && prior_loads[i])
646                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
647                                 latest_backtrack = acquire_fences[i];
648         return latest_backtrack;
649 }
650
651 /**
652  * @brief Find the last backtracking conflict for a ModelAction
653  *
654  * This function performs the search for the most recent conflicting action
655  * against which we should perform backtracking. This primarily includes pairs of
656  * synchronizing actions which should be explored in the opposite execution
657  * order.
658  *
659  * @param act The current action
660  * @return The most recent action which conflicts with act
661  */
662 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
663 {
664         switch (act->get_type()) {
665         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
666         case ATOMIC_READ:
667         case ATOMIC_WRITE:
668         case ATOMIC_RMW: {
669                 ModelAction *ret = NULL;
670
671                 /* linear search: from most recent to oldest */
672                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
673                 action_list_t::reverse_iterator rit;
674                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
675                         ModelAction *prev = *rit;
676                         if (prev->could_synchronize_with(act)) {
677                                 ret = prev;
678                                 break;
679                         }
680                 }
681
682                 ModelAction *ret2 = get_last_fence_conflict(act);
683                 if (!ret2)
684                         return ret;
685                 if (!ret)
686                         return ret2;
687                 if (*ret < *ret2)
688                         return ret2;
689                 return ret;
690         }
691         case ATOMIC_LOCK:
692         case ATOMIC_TRYLOCK: {
693                 /* linear search: from most recent to oldest */
694                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
695                 action_list_t::reverse_iterator rit;
696                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
697                         ModelAction *prev = *rit;
698                         if (act->is_conflicting_lock(prev))
699                                 return prev;
700                 }
701                 break;
702         }
703         case ATOMIC_UNLOCK: {
704                 /* linear search: from most recent to oldest */
705                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
706                 action_list_t::reverse_iterator rit;
707                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
708                         ModelAction *prev = *rit;
709                         if (!act->same_thread(prev) && prev->is_failed_trylock())
710                                 return prev;
711                 }
712                 break;
713         }
714         case ATOMIC_WAIT: {
715                 /* linear search: from most recent to oldest */
716                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
717                 action_list_t::reverse_iterator rit;
718                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
719                         ModelAction *prev = *rit;
720                         if (!act->same_thread(prev) && prev->is_failed_trylock())
721                                 return prev;
722                         if (!act->same_thread(prev) && prev->is_notify())
723                                 return prev;
724                 }
725                 break;
726         }
727
728         case ATOMIC_NOTIFY_ALL:
729         case ATOMIC_NOTIFY_ONE: {
730                 /* linear search: from most recent to oldest */
731                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
732                 action_list_t::reverse_iterator rit;
733                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
734                         ModelAction *prev = *rit;
735                         if (!act->same_thread(prev) && prev->is_wait())
736                                 return prev;
737                 }
738                 break;
739         }
740         default:
741                 break;
742         }
743         return NULL;
744 }
745
746 /** This method finds backtracking points against which we should try to
747  * reorder the parameter ModelAction.
748  *
749  * @param act The ModelAction to find backtracking points for.
750  */
751 void ModelChecker::set_backtracking(ModelAction *act)
752 {
753         Thread *t = get_thread(act);
754         ModelAction *prev = get_last_conflict(act);
755         if (prev == NULL)
756                 return;
757
758         Node *node = prev->get_node()->get_parent();
759
760         int low_tid, high_tid;
761         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
762                 low_tid = id_to_int(act->get_tid());
763                 high_tid = low_tid + 1;
764         } else {
765                 low_tid = 0;
766                 high_tid = get_num_threads();
767         }
768
769         for (int i = low_tid; i < high_tid; i++) {
770                 thread_id_t tid = int_to_id(i);
771
772                 /* Make sure this thread can be enabled here. */
773                 if (i >= node->get_num_threads())
774                         break;
775
776                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
777                 if (node->enabled_status(tid) != THREAD_ENABLED)
778                         continue;
779
780                 /* Check if this has been explored already */
781                 if (node->has_been_explored(tid))
782                         continue;
783
784                 /* See if fairness allows */
785                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
786                         bool unfair = false;
787                         for (int t = 0; t < node->get_num_threads(); t++) {
788                                 thread_id_t tother = int_to_id(t);
789                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
790                                         unfair = true;
791                                         break;
792                                 }
793                         }
794                         if (unfair)
795                                 continue;
796                 }
797
798                 /* See if CHESS-like yield fairness allows */
799                 if (model->params.yieldon) {
800                         bool unfair = false;
801                         for (int t = 0; t < node->get_num_threads(); t++) {
802                                 thread_id_t tother = int_to_id(t);
803                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
804                                         unfair = true;
805                                         break;
806                                 }
807                         }
808                         if (unfair)
809                                 continue;
810                 }
811
812                 /* Cache the latest backtracking point */
813                 set_latest_backtrack(prev);
814
815                 /* If this is a new backtracking point, mark the tree */
816                 if (!node->set_backtrack(tid))
817                         continue;
818                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
819                                         id_to_int(prev->get_tid()),
820                                         id_to_int(t->get_id()));
821                 if (DBG_ENABLED()) {
822                         prev->print();
823                         act->print();
824                 }
825         }
826 }
827
828 /**
829  * @brief Cache a backtracking point as the "most recent", if eligible
830  *
831  * Note that this does not prepare the NodeStack for this backtracking
832  * operation; it only caches the action on a per-execution basis.
833  *
834  * @param act The operation at which we should explore a different next action
835  * (i.e., backtracking point)
836  * @return True, if this action is now the most recent backtracking point;
837  * false otherwise
838  */
839 bool ModelChecker::set_latest_backtrack(ModelAction *act)
840 {
841         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
842                 priv->next_backtrack = act;
843                 return true;
844         }
845         return false;
846 }
847
848 /**
849  * Returns the last backtracking point. The model checker will explore a different
850  * path for this point in the next execution.
851  * @return The ModelAction at which the next execution should diverge.
852  */
853 ModelAction * ModelChecker::get_next_backtrack()
854 {
855         ModelAction *next = priv->next_backtrack;
856         priv->next_backtrack = NULL;
857         return next;
858 }
859
860 /**
861  * Processes a read model action.
862  * @param curr is the read model action to process.
863  * @return True if processing this read updates the mo_graph.
864  */
865 bool ModelChecker::process_read(ModelAction *curr)
866 {
867         Node *node = curr->get_node();
868         while (true) {
869                 bool updated = false;
870                 switch (node->get_read_from_status()) {
871                 case READ_FROM_PAST: {
872                         const ModelAction *rf = node->get_read_from_past();
873                         ASSERT(rf);
874
875                         mo_graph->startChanges();
876
877                         ASSERT(!is_infeasible());
878                         if (!check_recency(curr, rf)) {
879                                 if (node->increment_read_from()) {
880                                         mo_graph->rollbackChanges();
881                                         continue;
882                                 } else {
883                                         priv->too_many_reads = true;
884                                 }
885                         }
886
887                         updated = r_modification_order(curr, rf);
888                         read_from(curr, rf);
889                         mo_graph->commitChanges();
890                         mo_check_promises(curr, true);
891                         break;
892                 }
893                 case READ_FROM_PROMISE: {
894                         Promise *promise = curr->get_node()->get_read_from_promise();
895                         if (promise->add_reader(curr))
896                                 priv->failed_promise = true;
897                         curr->set_read_from_promise(promise);
898                         mo_graph->startChanges();
899                         if (!check_recency(curr, promise))
900                                 priv->too_many_reads = true;
901                         updated = r_modification_order(curr, promise);
902                         mo_graph->commitChanges();
903                         break;
904                 }
905                 case READ_FROM_FUTURE: {
906                         /* Read from future value */
907                         struct future_value fv = node->get_future_value();
908                         Promise *promise = new Promise(curr, fv);
909                         curr->set_read_from_promise(promise);
910                         promises->push_back(promise);
911                         mo_graph->startChanges();
912                         updated = r_modification_order(curr, promise);
913                         mo_graph->commitChanges();
914                         break;
915                 }
916                 default:
917                         ASSERT(false);
918                 }
919                 get_thread(curr)->set_return_value(curr->get_return_value());
920                 return updated;
921         }
922 }
923
924 /**
925  * Processes a lock, trylock, unlock, wait, or notify model action.
926  * @param curr is the mutex-related model action to process.
927  *
928  * The trylock operation checks whether the lock is taken.  If not,
929  * it falls through to the normal lock case.  If so, it returns
930  * failure.
931  *
932  * The lock operation has already been checked to be enabled, so
933  * it just grabs the lock and synchronizes with the previous unlock.
934  *
935  * The unlock operation has to re-enable all of the threads that are
936  * waiting on the lock.
937  *
938  * @return True if synchronization was updated; false otherwise
939  */
940 bool ModelChecker::process_mutex(ModelAction *curr)
941 {
942         std::mutex *mutex = curr->get_mutex();
943         struct std::mutex_state *state = NULL;
944
945         if (mutex)
946                 state = mutex->get_state();
947
948         switch (curr->get_type()) {
949         case ATOMIC_TRYLOCK: {
950                 bool success = !state->locked;
951                 curr->set_try_lock(success);
952                 if (!success) {
953                         get_thread(curr)->set_return_value(0);
954                         break;
955                 }
956                 get_thread(curr)->set_return_value(1);
957         }
958                 //otherwise fall through to the lock case
959         case ATOMIC_LOCK: {
960                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
961                         assert_bug("Lock access before initialization");
962                 state->locked = get_thread(curr);
963                 ModelAction *unlock = get_last_unlock(curr);
964                 //synchronize with the previous unlock statement
965                 if (unlock != NULL) {
966                         curr->synchronize_with(unlock);
967                         return true;
968                 }
969                 break;
970         }
971         case ATOMIC_WAIT:
972         case ATOMIC_UNLOCK: {
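                /* Merged WAIT/UNLOCK handling: both release the lock and wake
                 * the lock's waiters; the WAIT-specific blocking logic follows
                 * the is_wait() check below */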
973                 //unlock the lock
974                 state->locked = NULL;
975                 //wake up the other threads
976                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, mutex);
977                 //activate all the waiting threads
978                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
979                         scheduler->wake(get_thread(*rit));
980                 }
981                 waiters->clear();
982
983                 if (!curr->is_wait())
984                         break; /* The rest is only for ATOMIC_WAIT */
985
986                 /* Should we go to sleep? (simulate spurious failures) */
987                 if (curr->get_node()->get_misc() == 0) {
988                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
989                         /* disable us */
990                         scheduler->sleep(get_thread(curr));
991                 }
992                 break;
993         }
994         case ATOMIC_NOTIFY_ALL: {
995                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
996                 //activate all the waiting threads
997                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
998                         scheduler->wake(get_thread(*rit));
999                 }
1000                 waiters->clear();
1001                 break;
1002         }
1003         case ATOMIC_NOTIFY_ONE: {
1004                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1005                 int wakeupthread = curr->get_node()->get_misc();
1006                 action_list_t::iterator it = waiters->begin();
1007                 advance(it, wakeupthread);
1008                 scheduler->wake(get_thread(*it));
1009                 waiters->erase(it);
1010                 break;
1011         }
1012
1013         default:
1014                 ASSERT(0);
1015         }
1016         return false;
1017 }
1018
1019 /**
1020  * @brief Check if the current pending promises allow a future value to be sent
1021  *
1022  * If one of the following is true:
1023  *  (a) there are no pending promises
1024  *  (b) the reader and writer do not cross any promises
1025  * Then, it is safe to pass a future value back now.
1026  *
1027  * Otherwise, we must save the pending future value until (a) or (b) is true
1028  *
1029  * @param writer The operation which sends the future value. Must be a write.
1030  * @param reader The operation which will observe the value. Must be a read.
1031  * @return True if the future value can be sent now; false if it must wait.
1032  */
1033 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1034                 const ModelAction *reader) const
1035 {
1036         if (promises->empty())
1037                 return true;
1038         for (int i = promises->size() - 1; i >= 0; i--) {
1039                 ModelAction *pr = (*promises)[i]->get_reader(0);
1040                 //reader is after promise...doesn't cross any promise
1041                 if (*reader > *pr)
1042                         return true;
1043                 //writer is after promise, reader before...bad...
1044                 if (*writer > *pr)
1045                         return false;
1046         }
1047         return true;
1048 }
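/*
 * Illustration (sequence order left to right; P is the earliest read of a
 * pending promise):
 *
 *   ... P ... reader ... writer ...   -> safe: the pair does not cross P
 *   ... reader ... P ... writer ...   -> unsafe: sending the future value now
 *                                        would cross the pending promise
 */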
1049
1050 /**
1051  * @brief Add a future value to a reader
1052  *
1053  * This function performs a few additional checks to ensure that the future
1054  * value can be feasibly observed by the reader
1055  *
1056  * @param writer The operation whose value is sent. Must be a write.
1057  * @param reader The read operation which may read the future value. Must be a read.
1058  */
1059 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1060 {
1061         /* Do more ambitious checks now that mo is more complete */
1062         if (!mo_may_allow(writer, reader))
1063                 return;
1064
1065         Node *node = reader->get_node();
1066
1067         /* Find an ancestor thread which exists at the time of the reader */
1068         Thread *write_thread = get_thread(writer);
1069         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1070                 write_thread = write_thread->get_parent();
1071
1072         struct future_value fv = {
1073                 writer->get_write_value(),
1074                 writer->get_seq_number() + params.maxfuturedelay,
1075                 write_thread->get_id(),
1076         };
1077         if (node->add_future_value(fv))
1078                 set_latest_backtrack(reader);
1079 }
1080
1081 /**
1082  * Process a write ModelAction
1083  * @param curr The ModelAction to process
1084  * @return True if the mo_graph was updated or promises were resolved
1085  */
1086 bool ModelChecker::process_write(ModelAction *curr)
1087 {
1088         /* Readers to which we may send our future value */
1089         ModelVector<ModelAction *> send_fv;
1090
1091         const ModelAction *earliest_promise_reader;
1092         bool updated_promises = false;
1093
1094         bool updated_mod_order = w_modification_order(curr, &send_fv);
1095         Promise *promise = pop_promise_to_resolve(curr);
1096
1097         if (promise) {
1098                 earliest_promise_reader = promise->get_reader(0);
1099                 updated_promises = resolve_promise(curr, promise);
1100         } else
1101                 earliest_promise_reader = NULL;
1102
1103         for (unsigned int i = 0; i < send_fv.size(); i++) {
1104                 ModelAction *read = send_fv[i];
1105
1106                 /* Don't send future values to reads after the Promise we resolve */
1107                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1108                         /* Check if future value can be sent immediately */
1109                         if (promises_may_allow(curr, read)) {
1110                                 add_future_value(curr, read);
1111                         } else {
1112                                 futurevalues->push_back(PendingFutureValue(curr, read));
1113                         }
1114                 }
1115         }
1116
1117         /* Check the pending future values */
1118         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1119                 struct PendingFutureValue pfv = (*futurevalues)[i];
1120                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1121                         add_future_value(pfv.writer, pfv.reader);
1122                         futurevalues->erase(futurevalues->begin() + i);
1123                 }
1124         }
1125
1126         mo_graph->commitChanges();
1127         mo_check_promises(curr, false);
1128
1129         get_thread(curr)->set_return_value(VALUE_NONE);
1130         return updated_mod_order || updated_promises;
1131 }
1132
1133 /**
1134  * Process a fence ModelAction
1135  * @param curr The ModelAction to process
1136  * @return True if synchronization was updated
1137  */
1138 bool ModelChecker::process_fence(ModelAction *curr)
1139 {
1140         /*
1141          * fence-relaxed: no-op
1142  * fence-release: only log the occurrence (not in this function), for
1143          *   use in later synchronization
1144          * fence-acquire (this function): search for hypothetical release
1145          *   sequences
1146          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1147          */
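        /* (illustration, not tied to this trace: a prior relaxed load in the
         *  same thread, e.g. "r = x.load(memory_order_relaxed)", lets a later
         *  atomic_thread_fence(memory_order_acquire) synchronize with the
         *  release sequence headed by the store that the load read from) */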
1148         bool updated = false;
1149         if (curr->is_acquire()) {
1150                 action_list_t *list = action_trace;
1151                 action_list_t::reverse_iterator rit;
1152                 /* Find X : is_read(X) && X --sb-> curr */
1153                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1154                         ModelAction *act = *rit;
1155                         if (act == curr)
1156                                 continue;
1157                         if (act->get_tid() != curr->get_tid())
1158                                 continue;
1159                         /* Stop at the beginning of the thread */
1160                         if (act->is_thread_start())
1161                                 break;
1162                         /* Stop once we reach a prior fence-acquire */
1163                         if (act->is_fence() && act->is_acquire())
1164                                 break;
1165                         if (!act->is_read())
1166                                 continue;
1167                         /* read-acquire will find its own release sequences */
1168                         if (act->is_acquire())
1169                                 continue;
1170
1171                         /* Establish hypothetical release sequences */
1172                         rel_heads_list_t release_heads;
1173                         get_release_seq_heads(curr, act, &release_heads);
1174                         for (unsigned int i = 0; i < release_heads.size(); i++)
1175                                 if (!curr->synchronize_with(release_heads[i]))
1176                                         set_bad_synchronization();
1177                         if (release_heads.size() != 0)
1178                                 updated = true;
1179                 }
1180         }
1181         return updated;
1182 }
1183
1184 /**
1185  * @brief Process the current action for thread-related activity
1186  *
1187  * Performs current-action processing for a THREAD_* ModelAction. Processing
1188  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1189  * synchronization, etc.  This function is a no-op for non-THREAD actions
1190  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1191  *
1192  * @param curr The current action
1193  * @return True if synchronization was updated or a thread completed
1194  */
1195 bool ModelChecker::process_thread_action(ModelAction *curr)
1196 {
1197         bool updated = false;
1198
1199         switch (curr->get_type()) {
1200         case THREAD_CREATE: {
1201                 thrd_t *thrd = (thrd_t *)curr->get_location();
1202                 struct thread_params *params = (struct thread_params *)curr->get_value();
1203                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1204                 add_thread(th);
1205                 th->set_creation(curr);
1206                 /* Promises can be satisfied by children */
1207                 for (unsigned int i = 0; i < promises->size(); i++) {
1208                         Promise *promise = (*promises)[i];
1209                         if (promise->thread_is_available(curr->get_tid()))
1210                                 promise->add_thread(th->get_id());
1211                 }
1212                 break;
1213         }
1214         case THREAD_JOIN: {
1215                 Thread *blocking = curr->get_thread_operand();
1216                 ModelAction *act = get_last_action(blocking->get_id());
1217                 curr->synchronize_with(act);
1218                 updated = true; /* trigger rel-seq checks */
1219                 break;
1220         }
1221         case THREAD_FINISH: {
1222                 Thread *th = get_thread(curr);
1223                 while (!th->wait_list_empty()) {
1224                         ModelAction *act = th->pop_wait_list();
1225                         scheduler->wake(get_thread(act));
1226                 }
1227                 th->complete();
1228                 /* Completed thread can't satisfy promises */
1229                 for (unsigned int i = 0; i < promises->size(); i++) {
1230                         Promise *promise = (*promises)[i];
1231                         if (promise->thread_is_available(th->get_id()))
1232                                 if (promise->eliminate_thread(th->get_id()))
1233                                         priv->failed_promise = true;
1234                 }
1235                 updated = true; /* trigger rel-seq checks */
1236                 break;
1237         }
1238         case THREAD_START: {
1239                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1240                 break;
1241         }
1242         default:
1243                 break;
1244         }
1245
1246         return updated;
1247 }
1248
1249 /**
1250  * @brief Process the current action for release sequence fixup activity
1251  *
1252  * Performs model-checker release sequence fixups for the current action,
1253  * forcing a single pending release sequence to break (with a given, potential
1254  * "loose" write) or to complete (i.e., synchronize). If a pending release
1255  * sequence forms a complete release sequence, then we must perform the fixup
1256  * synchronization, mo_graph additions, etc.
1257  *
1258  * @param curr The current action; must be a release sequence fixup action
1259  * @param work_queue The work queue to which to add work items as they are
1260  * generated
1261  */
1262 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1263 {
1264         const ModelAction *write = curr->get_node()->get_relseq_break();
1265         struct release_seq *sequence = pending_rel_seqs->back();
1266         pending_rel_seqs->pop_back();
1267         ASSERT(sequence);
1268         ModelAction *acquire = sequence->acquire;
1269         const ModelAction *rf = sequence->rf;
1270         const ModelAction *release = sequence->release;
1271         ASSERT(acquire);
1272         ASSERT(release);
1273         ASSERT(rf);
1274         ASSERT(release->same_thread(rf));
1275
1276         if (write == NULL) {
1277                 /**
1278                  * @todo Forcing a synchronization requires that we set
1279                  * modification order constraints. For instance, we can't allow
1280                  * a fixup sequence in which two separate read-acquire
1281                  * operations read from the same sequence, where the first one
1282                  * synchronizes and the other doesn't. Essentially, we can't
1283                  * allow any writes to insert themselves between 'release' and
1284                  * 'rf'
1285                  */
1286
1287                 /* Must synchronize */
1288                 if (!acquire->synchronize_with(release)) {
1289                         set_bad_synchronization();
1290                         return;
1291                 }
1292                 /* Re-check all pending release sequences */
1293                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1294                 /* Re-check act for mo_graph edges */
1295                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1296
1297                 /* propagate synchronization to later actions */
1298                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1299                 for (; (*rit) != acquire; rit++) {
1300                         ModelAction *propagate = *rit;
1301                         if (acquire->happens_before(propagate)) {
1302                                 propagate->synchronize_with(acquire);
1303                                 /* Re-check 'propagate' for mo_graph edges */
1304                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1305                         }
1306                 }
1307         } else {
1308                 /* Break release sequence with new edges:
1309                  *   release --mo--> write --mo--> rf */
1310                 mo_graph->addEdge(release, write);
1311                 mo_graph->addEdge(write, rf);
1312         }
1313
1314         /* See if we have realized a data race */
1315         checkDataRaces();
1316 }
1317
1318 /**
1319  * Initialize the current action by performing one or more of the following
1320  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1321  * in the NodeStack, manipulating backtracking sets, allocating and
1322  * initializing clock vectors, and computing the promises to fulfill.
1323  *
1324  * @param curr The current action, as passed from the user context; may be
1325  * freed/invalidated after the execution of this function, with a different
1326  * action "returned" in its place (pass-by-reference)
1327  * @return True if curr is a newly-explored action; false otherwise
1328  */
1329 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1330 {
1331         ModelAction *newcurr;
1332
1333         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1334                 newcurr = process_rmw(*curr);
1335                 delete *curr;
1336
1337                 if (newcurr->is_rmw())
1338                         compute_promises(newcurr);
1339
1340                 *curr = newcurr;
1341                 return false;
1342         }
1343
1344         (*curr)->set_seq_number(get_next_seq_num());
1345
1346         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1347         if (newcurr) {
1348                 /* First restore type and order in case of RMW operation */
1349                 if ((*curr)->is_rmwr())
1350                         newcurr->copy_typeandorder(*curr);
1351
1352                 ASSERT((*curr)->get_location() == newcurr->get_location());
1353                 newcurr->copy_from_new(*curr);
1354
1355                 /* Discard duplicate ModelAction; use action from NodeStack */
1356                 delete *curr;
1357
1358                 /* Always compute new clock vector */
1359                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1360
1361                 *curr = newcurr;
1362                 return false; /* Action was explored previously */
1363         } else {
1364                 newcurr = *curr;
1365
1366                 /* Always compute new clock vector */
1367                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1368
1369                 /* Assign most recent release fence */
1370                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1371
1372                 /*
1373                  * Perform one-time actions when pushing new ModelAction onto
1374                  * NodeStack
1375                  */
1376                 if (newcurr->is_write())
1377                         compute_promises(newcurr);
1378                 else if (newcurr->is_relseq_fixup())
1379                         compute_relseq_breakwrites(newcurr);
1380                 else if (newcurr->is_wait())
1381                         newcurr->get_node()->set_misc_max(2);
1382                 else if (newcurr->is_notify_one()) {
1383                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1384                 }
1385                 return true; /* This was a new ModelAction */
1386         }
1387 }
1388
1389 /**
1390  * @brief Establish reads-from relation between two actions
1391  *
1392  * Perform basic operations involved with establishing a concrete rf relation,
1393  * including setting the ModelAction data and checking for release sequences.
1394  *
1395  * @param act The action that is reading (must be a read)
1396  * @param rf The action from which we are reading (must be a write)
1397  *
1398  * @return True if this read established synchronization
1399  */
1400 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1401 {
1402         ASSERT(rf);
1403         ASSERT(rf->is_write());
1404
1405         act->set_read_from(rf);
1406         if (act->is_acquire()) {
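                /* An acquire read synchronizes with the head(s) of the
                 * release sequence(s) containing the write it reads from */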
1407                 rel_heads_list_t release_heads;
1408                 get_release_seq_heads(act, act, &release_heads);
1409                 int num_heads = release_heads.size();
1410                 for (unsigned int i = 0; i < release_heads.size(); i++)
1411                         if (!act->synchronize_with(release_heads[i])) {
1412                                 set_bad_synchronization();
1413                                 num_heads--;
1414                         }
1415                 return num_heads > 0;
1416         }
1417         return false;
1418 }
1419
1420 /**
1421  * Check promises and eliminate potentially-satisfying threads when a thread is
1422  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1423  * no longer satisfy a promise generated from that thread.
1424  *
1425  * @param blocker The thread on which a thread is waiting
1426  * @param waiting The waiting thread
1427  */
1428 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1429 {
1430         for (unsigned int i = 0; i < promises->size(); i++) {
1431                 Promise *promise = (*promises)[i];
1432                 if (!promise->thread_is_available(waiting->get_id()))
1433                         continue;
1434                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1435                         ModelAction *reader = promise->get_reader(j);
1436                         if (reader->get_tid() != blocker->get_id())
1437                                 continue;
1438                         if (promise->eliminate_thread(waiting->get_id())) {
1439                                 /* Promise has failed */
1440                                 priv->failed_promise = true;
1441                         } else {
1442                                 /* Only eliminate the 'waiting' thread once */
1443                                 return;
1444                         }
1445                 }
1446         }
1447 }
1448
1449 /**
1450  * @brief Check whether a model action is enabled.
1451  *
1452  * Checks whether a lock or join operation would be successful (i.e., is the
1453  * lock already locked, or is the joined thread already complete). If not, put
1454  * the action in a waiter list.
1455  *
1456  * @param curr The ModelAction to check for enabledness.
1457  * @return True if the action is enabled; false otherwise.
1458  */
1459 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1460         if (curr->is_lock()) {
1461                 std::mutex *lock = (std::mutex *)curr->get_location();
1462                 struct std::mutex_state *state = lock->get_state();
1463                 if (state->locked) {
1464                         //Stick the action in the appropriate waiting queue
1465                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1466                         return false;
1467                 }
1468         } else if (curr->is_thread_join()) {
1469                 Thread *blocking = (Thread *)curr->get_location();
1470                 if (!blocking->is_complete()) {
1471                         blocking->push_wait_list(curr);
1472                         thread_blocking_check_promises(blocking, get_thread(curr));
1473                         return false;
1474                 }
1475         }
1476
1477         return true;
1478 }
1479
1480 /**
1481  * This is the heart of the model checker routine. It performs model-checking
1482  * actions corresponding to a given "current action." Among other processes, it
1483  * calculates reads-from relationships, updates synchronization clock vectors,
1484  * forms a memory_order constraints graph, and handles replay/backtrack
1485  * execution when running permutations of previously-observed executions.
1486  *
1487  * @param curr The current action to process
1488  * @return The ModelAction that is actually executed; may be different than
1489  * curr; may be NULL, if the current action is not enabled to run
1490  */
1491 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1492 {
1493         ASSERT(curr);
1494         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
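        /* The write half of an RMW shares the ModelAction created for its
         * read half, so it is not re-added to the action lists below */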
1495
1496         if (!check_action_enabled(curr)) {
1497                 /* Make the execution look like we chose to run this action
1498                  * much later, when a lock/join can succeed */
1499                 get_thread(curr)->set_pending(curr);
1500                 scheduler->sleep(get_thread(curr));
1501                 return NULL;
1502         }
1503
1504         bool newly_explored = initialize_curr_action(&curr);
1505
1506         DBG();
1507         if (DBG_ENABLED())
1508                 curr->print();
1509
1510         wake_up_sleeping_actions(curr);
1511
1512         /* Compute fairness information for CHESS yield algorithm */
1513         if (model->params.yieldon) {
1514                 curr->get_node()->update_yield(scheduler);
1515         }
1516
1517         /* Add the action to lists before any other model-checking tasks */
1518         if (!second_part_of_rmw)
1519                 add_action_to_lists(curr);
1520
1521         /* Build may_read_from set for newly-created actions */
1522         if (newly_explored && curr->is_read())
1523                 build_may_read_from(curr);
1524
1525         /* Initialize work_queue with the "current action" work */
1526         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
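        /* Drain the queue to a fixed point; processing one entry may enqueue
         * additional release-sequence and mo-edge re-checks */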
1527         while (!work_queue.empty() && !has_asserted()) {
1528                 WorkQueueEntry work = work_queue.front();
1529                 work_queue.pop_front();
1530
1531                 switch (work.type) {
1532                 case WORK_CHECK_CURR_ACTION: {
1533                         ModelAction *act = work.action;
1534                         bool update = false; /* update this location's release seq's */
1535                         bool update_all = false; /* update all release seq's */
1536
1537                         if (process_thread_action(curr))
1538                                 update_all = true;
1539
1540                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1541                                 update = true;
1542
1543                         if (act->is_write() && process_write(act))
1544                                 update = true;
1545
1546                         if (act->is_fence() && process_fence(act))
1547                                 update_all = true;
1548
1549                         if (act->is_mutex_op() && process_mutex(act))
1550                                 update_all = true;
1551
1552                         if (act->is_relseq_fixup())
1553                                 process_relseq_fixup(curr, &work_queue);
1554
1555                         if (update_all)
1556                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1557                         else if (update)
1558                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1559                         break;
1560                 }
1561                 case WORK_CHECK_RELEASE_SEQ:
1562                         resolve_release_sequences(work.location, &work_queue);
1563                         break;
1564                 case WORK_CHECK_MO_EDGES: {
1565                         /** @todo Complete verification of work_queue */
1566                         ModelAction *act = work.action;
1567                         bool updated = false;
1568
1569                         if (act->is_read()) {
1570                                 const ModelAction *rf = act->get_reads_from();
1571                                 const Promise *promise = act->get_reads_from_promise();
1572                                 if (rf) {
1573                                         if (r_modification_order(act, rf))
1574                                                 updated = true;
1575                                 } else if (promise) {
1576                                         if (r_modification_order(act, promise))
1577                                                 updated = true;
1578                                 }
1579                         }
1580                         if (act->is_write()) {
1581                                 if (w_modification_order(act, NULL))
1582                                         updated = true;
1583                         }
1584                         mo_graph->commitChanges();
1585
1586                         if (updated)
1587                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1588                         break;
1589                 }
1590                 default:
1591                         ASSERT(false);
1592                         break;
1593                 }
1594         }
1595
1596         check_curr_backtracking(curr);
1597         set_backtracking(curr);
1598         return curr;
1599 }
1600
1601 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1602 {
1603         Node *currnode = curr->get_node();
1604         Node *parnode = currnode->get_parent();
1605
1606         if ((parnode && !parnode->backtrack_empty()) ||
1607                          !currnode->misc_empty() ||
1608                          !currnode->read_from_empty() ||
1609                          !currnode->promise_empty() ||
1610                          !currnode->relseq_break_empty()) {
1611                 set_latest_backtrack(curr);
1612         }
1613 }
1614
1615 bool ModelChecker::promises_expired() const
1616 {
1617         for (unsigned int i = 0; i < promises->size(); i++) {
1618                 Promise *promise = (*promises)[i];
1619                 if (promise->get_expiration() < priv->used_sequence_numbers)
1620                         return true;
1621         }
1622         return false;
1623 }
1624
1625 /**
1626  * This is the strongest feasibility check available.
1627  * @return whether the current trace (partial or complete) must be a prefix of
1628  * a feasible trace.
1629  */
1630 bool ModelChecker::isfeasibleprefix() const
1631 {
1632         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1633 }
1634
1635 /**
1636  * Print diagnostic information about an infeasible execution
1637  * @param prefix A string to prefix the output with; if NULL, then a default
1638  * message prefix will be provided
1639  */
1640 void ModelChecker::print_infeasibility(const char *prefix) const
1641 {
1642         char buf[100];
1643         char *ptr = buf;
1644         if (mo_graph->checkForCycles())
1645                 ptr += sprintf(ptr, "[mo cycle]");
1646         if (priv->failed_promise)
1647                 ptr += sprintf(ptr, "[failed promise]");
1648         if (priv->too_many_reads)
1649                 ptr += sprintf(ptr, "[too many reads]");
1650         if (priv->no_valid_reads)
1651                 ptr += sprintf(ptr, "[no valid reads-from]");
1652         if (priv->bad_synchronization)
1653                 ptr += sprintf(ptr, "[bad sw ordering]");
1654         if (promises_expired())
1655                 ptr += sprintf(ptr, "[promise expired]");
1656         if (promises->size() != 0)
1657                 ptr += sprintf(ptr, "[unresolved promise]");
1658         if (ptr != buf)
1659                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1660 }
1661
1662 /**
1663  * Returns whether the current completed trace is feasible, except for pending
1664  * release sequences.
1665  */
1666 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1667 {
1668         return !is_infeasible() && promises->size() == 0;
1669 }
1670
1671 /**
1672  * Check if the current partial trace is infeasible. Does not check any
1673  * end-of-execution flags, which might rule out the execution. Thus, this is
1674  * useful only for ruling an execution as infeasible.
1675  * @return whether the current partial trace is infeasible.
1676  */
1677 bool ModelChecker::is_infeasible() const
1678 {
1679         return mo_graph->checkForCycles() ||
1680                 priv->no_valid_reads ||
1681                 priv->failed_promise ||
1682                 priv->too_many_reads ||
1683                 priv->bad_synchronization ||
1684                 promises_expired();
1685 }
1686
1687 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1688 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1689         ModelAction *lastread = get_last_action(act->get_tid());
1690         lastread->process_rmw(act);
1691         if (act->is_rmw()) {
1692                 if (lastread->get_reads_from())
1693                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1694                 else
1695                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1696                 mo_graph->commitChanges();
1697         }
1698         return lastread;
1699 }
1700
1701 /**
1702  * A helper function for ModelChecker::check_recency, to check if the current
1703  * thread is able to read from a different write/promise for 'params.maxreads'
1704  * number of steps and if that write/promise should become visible (i.e., is
1705  * ordered later in the modification order). This helps model memory liveness.
1706  *
1707  * @param curr The current action. Must be a read.
1708  * @param rf The write/promise from which we plan to read
1709  * @param other_rf The write/promise from which we may read
1710  * @return True if we were able to read from other_rf for params.maxreads steps
1711  */
1712 template <typename T, typename U>
1713 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1714 {
1715         /* Need a different write/promise */
1716         if (other_rf->equals(rf))
1717                 return false;
1718
1719         /* Only look for "newer" writes/promises */
1720         if (!mo_graph->checkReachable(rf, other_rf))
1721                 return false;
1722
1723         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1724         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1725         action_list_t::reverse_iterator rit = list->rbegin();
1726         ASSERT((*rit) == curr);
1727         /* Skip past curr */
1728         rit++;
1729
1730         /* Does this write/promise work for everyone? */
1731         for (int i = 0; i < params.maxreads; i++, rit++) {
1732                 ModelAction *act = *rit;
1733                 if (!act->may_read_from(other_rf))
1734                         return false;
1735         }
1736         return true;
1737 }
1738
1739 /**
1740  * Checks whether a thread has read from the same write or Promise for too many
1741  * times without seeing the effects of a later write/Promise.
1742  *
1743  * Basic idea:
1744  * 1) there must be a different write/promise that we could read from,
1745  * 2) we must have read from the same write/promise in excess of maxreads times,
1746  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1747  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1748  *
1749  * If so, we decide that the execution is no longer feasible.
1750  *
1751  * @param curr The current action. Must be a read.
1752  * @param rf The ModelAction/Promise from which we might read.
1753  * @return True if the read should succeed; false otherwise
1754  */
1755 template <typename T>
1756 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1757 {
1758         if (!params.maxreads)
1759                 return true;
1760
1761         //NOTE: The next check is just an optimization, not strictly necessary.
1762         if (curr->get_node()->get_read_from_past_size() +
1763                         curr->get_node()->get_read_from_promise_size() <= 1)
1764                 return true;
1765
1766         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1767         int tid = id_to_int(curr->get_tid());
1768         ASSERT(tid < (int)thrd_lists->size());
1769         action_list_t *list = &(*thrd_lists)[tid];
1770         action_list_t::reverse_iterator rit = list->rbegin();
1771         ASSERT((*rit) == curr);
1772         /* Skip past curr */
1773         rit++;
1774
1775         action_list_t::reverse_iterator ritcopy = rit;
1776         /* See if we have enough reads from the same value */
1777         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1778                 if (ritcopy == list->rend())
1779                         return true;
1780                 ModelAction *act = *ritcopy;
1781                 if (!act->is_read())
1782                         return true;
1783                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1784                         return true;
1785                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1786                         return true;
1787                 if (act->get_node()->get_read_from_past_size() +
1788                                 act->get_node()->get_read_from_promise_size() <= 1)
1789                         return true;
1790         }
1791         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1792                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1793                 if (should_read_instead(curr, rf, write))
1794                         return false; /* liveness failure */
1795         }
1796         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1797                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1798                 if (should_read_instead(curr, rf, promise))
1799                         return false; /* liveness failure */
1800         }
1801         return true;
1802 }
1803
1804 /**
1805  * Updates the mo_graph with the constraints imposed from the current
1806  * read.
1807  *
1808  * Basic idea is the following: Go through each other thread and find
1809  * the last action that happened before our read.  Two cases:
1810  *
1811  * (1) The action is a write => that write must either occur before
1812  * the write we read from or be the write we read from.
1813  *
1814  * (2) The action is a read => the write that that action read from
1815  * must occur before the write we read from or be the same write.
1816  *
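 * Informal example: if a write W in another thread happens before curr
 * (and W is not rf itself), then the edge W --mo--> rf is added, since W
 * cannot be ordered after the write that curr reads from.
 *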
1817  * @param curr The current action. Must be a read.
1818  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1819  * @return True if modification order edges were added; false otherwise
1820  */
1821 template <typename rf_type>
1822 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1823 {
1824         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1825         unsigned int i;
1826         bool added = false;
1827         ASSERT(curr->is_read());
1828
1829         /* Last SC fence in the current thread */
1830         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1831         ModelAction *last_sc_write = NULL;
1832         if (curr->is_seqcst())
1833                 last_sc_write = get_last_seq_cst_write(curr);
1834
1835         /* Iterate over all threads */
1836         for (i = 0; i < thrd_lists->size(); i++) {
1837                 /* Last SC fence in thread i */
1838                 ModelAction *last_sc_fence_thread_local = NULL;
1839                 if (int_to_id((int)i) != curr->get_tid())
1840                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1841
1842                 /* Last SC fence in thread i, before last SC fence in current thread */
1843                 ModelAction *last_sc_fence_thread_before = NULL;
1844                 if (last_sc_fence_local)
1845                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1846
1847                 /* Iterate over actions in thread, starting from most recent */
1848                 action_list_t *list = &(*thrd_lists)[i];
1849                 action_list_t::reverse_iterator rit;
1850                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1851                         ModelAction *act = *rit;
1852
1853                         /* Skip curr */
1854                         if (act == curr)
1855                                 continue;
1856                         /* Don't want to add reflexive edges on 'rf' */
1857                         if (act->equals(rf)) {
1858                                 if (act->happens_before(curr))
1859                                         break;
1860                                 else
1861                                         continue;
1862                         }
1863
1864                         if (act->is_write()) {
1865                                 /* C++, Section 29.3 statement 5 */
1866                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1867                                                 *act < *last_sc_fence_thread_local) {
1868                                         added = mo_graph->addEdge(act, rf) || added;
1869                                         break;
1870                                 }
1871                                 /* C++, Section 29.3 statement 4 */
1872                                 else if (act->is_seqcst() && last_sc_fence_local &&
1873                                                 *act < *last_sc_fence_local) {
1874                                         added = mo_graph->addEdge(act, rf) || added;
1875                                         break;
1876                                 }
1877                                 /* C++, Section 29.3 statement 6 */
1878                                 else if (last_sc_fence_thread_before &&
1879                                                 *act < *last_sc_fence_thread_before) {
1880                                         added = mo_graph->addEdge(act, rf) || added;
1881                                         break;
1882                                 }
1883                         }
1884
1885                         /* C++, Section 29.3 statement 3 (second subpoint) */
1886                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1887                                 added = mo_graph->addEdge(act, rf) || added;
1888                                 break;
1889                         }
1890
1891                         /*
1892                          * Include at most one act per-thread that "happens
1893                          * before" curr
1894                          */
1895                         if (act->happens_before(curr)) {
1896                                 if (act->is_write()) {
1897                                         added = mo_graph->addEdge(act, rf) || added;
1898                                 } else {
1899                                         const ModelAction *prevrf = act->get_reads_from();
1900                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1901                                         if (prevrf) {
1902                                                 if (!prevrf->equals(rf))
1903                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1904                                         } else if (!prevrf_promise->equals(rf)) {
1905                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1906                                         }
1907                                 }
1908                                 break;
1909                         }
1910                 }
1911         }
1912
1913         /*
1914          * All compatible, thread-exclusive promises must be ordered after any
1915          * concrete loads from the same thread
1916          */
1917         for (unsigned int i = 0; i < promises->size(); i++)
1918                 if ((*promises)[i]->is_compatible_exclusive(curr))
1919                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1920
1921         return added;
1922 }
1923
1924 /**
1925  * Updates the mo_graph with the constraints imposed from the current write.
1926  *
1927  * Basic idea is the following: Go through each other thread and find
1928  * the latest action that happened before our write.  Two cases:
1929  *
1930  * (1) The action is a write => that write must occur before
1931  * the current write
1932  *
1933  * (2) The action is a read => the write that that action read from
1934  * must occur before the current write.
1935  *
1936  * This method also handles two other issues:
1937  *
1938  * (I) Sequential Consistency: Making sure that if the current write is
1939  * seq_cst, that it occurs after the previous seq_cst write.
1940  *
1941  * (II) Sending the write back to non-synchronizing reads.
1942  *
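 * Informal example: if a read R in another thread happens before curr and
 * R read from write W, then the edge W --mo--> curr is added, since W
 * cannot be ordered after curr.
 *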
1943  * @param curr The current action. Must be a write.
1944  * @param send_fv A vector for stashing reads to which we may pass our future
1945  * value. If NULL, then don't record any future values.
1946  * @return True if modification order edges were added; false otherwise
1947  */
1948 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1949 {
1950         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1951         unsigned int i;
1952         bool added = false;
1953         ASSERT(curr->is_write());
1954
1955         if (curr->is_seqcst()) {
1956                 /* We have to at least see the last sequentially consistent write,
1957                          so we are initialized. */
1958                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1959                 if (last_seq_cst != NULL) {
1960                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1961                 }
1962         }
1963
1964         /* Last SC fence in the current thread */
1965         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1966
1967         /* Iterate over all threads */
1968         for (i = 0; i < thrd_lists->size(); i++) {
1969                 /* Last SC fence in thread i, before last SC fence in current thread */
1970                 ModelAction *last_sc_fence_thread_before = NULL;
1971                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1972                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1973
1974                 /* Iterate over actions in thread, starting from most recent */
1975                 action_list_t *list = &(*thrd_lists)[i];
1976                 action_list_t::reverse_iterator rit;
1977                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1978                         ModelAction *act = *rit;
1979                         if (act == curr) {
1980                                 /*
1981                                  * 1) If RMW and it actually read from something, then we
1982                                  * already have all relevant edges, so just skip to next
1983                                  * thread.
1984                                  *
1985                                  * 2) If RMW and it didn't read from anything, we should add
1986                                  * whatever edge we can get to speed up convergence.
1987                                  *
1988                                  * 3) If normal write, we need to look at earlier actions, so
1989                                  * continue processing list.
1990                                  */
1991                                 if (curr->is_rmw()) {
1992                                         if (curr->get_reads_from() != NULL)
1993                                                 break;
1994                                         else
1995                                                 continue;
1996                                 } else
1997                                         continue;
1998                         }
1999
2000                         /* C++, Section 29.3 statement 7 */
2001                         if (last_sc_fence_thread_before && act->is_write() &&
2002                                         *act < *last_sc_fence_thread_before) {
2003                                 added = mo_graph->addEdge(act, curr) || added;
2004                                 break;
2005                         }
2006
2007                         /*
2008                          * Include at most one act per-thread that "happens
2009                          * before" curr
2010                          */
2011                         if (act->happens_before(curr)) {
2012                                 /*
2013                                  * Note: if act is RMW, just add edge:
2014                                  *   act --mo--> curr
2015                                  * The following edge should be handled elsewhere:
2016                                  *   readfrom(act) --mo--> act
2017                                  */
2018                                 if (act->is_write())
2019                                         added = mo_graph->addEdge(act, curr) || added;
2020                                 else if (act->is_read()) {
2021                                         //if previous read accessed a null, just keep going
2022                                         if (act->get_reads_from() == NULL)
2023                                                 continue;
2024                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2025                                 }
2026                                 break;
2027                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2028                                                      !act->same_thread(curr)) {
2029                                 /* We have an action that:
2030                                    (1) did not happen before us
2031                                    (2) is a read and we are a write
2032                                    (3) cannot synchronize with us
2033                                    (4) is in a different thread
2034                                    =>
2035                                    that read could potentially read from our write.  Note that
2036                                    these checks are overly conservative at this point; we'll
2037                                    do more checks before actually removing the
2038                                    pending future value.
2039
2040                                  */
2041                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2042                                         if (!is_infeasible())
2043                                                 send_fv->push_back(act);
2044                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2045                                                 add_future_value(curr, act);
2046                                 }
2047                         }
2048                 }
2049         }
2050
2051         /*
2052          * All compatible, thread-exclusive promises must be ordered after any
2053          * concrete stores to the same thread, or else they can be merged with
2054          * this store later
2055          */
2056         for (unsigned int i = 0; i < promises->size(); i++)
2057                 if ((*promises)[i]->is_compatible_exclusive(curr))
2058                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2059
2060         return added;
2061 }
2062
2063 /** Arbitrary reads from the future are not allowed.  Section 29.3
2064  * part 9 places some constraints.  This method checks one consequence of that
2065  * constraint; others require compiler support. */
2066 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2067 {
2068         if (!writer->is_rmw())
2069                 return true;
2070
2071         if (!reader->is_rmw())
2072                 return true;
2073
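        /* Walk the reads-from chain backward from 'writer'; if 'reader'
         * appears on that chain, it would (transitively) be reading from its
         * own future value, which this constraint disallows */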
2074         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2075                 if (search == reader)
2076                         return false;
2077                 if (search->get_tid() == reader->get_tid() &&
2078                                 search->happens_before(reader))
2079                         break;
2080         }
2081
2082         return true;
2083 }
2084
2085 /**
2086  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2087  * some constraints. This method checks the following constraint (others
2088  * require compiler support):
2089  *
2090  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2091  */
2092 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2093 {
2094         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2095         unsigned int i;
2096         /* Iterate over all threads */
2097         for (i = 0; i < thrd_lists->size(); i++) {
2098                 const ModelAction *write_after_read = NULL;
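                /* Track a write ordered after 'reader' in this thread: either
                 * a write that 'reader' happens-before, or the write read by
                 * such a later read */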
2099
2100                 /* Iterate over actions in thread, starting from most recent */
2101                 action_list_t *list = &(*thrd_lists)[i];
2102                 action_list_t::reverse_iterator rit;
2103                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2104                         ModelAction *act = *rit;
2105
2106                         /* Don't disallow due to act == reader */
2107                         if (!reader->happens_before(act) || reader == act)
2108                                 break;
2109                         else if (act->is_write())
2110                                 write_after_read = act;
2111                         else if (act->is_read() && act->get_reads_from() != NULL)
2112                                 write_after_read = act->get_reads_from();
2113                 }
2114
2115                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2116                         return false;
2117         }
2118         return true;
2119 }
2120
2121 /**
2122  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2123  * The ModelAction under consideration is expected to be taking part in
2124  * release/acquire synchronization as an object of the "reads from" relation.
2125  * Note that this can only provide release sequence support for RMW chains
2126  * which do not read from the future, as those actions cannot be traced until
2127  * their "promise" is fulfilled. Similarly, we may not even establish the
2128  * presence of a release sequence with certainty, as some modification order
2129  * constraints may be decided further in the future. Thus, this function
2130  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2131  * and a boolean representing certainty.
2132  *
2133  * @param rf The action that might be part of a release sequence. Must be a
2134  * write.
2135  * @param release_heads A pass-by-reference style return parameter. After
2136  * execution of this function, release_heads will contain the heads of all the
2137  * relevant release sequences, if any exist with certainty
2138  * @param pending A pass-by-reference style return parameter which is only used
2139  * when returning false (i.e., uncertain). Returns the available information regarding
2140  * an uncertain release sequence, including any write operations that might
2141  * break the sequence.
2142  * @return true, if the ModelChecker is certain that release_heads is complete;
2143  * false otherwise
2144  */
2145 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2146                 rel_heads_list_t *release_heads,
2147                 struct release_seq *pending) const
2148 {
2149         /* Only check for release sequences if there are no cycles */
2150         if (mo_graph->checkForCycles())
2151                 return false;
2152
2153         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2154                 ASSERT(rf->is_write());
2155
2156                 if (rf->is_release())
2157                         release_heads->push_back(rf);
2158                 else if (rf->get_last_fence_release())
2159                         release_heads->push_back(rf->get_last_fence_release());
2160                 if (!rf->is_rmw())
2161                         break; /* End of RMW chain */
2162
2163                 /** @todo Need to be smarter here...  In the linux lock
2164                  * example, this will run to the beginning of the program for
2165                  * every acquire. */
2166                 /** @todo The way to be smarter here is to keep going until 1
2167                  * thread has a release preceded by an acquire and you've seen
2168                  *       both. */
2169
2170                 /* acq_rel RMW is a sufficient stopping condition */
2171                 if (rf->is_acquire() && rf->is_release())
2172                         return true; /* complete */
2173         }
2174         if (!rf) {
2175                 /* read from future: need to settle this later */
2176                 pending->rf = NULL;
2177                 return false; /* incomplete */
2178         }
2179
2180         if (rf->is_release())
2181                 return true; /* complete */
2182
2183         /* else relaxed write
2184          * - check for fence-release in the same thread (29.8, stmt. 3)
2185          * - check modification order for contiguous subsequence
2186          *   -> rf must be same thread as release */
2187
2188         const ModelAction *fence_release = rf->get_last_fence_release();
2189         /* Synchronize with a fence-release unconditionally; we don't need to
2190          * find any more "contiguous subsequence..." for it */
2191         if (fence_release)
2192                 release_heads->push_back(fence_release);
2193
2194         int tid = id_to_int(rf->get_tid());
2195         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2196         action_list_t *list = &(*thrd_lists)[tid];
2197         action_list_t::const_reverse_iterator rit;
2198
2199         /* Find rf in the thread list */
2200         rit = std::find(list->rbegin(), list->rend(), rf);
2201         ASSERT(rit != list->rend());
2202
2203         /* Find the last {write,fence}-release */
2204         for (; rit != list->rend(); rit++) {
2205                 if (fence_release && *(*rit) < *fence_release)
2206                         break;
2207                 if ((*rit)->is_release())
2208                         break;
2209         }
2210         if (rit == list->rend()) {
2211                 /* No write-release in this thread */
2212                 return true; /* complete */
2213         } else if (fence_release && *(*rit) < *fence_release) {
2214                 /* The fence-release is more recent (and so, "stronger") than
2215                  * the most recent write-release */
2216                 return true; /* complete */
2217         } /* else, need to establish contiguous release sequence */
2218         ModelAction *release = *rit;
2219
2220         ASSERT(rf->same_thread(release));
2221
2222         pending->writes.clear();
2223
2224         bool certain = true;
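        /* Check every other thread for writes that might fall between
         * 'release' and 'rf' in the modification order; any such write could
         * break the contiguous release sequence */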
2225         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2226                 if (id_to_int(rf->get_tid()) == (int)i)
2227                         continue;
2228                 list = &(*thrd_lists)[i];
2229
2230                 /* Can we ensure no future writes from this thread may break
2231                  * the release seq? */
2232                 bool future_ordered = false;
2233
2234                 ModelAction *last = get_last_action(int_to_id(i));
2235                 Thread *th = get_thread(int_to_id(i));
2236                 if ((last && rf->happens_before(last)) ||
2237                                 !is_enabled(th) ||
2238                                 th->is_complete())
2239                         future_ordered = true;
2240
2241                 ASSERT(!th->is_model_thread() || future_ordered);
2242
2243                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2244                         const ModelAction *act = *rit;
2245                         /* Reach synchronization -> this thread is complete */
2246                         if (act->happens_before(release))
2247                                 break;
2248                         if (rf->happens_before(act)) {
2249                                 future_ordered = true;
2250                                 continue;
2251                         }
2252
2253                         /* Only non-RMW writes can break release sequences */
2254                         if (!act->is_write() || act->is_rmw())
2255                                 continue;
2256
2257                         /* Check modification order */
2258                         if (mo_graph->checkReachable(rf, act)) {
2259                                 /* rf --mo--> act */
2260                                 future_ordered = true;
2261                                 continue;
2262                         }
2263                         if (mo_graph->checkReachable(act, release))
2264                                 /* act --mo--> release */
2265                                 break;
2266                         if (mo_graph->checkReachable(release, act) &&
2267                                       mo_graph->checkReachable(act, rf)) {
2268                                 /* release --mo-> act --mo--> rf */
2269                                 return true; /* complete */
2270                         }
2271                         /* act may break release sequence */
2272                         pending->writes.push_back(act);
2273                         certain = false;
2274                 }
2275                 if (!future_ordered)
2276                         certain = false; /* This thread is uncertain */
2277         }
2278
2279         if (certain) {
2280                 release_heads->push_back(release);
2281                 pending->writes.clear();
2282         } else {
2283                 pending->release = release;
2284                 pending->rf = rf;
2285         }
2286         return certain;
2287 }
2288
2289 /**
2290  * An interface for getting the release sequence head(s) with which a
2291  * given ModelAction must synchronize. This function only returns a non-empty
2292  * result when it can locate a release sequence head with certainty. Otherwise,
2293  * it may mark the internal state of the ModelChecker so that it will handle
2294  * the release sequence at a later time, causing @a acquire to update its
2295  * synchronization at some later point in execution.
2296  *
2297  * @param acquire The 'acquire' action that may synchronize with a release
2298  * sequence
2299  * @param read The read action that may read from a release sequence; this may
2300  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2301  * when 'acquire' is a fence-acquire)
2302  * @param release_heads A pass-by-reference return parameter. Will be filled
2303  * with the head(s) of the release sequence(s), if they exist with certainty.
2304  * @see ModelChecker::release_seq_heads
2305  */
2306 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2307                 ModelAction *read, rel_heads_list_t *release_heads)
2308 {
2309         const ModelAction *rf = read->get_reads_from();
2310         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2311         sequence->acquire = acquire;
2312         sequence->read = read;
2313
2314         if (!release_seq_heads(rf, release_heads, sequence)) {
2315                 /* add act to 'lazy checking' list */
2316                 pending_rel_seqs->push_back(sequence);
2317         } else {
2318                 snapshot_free(sequence);
2319         }
2320 }
2321
2322 /**
2323  * Attempt to resolve all stashed operations that might synchronize with a
2324  * release sequence for a given location. This implements the "lazy" portion of
2325  * determining whether or not a release sequence was contiguous, since not all
2326  * modification order information is present at the time an action occurs.
2327  *
2328  * @param location The location/object that should be checked for release
2329  * sequence resolutions. A NULL value means to check all locations.
2330  * @param work_queue The work queue to which to add work items as they are
2331  * generated
2332  * @return True if any updates occurred (new synchronization, new mo_graph
2333  * edges)
2334  */
2335 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2336 {
2337         bool updated = false;
2338         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2339         while (it != pending_rel_seqs->end()) {
2340                 struct release_seq *pending = *it;
2341                 ModelAction *acquire = pending->acquire;
2342                 const ModelAction *read = pending->read;
2343
2344                 /* Only resolve sequences on the given location, if provided */
2345                 if (location && read->get_location() != location) {
2346                         it++;
2347                         continue;
2348                 }
2349
2350                 const ModelAction *rf = read->get_reads_from();
2351                 rel_heads_list_t release_heads;
2352                 bool complete;
2353                 complete = release_seq_heads(rf, &release_heads, pending);
2354                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2355                         if (!acquire->has_synchronized_with(release_heads[i])) {
2356                                 if (acquire->synchronize_with(release_heads[i]))
2357                                         updated = true;
2358                                 else
2359                                         set_bad_synchronization();
2360                         }
2361                 }
2362
2363                 if (updated) {
2364                         /* Re-check all pending release sequences */
2365                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2366                         /* Re-check read-acquire for mo_graph edges */
2367                         if (acquire->is_read())
2368                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2369
2370                         /* propagate synchronization to later actions */
2371                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2372                         for (; (*rit) != acquire; rit++) {
2373                                 ModelAction *propagate = *rit;
2374                                 if (acquire->happens_before(propagate)) {
2375                                         propagate->synchronize_with(acquire);
2376                                         /* Re-check 'propagate' for mo_graph edges */
2377                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2378                                 }
2379                         }
2380                 }
2381                 if (complete) {
2382                         it = pending_rel_seqs->erase(it);
2383                         snapshot_free(pending);
2384                 } else {
2385                         it++;
2386                 }
2387         }
2388
2389         // If we resolved promises or added new synchronization, check whether we have realized a data race.
2390         checkDataRaces();
2391
2392         return updated;
2393 }
2394
2395 /**
2396  * Performs various bookkeeping operations for the current ModelAction. For
2397  * instance, adds action to the per-object, per-thread action vector and to the
2398  * action trace list of all thread actions.
2399  *
2400  * @param act is the ModelAction to add.
2401  */
2402 void ModelChecker::add_action_to_lists(ModelAction *act)
2403 {
2404         int tid = id_to_int(act->get_tid());
2405         ModelAction *uninit = NULL;
2406         int uninit_id = -1;
2407         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
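        /* On first access to an atomic object, insert a pseudo-action
         * representing its uninitialized value so that later reads have a
         * write available to read from */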
2408         if (list->empty() && act->is_atomic_var()) {
2409                 uninit = get_uninitialized_action(act);
2410                 uninit_id = id_to_int(uninit->get_tid());
2411                 list->push_front(uninit);
2412         }
2413         list->push_back(act);
2414
2415         action_trace->push_back(act);
2416         if (uninit)
2417                 action_trace->push_front(uninit);
2418
2419         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2420         if (tid >= (int)vec->size())
2421                 vec->resize(priv->next_thread_id);
2422         (*vec)[tid].push_back(act);
2423         if (uninit)
2424                 (*vec)[uninit_id].push_front(uninit);
2425
2426         if ((int)thrd_last_action->size() <= tid)
2427                 thrd_last_action->resize(get_num_threads());
2428         (*thrd_last_action)[tid] = act;
2429         if (uninit)
2430                 (*thrd_last_action)[uninit_id] = uninit;
2431
2432         if (act->is_fence() && act->is_release()) {
2433                 if ((int)thrd_last_fence_release->size() <= tid)
2434                         thrd_last_fence_release->resize(get_num_threads());
2435                 (*thrd_last_fence_release)[tid] = act;
2436         }
2437
2438         if (act->is_wait()) {
2439                 void *mutex_loc = (void *) act->get_value();
2440                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2441
2442                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2443                 if (tid >= (int)vec->size())
2444                         vec->resize(priv->next_thread_id);
2445                 (*vec)[tid].push_back(act);
2446         }
2447 }
2448
2449 /**
2450  * @brief Get the last action performed by a particular Thread
2451  * @param tid The thread ID of the Thread in question
2452  * @return The last action in the thread
2453  */
2454 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2455 {
2456         int threadid = id_to_int(tid);
2457         if (threadid < (int)thrd_last_action->size())
2458                 return (*thrd_last_action)[id_to_int(tid)];
2459         else
2460                 return NULL;
2461 }
2462
2463 /**
2464  * @brief Get the last fence release performed by a particular Thread
2465  * @param tid The thread ID of the Thread in question
2466  * @return The last fence release in the thread, if one exists; NULL otherwise
2467  */
2468 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2469 {
2470         int threadid = id_to_int(tid);
2471         if (threadid < (int)thrd_last_fence_release->size())
2472                 return (*thrd_last_fence_release)[id_to_int(tid)];
2473         else
2474                 return NULL;
2475 }
2476
2477 /**
2478  * Gets the last memory_order_seq_cst write (in the total global sequence)
2479  * performed on a particular object (i.e., memory location), not including the
2480  * current action.
2481  * @param curr The current ModelAction; also denotes the object location to
2482  * check
2483  * @return The last seq_cst write
2484  */
2485 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2486 {
2487         void *location = curr->get_location();
2488         action_list_t *list = get_safe_ptr_action(obj_map, location);
2489         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2490         action_list_t::reverse_iterator rit;
2491         for (rit = list->rbegin(); (*rit) != curr; rit++)
2492                 ;
2493         rit++; /* Skip past curr */
2494         for ( ; rit != list->rend(); rit++)
2495                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2496                         return *rit;
2497         return NULL;
2498 }
2499
2500 /**
2501  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2502  * performed in a particular thread, prior to a particular fence.
2503  * @param tid The ID of the thread to check
2504  * @param before_fence The fence from which to begin the search; if NULL, then
2505  * search for the most recent fence in the thread.
2506  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2507  */
2508 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2509 {
2510         /* All fences should have NULL location */
2511         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2512         action_list_t::reverse_iterator rit = list->rbegin();
2513
2514         if (before_fence) {
2515                 for (; rit != list->rend(); rit++)
2516                         if (*rit == before_fence)
2517                                 break;
2518
2519                 ASSERT(*rit == before_fence);
2520                 rit++;
2521         }
2522
2523         for (; rit != list->rend(); rit++)
2524                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2525                         return *rit;
2526         return NULL;
2527 }
2528
2529 /**
2530  * Gets the last unlock (or wait) operation performed on a particular mutex (i.e.,
2531  * memory location). This function identifies the mutex according to the current
2532  * action, which is presumed to operate on the same mutex.
2533  * @param curr The current ModelAction; also denotes the object location to
2534  * check
2535  * @return The last unlock or wait operation, if one exists; NULL otherwise
2536  */
2537 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2538 {
2539         void *location = curr->get_location();
2540         action_list_t *list = get_safe_ptr_action(obj_map, location);
2541         /* Find: max({i in dom(S) | (isUnlock(t_i) || isWait(t_i)) && samevar(t_i, t)}) */
2542         action_list_t::reverse_iterator rit;
2543         for (rit = list->rbegin(); rit != list->rend(); rit++)
2544                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2545                         return *rit;
2546         return NULL;
2547 }
2548
2549 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2550 {
2551         ModelAction *parent = get_last_action(tid);
2552         if (!parent)
2553                 parent = get_thread(tid)->get_creation();
2554         return parent;
2555 }
2556
2557 /**
2558  * Returns the clock vector for a given thread.
2559  * @param tid The thread whose clock vector we want
2560  * @return Desired clock vector
2561  */
2562 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2563 {
2564         return get_parent_action(tid)->get_cv();
2565 }
2566
2567 /**
2568  * @brief Find the promise (if any) to resolve for the current action and
2569  * remove it from the pending promise vector
2570  * @param curr The current ModelAction. Should be a write.
2571  * @return The Promise to resolve, if any; otherwise NULL
2572  */
2573 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2574 {
2575         for (unsigned int i = 0; i < promises->size(); i++)
2576                 if (curr->get_node()->get_promise(i)) {
2577                         Promise *ret = (*promises)[i];
2578                         promises->erase(promises->begin() + i);
2579                         return ret;
2580                 }
2581         return NULL;
2582 }
2583
2584 /**
2585  * Resolve a Promise with a current write.
2586  * @param write The ModelAction that is fulfilling Promises
2587  * @param promise The Promise to resolve
2588  * @return True if the Promise was successfully resolved; false otherwise
2589  */
2590 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2591 {
2592         ModelVector<ModelAction *> actions_to_check;
2593
2594         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2595                 ModelAction *read = promise->get_reader(i);
2596                 read_from(read, write);
2597                 actions_to_check.push_back(read);
2598         }
2599         /* Make sure the promise is compatible with the write and has the same value */
2600         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2601         if (!mo_graph->resolvePromise(promise, write))
2602                 priv->failed_promise = true;
2603
2604         /**
2605          * @todo  It is possible to end up in an inconsistent state, where a
2606          * "resolved" promise may still be referenced if
2607          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2608          *
2609          * Note that the inconsistency only matters when dumping mo_graph to
2610          * file.
2611          *
2612          * delete promise;
2613          */
2614
2615         // Check whether reading these writes has made threads unable to
2616         // resolve promises
2617         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2618                 ModelAction *read = actions_to_check[i];
2619                 mo_check_promises(read, true);
2620         }
2621
2622         return true;
2623 }
2624
2625 /**
2626  * Compute the set of promises that could potentially be satisfied by this
2627  * action. Note that the set computation actually appears in the Node, not in
2628  * ModelChecker.
2629  * @param curr The ModelAction that may satisfy promises
2630  */
2631 void ModelChecker::compute_promises(ModelAction *curr)
2632 {
2633         for (unsigned int i = 0; i < promises->size(); i++) {
2634                 Promise *promise = (*promises)[i];
2635                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2636                         continue;
2637
2638                 bool satisfy = true;
2639                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2640                         const ModelAction *act = promise->get_reader(j);
2641                         if (act->happens_before(curr) ||
2642                                         act->could_synchronize_with(curr)) {
2643                                 satisfy = false;
2644                                 break;
2645                         }
2646                 }
2647                 if (satisfy)
2648                         curr->get_node()->set_promise(i);
2649         }
2650 }
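/*
 * A sketch of what a Promise represents (assuming x is a std::atomic<int>
 * initialized to 0 and both accesses use memory_order_relaxed):
 *
 *   // Thread 1                       // Thread 2
 *   r1 = x.load(...);                 x.store(42, ...);
 *
 * The checker may speculatively let Thread 1's load return 42 before Thread
 * 2's store has been explored; that outstanding obligation is a Promise. A
 * later write can satisfy the promise only if it is compatible and writes the
 * promised value, and only if none of the promised reads happens before (or
 * could synchronize with) that write, since a read may never observe a write
 * that it happens before; that is what the loop above checks before recording
 * the promise in the Node.
 */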
2651
2652 /** Checks promises in response to a change in a thread's ClockVector. */
2653 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2654 {
2655         for (unsigned int i = 0; i < promises->size(); i++) {
2656                 Promise *promise = (*promises)[i];
2657                 if (!promise->thread_is_available(tid))
2658                         continue;
2659                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2660                         const ModelAction *act = promise->get_reader(j);
2661                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2662                                         merge_cv->synchronized_since(act)) {
2663                                 if (promise->eliminate_thread(tid)) {
2664                                         /* Promise has failed */
2665                                         priv->failed_promise = true;
2666                                         return;
2667                                 }
2668                         }
2669                 }
2670         }
2671 }
2672
2673 void ModelChecker::check_promises_thread_disabled()
2674 {
2675         for (unsigned int i = 0; i < promises->size(); i++) {
2676                 Promise *promise = (*promises)[i];
2677                 if (promise->has_failed()) {
2678                         priv->failed_promise = true;
2679                         return;
2680                 }
2681         }
2682 }
2683
2684 /**
2685  * @brief Checks promises in response to addition to modification order for
2686  * threads.
2687  *
2688  * We test whether threads are still available for satisfying promises after an
2689  * addition to our modification order constraints. Those that are unavailable
2690  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2691  * that promise has failed.
2692  *
2693  * @param act The ModelAction which updated the modification order
2694  * @param is_read_check Should be true if act is a read and we must check for
2695  * updates to the store from which it read (there is a distinction here for
2696  * RMW's, which are both a load and a store)
2697  */
2698 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2699 {
2700         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2701
2702         for (unsigned int i = 0; i < promises->size(); i++) {
2703                 Promise *promise = (*promises)[i];
2704
2705                 // Is this promise on the same location?
2706                 if (!promise->same_location(write))
2707                         continue;
2708
2709                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2710                         const ModelAction *pread = promise->get_reader(j);
2711                         if (!pread->happens_before(act))
2712                                continue;
2713                         if (mo_graph->checkPromise(write, promise)) {
2714                                 priv->failed_promise = true;
2715                                 return;
2716                         }
2717                         break;
2718                 }
2719
2720                 // Don't do any lookups twice for the same thread
2721                 if (!promise->thread_is_available(act->get_tid()))
2722                         continue;
2723
2724                 if (mo_graph->checkReachable(promise, write)) {
2725                         if (mo_graph->checkPromise(write, promise)) {
2726                                 priv->failed_promise = true;
2727                                 return;
2728                         }
2729                 }
2730         }
2731 }
2732
2733 /**
2734  * Compute the set of writes that may break the current pending release
2735  * sequence. This information is extracted from previous release sequence
2736  * calculations.
2737  *
2738  * @param curr The current ModelAction. Must be a release sequence fixup
2739  * action.
2740  */
2741 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2742 {
2743         if (pending_rel_seqs->empty())
2744                 return;
2745
2746         struct release_seq *pending = pending_rel_seqs->back();
2747         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2748                 const ModelAction *write = pending->writes[i];
2749                 curr->get_node()->add_relseq_break(write);
2750         }
2751
2752         /* NULL means don't break the sequence; just synchronize */
2753         curr->get_node()->add_relseq_break(NULL);
2754 }
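/*
 * Background for "break" writes (a sketch of the C++11 release-sequence rule,
 * assuming y is a std::atomic<int> and data is a plain int):
 *
 *   // Thread 1                                // Thread 2
 *   data = 1;                                  y.store(3, std::memory_order_relaxed);
 *   y.store(2, std::memory_order_release);
 *                                              // Thread 3
 *                                              if (y.load(std::memory_order_acquire) == 3)
 *                                                      assert(data == 1);   // NOT guaranteed
 *
 * If Thread 2's relaxed store is ordered after Thread 1's release store in the
 * modification order, it breaks the release sequence headed by that store
 * (only same-thread writes and RMWs extend it), so Thread 3 does not
 * synchronize with Thread 1. The writes collected above are exactly such
 * candidate breakers; the trailing NULL entry lets the checker also explore
 * the "no break, just synchronize" alternative.
 */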
2755
2756 /**
2757  * Build up an initial set of all past writes that this 'read' action may read
2758  * from, as well as any previously-observed future values that must still be valid.
2759  *
2760  * @param curr is the current ModelAction that we are exploring; it must be a
2761  * 'read' operation.
2762  */
2763 void ModelChecker::build_may_read_from(ModelAction *curr)
2764 {
2765         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2766         unsigned int i;
2767         ASSERT(curr->is_read());
2768
2769         ModelAction *last_sc_write = NULL;
2770
2771         if (curr->is_seqcst())
2772                 last_sc_write = get_last_seq_cst_write(curr);
2773
2774         /* Iterate over all threads */
2775         for (i = 0; i < thrd_lists->size(); i++) {
2776                 /* Iterate over actions in thread, starting from most recent */
2777                 action_list_t *list = &(*thrd_lists)[i];
2778                 action_list_t::reverse_iterator rit;
2779                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2780                         ModelAction *act = *rit;
2781
2782                         /* Only consider 'write' actions */
2783                         if (!act->is_write() || act == curr)
2784                                 continue;
2785
2786                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2787                         bool allow_read = true;
2788
2789                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2790                                 allow_read = false;
2791                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2792                                 allow_read = false;
2793
2794                         if (allow_read) {
2795                                 /* Only add feasible reads */
2796                                 mo_graph->startChanges();
2797                                 r_modification_order(curr, act);
2798                                 if (!is_infeasible())
2799                                         curr->get_node()->add_read_from_past(act);
2800                                 mo_graph->rollbackChanges();
2801                         }
2802
2803                         /* Include at most one act per-thread that "happens before" curr */
2804                         if (act->happens_before(curr))
2805                                 break;
2806                 }
2807         }
2808
2809         /* Inherit existing, promised future values */
2810         for (i = 0; i < promises->size(); i++) {
2811                 const Promise *promise = (*promises)[i];
2812                 const ModelAction *promise_read = promise->get_reader(0);
2813                 if (promise_read->same_var(curr)) {
2814                         /* Only add feasible future-values */
2815                         mo_graph->startChanges();
2816                         r_modification_order(curr, promise);
2817                         if (!is_infeasible())
2818                                 curr->get_node()->add_read_from_promise(promise_read);
2819                         mo_graph->rollbackChanges();
2820                 }
2821         }
2822
2823         /* The may-read-from set can be empty only if the execution is doomed */
2824         if (!curr->get_node()->read_from_size()) {
2825                 priv->no_valid_reads = true;
2826                 set_assert();
2827         }
2828
2829         if (DBG_ENABLED()) {
2830                 model_print("Reached read action:\n");
2831                 curr->print();
2832                 model_print("Printing read_from_past\n");
2833                 curr->get_node()->print_read_from_past();
2834                 model_print("End printing read_from_past\n");
2835         }
2836 }
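/*
 * A sketch of the seq_cst filtering above (assuming x is a std::atomic<int>):
 *
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_seq_cst);
 *   int r = x.load(std::memory_order_seq_cst);   // must observe 2, not 1
 *
 * Here last_sc_write is the store of 2; the store of 1 is non-seq_cst and
 * happens before last_sc_write, so 'allow_read' is cleared for it, leaving the
 * store of 2 as the only candidate.
 */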
2837
2838 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2839 {
2840         for ( ; write != NULL; write = write->get_reads_from()) {
2841                 /* UNINIT actions don't have a Node, and they never sleep */
2842                 if (write->is_uninitialized())
2843                         return true;
2844                 Node *prevnode = write->get_node()->get_parent();
2845
2846                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2847                 if (write->is_release() && thread_sleep)
2848                         return true;
2849                 if (!write->is_rmw())
2850                         return false;
2851         }
2852         return true;
2853 }
2854
2855 /**
2856  * @brief Get an action representing an uninitialized atomic
2857  *
2858  * This function may create a new one or try to retrieve one from the NodeStack
2859  *
2860  * @param curr The current action, which prompts the creation of an UNINIT action
2861  * @return A pointer to the UNINIT ModelAction
2862  */
2863 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2864 {
2865         Node *node = curr->get_node();
2866         ModelAction *act = node->get_uninit_action();
2867         if (!act) {
2868                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2869                 node->set_uninit_action(act);
2870         }
2871         act->create_cv(NULL);
2872         return act;
2873 }
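/*
 * A sketch of when an UNINIT action is needed (assuming x is a std::atomic<int>
 * that is never initialized or stored to before the load):
 *
 *   std::atomic<int> x;                           // no initializer
 *   int r = x.load(std::memory_order_relaxed);    // may read from the UNINIT placeholder
 *
 * The placeholder write carries params.uninitvalue and is attributed to the
 * model thread, since no user thread produced it.
 */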
2874
2875 static void print_list(action_list_t *list)
2876 {
2877         action_list_t::iterator it;
2878
2879         model_print("---------------------------------------------------------------------\n");
2880
2881         unsigned int hash = 0;
2882
2883         for (it = list->begin(); it != list->end(); it++) {
2884                 const ModelAction *act = *it;
2885                 if (act->get_seq_number() > 0)
2886                         act->print();
2887                 hash = hash^(hash<<3)^((*it)->hash());
2888         }
2889         model_print("HASH %u\n", hash);
2890         model_print("---------------------------------------------------------------------\n");
2891 }
2892
2893 #if SUPPORT_MOD_ORDER_DUMP
2894 void ModelChecker::dumpGraph(char *filename) const
2895 {
2896         char buffer[200];
2897         sprintf(buffer, "%s.dot", filename);
2898         FILE *file = fopen(buffer, "w");
2899         fprintf(file, "digraph %s {\n", filename);
2900         mo_graph->dumpNodes(file);
2901         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2902
2903         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2904                 ModelAction *act = *it;
2905                 if (act->is_read()) {
2906                         mo_graph->dot_print_node(file, act);
2907                         if (act->get_reads_from())
2908                                 mo_graph->dot_print_edge(file,
2909                                                 act->get_reads_from(),
2910                                                 act,
2911                                                 "label=\"rf\", color=red, weight=2");
2912                         else
2913                                 mo_graph->dot_print_edge(file,
2914                                                 act->get_reads_from_promise(),
2915                                                 act,
2916                                                 "label=\"rf\", color=red");
2917                 }
2918                 if (thread_array[id_to_int(act->get_tid())]) {
2919                         mo_graph->dot_print_edge(file,
2920                                         thread_array[id_to_int(act->get_tid())],
2921                                         act,
2922                                         "label=\"sb\", color=blue, weight=400");
2923                 }
2924
2925                 thread_array[id_to_int(act->get_tid())] = act;
2926         }
2927         fprintf(file, "}\n");
2928         model_free(thread_array);
2929         fclose(file);
2930 }
2931 #endif
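/*
 * The .dot files emitted above can be rendered with Graphviz, for example:
 *
 *   dot -Tpng graph0000.dot -o graph0000.png
 *
 * (filenames follow the "exec%04u"/"graph%04u" patterns used in
 * print_summary() below).
 */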
2932
2933 /** @brief Prints an execution trace summary. */
2934 void ModelChecker::print_summary() const
2935 {
2936 #if SUPPORT_MOD_ORDER_DUMP
2937         char buffername[100];
2938         sprintf(buffername, "exec%04u", stats.num_total);
2939         mo_graph->dumpGraphToFile(buffername);
2940         sprintf(buffername, "graph%04u", stats.num_total);
2941         dumpGraph(buffername);
2942 #endif
2943
2944         model_print("Execution %d:", stats.num_total);
2945         if (isfeasibleprefix()) {
2946                 if (scheduler->all_threads_sleeping())
2947                         model_print(" SLEEP-SET REDUNDANT");
2948                 model_print("\n");
2949         } else
2950                 print_infeasibility(" INFEASIBLE");
2951         print_list(action_trace);
2952         model_print("\n");
2953         if (!promises->empty()) {
2954                 model_print("Pending promises:\n");
2955                 for (unsigned int i = 0; i < promises->size(); i++) {
2956                         model_print(" [P%u] ", i);
2957                         (*promises)[i]->print();
2958                 }
2959                 model_print("\n");
2960         }
2961 }
2962
2963 /**
2964  * Add a Thread to the system for the first time. Should only be called once
2965  * per thread.
2966  * @param t The Thread to add
2967  */
2968 void ModelChecker::add_thread(Thread *t)
2969 {
2970         thread_map->put(id_to_int(t->get_id()), t);
2971         scheduler->add_thread(t);
2972 }
2973
2974 /**
2975  * @brief Get a Thread reference by its ID
2976  * @param tid The Thread's ID
2977  * @return A Thread reference
2978  */
2979 Thread * ModelChecker::get_thread(thread_id_t tid) const
2980 {
2981         return thread_map->get(id_to_int(tid));
2982 }
2983
2984 /**
2985  * @brief Get a reference to the Thread in which a ModelAction was executed
2986  * @param act The ModelAction
2987  * @return A Thread reference
2988  */
2989 Thread * ModelChecker::get_thread(const ModelAction *act) const
2990 {
2991         return get_thread(act->get_tid());
2992 }
2993
2994 /**
2995  * @brief Get a Promise's "promise number"
2996  *
2997  * A "promise number" is an index number that is unique to a promise, valid
2998  * only for a specific snapshot of an execution trace. Promises may come and go
2999  * as they are generated and resolved, so an index only retains meaning for the
3000  * current snapshot.
3001  *
3002  * @param promise The Promise to check
3003  * @return The promise index, if the promise still is valid; otherwise -1
3004  */
3005 int ModelChecker::get_promise_number(const Promise *promise) const
3006 {
3007         for (unsigned int i = 0; i < promises->size(); i++)
3008                 if ((*promises)[i] == promise)
3009                         return i;
3010         /* Not found */
3011         return -1;
3012 }
3013
3014 /**
3015  * @brief Check if a Thread is currently enabled
3016  * @param t The Thread to check
3017  * @return True if the Thread is currently enabled
3018  */
3019 bool ModelChecker::is_enabled(Thread *t) const
3020 {
3021         return scheduler->is_enabled(t);
3022 }
3023
3024 /**
3025  * @brief Check if a Thread is currently enabled
3026  * @param tid The ID of the Thread to check
3027  * @return True if the Thread is currently enabled
3028  */
3029 bool ModelChecker::is_enabled(thread_id_t tid) const
3030 {
3031         return scheduler->is_enabled(tid);
3032 }
3033
3034 /**
3035  * Switch from a model-checker context to a user-thread context. This is the
3036  * complement of ModelChecker::switch_to_master and must be called from the
3037  * model-checker context
3038  *
3039  * @param thread The user-thread to switch to
3040  */
3041 void ModelChecker::switch_from_master(Thread *thread)
3042 {
3043         scheduler->set_current_thread(thread);
3044         Thread::swap(&system_context, thread);
3045 }
3046
3047 /**
3048  * Switch from a user-context to the "master thread" context (a.k.a. system
3049  * context). This switch is made with the intention of exploring a particular
3050  * model-checking action (described by a ModelAction object). Must be called
3051  * from a user-thread context.
3052  *
3053  * @param act The current action that will be explored. May be NULL only if
3054  * trace is exiting via an assertion (see ModelChecker::set_assert and
3055  * ModelChecker::has_asserted).
3056  * @return Return the value returned by the current action
3057  */
3058 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3059 {
3060         DBG();
3061         Thread *old = thread_current();
3062         scheduler->set_current_thread(NULL);
3063         ASSERT(!old->get_pending());
3064         old->set_pending(act);
3065         if (Thread::swap(old, &system_context) < 0) {
3066                 perror("swap threads");
3067                 exit(EXIT_FAILURE);
3068         }
3069         return old->get_return_value();
3070 }
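/*
 * A sketch of the round trip between the two context-switch helpers above
 * (simplified; the real driver is ModelChecker::run()): a user thread stashes
 * its next ModelAction and yields to the system context, the model checker
 * processes the action and picks the next thread to resume, and the user
 * thread eventually continues with the action's return value:
 *
 *   // user-thread side                      // model-checker side
 *   uint64_t ret = switch_to_master(act);    switch_from_master(next_thread);
 *   // ...blocked in Thread::swap()...       // ...runs until that thread calls
 *   // resumes with get_return_value()       //    switch_to_master() again...
 */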
3071
3072 /**
3073  * Takes the next step in the execution, if possible.
3074  * @param curr The current step to take
3075  * @return Returns the next Thread to run, if any; NULL if this execution
3076  * should terminate
3077  */
3078 Thread * ModelChecker::take_step(ModelAction *curr)
3079 {
3080         Thread *curr_thrd = get_thread(curr);
3081         ASSERT(curr_thrd->get_state() == THREAD_READY);
3082
3083         curr = check_current_action(curr);
3084
3085         /* Infeasible -> don't take any more steps */
3086         if (is_infeasible())
3087                 return NULL;
3088         else if (isfeasibleprefix() && have_bug_reports()) {
3089                 set_assert();
3090                 return NULL;
3091         }
3092
3093         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3094                 return NULL;
3095
3096         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3097                 scheduler->remove_thread(curr_thrd);
3098
3099         Thread *next_thrd = NULL;
3100         if (curr)
3101                 next_thrd = action_select_next_thread(curr);
3102         if (!next_thrd)
3103                 next_thrd = get_next_thread();
3104
3105         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3106                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3107
3108         return next_thrd;
3109 }
3110
3111 /** Wrapper to run the user's main function, with appropriate arguments */
3112 void user_main_wrapper(void *)
3113 {
3114         user_main(model->params.argc, model->params.argv);
3115 }
3116
3117 /** @brief Run ModelChecker for the user program */
3118 void ModelChecker::run()
3119 {
3120         do {
3121                 thrd_t user_thread;
3122                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3123                 add_thread(t);
3124
3125                 do {
3126                         /*
3127                          * Stash next pending action(s) for thread(s). There
3128                          * should only need to stash one thread's action--the
3129                          * thread which just took a step--plus the first step
3130                          * for any newly-created thread
3131                          */
3132                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3133                                 thread_id_t tid = int_to_id(i);
3134                                 Thread *thr = get_thread(tid);
3135                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3136                                         switch_from_master(thr);
3137                                         if (thr->is_waiting_on(thr))
3138                                                 assert_bug("Deadlock detected");
3139                                 }
3140                         }
3141
3142                         /* Catch assertions from prior take_step or from
3143                          * between-ModelAction bugs (e.g., data races) */
3144                         if (has_asserted())
3145                                 break;
3146
3147                         /* Consume the next action for a Thread */
3148                         ModelAction *curr = t->get_pending();
3149                         t->set_pending(NULL);
3150                         t = take_step(curr);
3151                 } while (t && !t->is_model_thread());
3152
3153                 /*
3154                  * Launch end-of-execution release sequence fixups only when
3155                  * the execution is otherwise feasible AND:
3156                  *
3157                  * (1) there are pending release sequences,
3158                  * (2) there are pending assertions that could be invalidated by a
3159                  * change in clock vectors (i.e., data races), and
3160                  * (3) there are no pending promises
3161                  */
3162                 while (!pending_rel_seqs->empty() &&
3163                                 is_feasible_prefix_ignore_relseq() &&
3164                                 !unrealizedraces.empty()) {
3165                         model_print("*** WARNING: release sequence fixup action "
3166                                         "(%zu pending release sequence(s)) ***\n",
3167                                         pending_rel_seqs->size());
3168                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3169                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3170                                         model_thread);
3171                         take_step(fixup);
3172                 }
3173         } while (next_execution());
3174
3175         model_print("******* Model-checking complete: *******\n");
3176         print_stats();
3177 }