1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
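/** @brief The global model-checker instance */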
21 ModelChecker *model;
22
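/** @brief A buffered bug-report message, printed at the end of an execution */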
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
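                /* "%s" in fmt is replaced by str, so strlen(fmt) + strlen(str)
                 * still leaves room for the terminating NUL */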
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
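/** @brief Get the action list for a memory location, allocating an empty
 *  list on first access */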
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
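/** @brief Get the per-thread action-list vector for a memory location,
 *  allocating an empty vector on first access */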
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
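        /* Roll back user-program memory to the snapshot of the initial state */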
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next chosen thread to run, if any exists; otherwise NULL, if no
221  * threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
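                /* Explore the next unexplored choice at this node, in priority
                 * order: misc index, promise set, read-from source, then
                 * release-sequence break; if all are exhausted, backtrack to a
                 * different thread recorded in the parent node. */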
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will try a different read-from source. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be.  This method marks each such thread's pending action
300  * with the sleep flag.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* A sleeping fence-acquire can be awoken by any release operation */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can awake load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else {
487                 stats.num_redundant++;
488
489                 /**
490                  * @todo We can violate this ASSERT() when fairness/sleep sets
491                  * conflict to cause an execution to terminate, e.g. with:
492                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
493                  */
494                 //ASSERT(scheduler->all_threads_sleeping());
495         }
496 }
497
498 /** @brief Print execution stats */
499 void ModelChecker::print_stats() const
500 {
501         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
502         model_print("Number of redundant executions: %d\n", stats.num_redundant);
503         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
504         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
505         model_print("Total executions: %d\n", stats.num_total);
506         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
507 }
508
509 /**
510  * @brief End-of-execution print
511  * @param printbugs Should any existing bugs be printed?
512  */
513 void ModelChecker::print_execution(bool printbugs) const
514 {
515         print_program_output();
516
517         if (DBG_ENABLED() || params.verbose) {
518                 model_print("Earliest divergence point since last feasible execution:\n");
519                 if (earliest_diverge)
520                         earliest_diverge->print();
521                 else
522                         model_print("(Not set)\n");
523
524                 model_print("\n");
525                 print_stats();
526         }
527
528         /* Don't print invalid bugs */
529         if (printbugs)
530                 print_bugs();
531
532         model_print("\n");
533         print_summary();
534 }
535
536 /**
537  * Queries the model-checker for more executions to explore and, if one
538  * exists, resets the model-checker state to execute a new execution.
539  *
540  * @return If there are more executions to explore, return true. Otherwise,
541  * return false.
542  */
543 bool ModelChecker::next_execution()
544 {
545         DBG();
546         /* Is this execution a feasible execution that's worth bug-checking? */
547         bool complete = isfeasibleprefix() && (is_complete_execution() ||
548                         have_bug_reports());
549
550         /* End-of-execution bug checks */
551         if (complete) {
552                 if (is_deadlocked())
553                         assert_bug("Deadlock detected");
554
555                 checkDataRaces();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
580
581 /**
582  * @brief Find the last fence-related backtracking conflict for a ModelAction
583  *
584  * This function performs the search for the most recent conflicting action
585  * against which we should perform backtracking, as affected by fence
586  * operations. This includes pairs of potentially-synchronizing actions which
587  * occur due to fence-acquire or fence-release, and hence should be explored in
588  * the opposite execution order.
589  *
590  * @param act The current action
591  * @return The most recent action which conflicts with act due to fences
592  */
593 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
594 {
595         /* Only perform release/acquire fence backtracking for stores */
596         if (!act->is_write())
597                 return NULL;
598
599         /* Find a fence-release (or, act is a release) */
600         ModelAction *last_release;
601         if (act->is_release())
602                 last_release = act;
603         else
604                 last_release = get_last_fence_release(act->get_tid());
605         if (!last_release)
606                 return NULL;
607
608         /* Skip past the release */
609         action_list_t *list = action_trace;
610         action_list_t::reverse_iterator rit;
611         for (rit = list->rbegin(); rit != list->rend(); rit++)
612                 if (*rit == last_release)
613                         break;
614         ASSERT(rit != list->rend());
615
616         /* Find a prior:
617          *   load-acquire
618          * or
619          *   load --sb-> fence-acquire */
620         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
621         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
622         bool found_acquire_fences = false;
623         for ( ; rit != list->rend(); rit++) {
624                 ModelAction *prev = *rit;
625                 if (act->same_thread(prev))
626                         continue;
627
628                 int tid = id_to_int(prev->get_tid());
629
630                 if (prev->is_read() && act->same_var(prev)) {
631                         if (prev->is_acquire()) {
632                                 /* Found most recent load-acquire, don't need
633                                  * to search for more fences */
634                                 if (!found_acquire_fences)
635                                         return NULL;
636                         } else {
637                                 prior_loads[tid] = prev;
638                         }
639                 }
640                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
641                         found_acquire_fences = true;
642                         acquire_fences[tid] = prev;
643                 }
644         }
645
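        /* A thread yields a candidate only if it has both an acquire fence and
         * an earlier non-acquire load from act's location; the latest such
         * fence across all threads is the conflict. */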
646         ModelAction *latest_backtrack = NULL;
647         for (unsigned int i = 0; i < acquire_fences.size(); i++)
648                 if (acquire_fences[i] && prior_loads[i])
649                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
650                                 latest_backtrack = acquire_fences[i];
651         return latest_backtrack;
652 }
653
654 /**
655  * @brief Find the last backtracking conflict for a ModelAction
656  *
657  * This function performs the search for the most recent conflicting action
658  * against which we should perform backtracking. This primarily includes pairs of
659  * synchronizing actions which should be explored in the opposite execution
660  * order.
661  *
662  * @param act The current action
663  * @return The most recent action which conflicts with act
664  */
665 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
666 {
667         switch (act->get_type()) {
668         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
669         case ATOMIC_READ:
670         case ATOMIC_WRITE:
671         case ATOMIC_RMW: {
672                 ModelAction *ret = NULL;
673
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (prev->could_synchronize_with(act)) {
680                                 ret = prev;
681                                 break;
682                         }
683                 }
684
685                 ModelAction *ret2 = get_last_fence_conflict(act);
686                 if (!ret2)
687                         return ret;
688                 if (!ret)
689                         return ret2;
690                 if (*ret < *ret2)
691                         return ret2;
692                 return ret;
693         }
694         case ATOMIC_LOCK:
695         case ATOMIC_TRYLOCK: {
696                 /* linear search: from most recent to oldest */
697                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
698                 action_list_t::reverse_iterator rit;
699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
700                         ModelAction *prev = *rit;
701                         if (act->is_conflicting_lock(prev))
702                                 return prev;
703                 }
704                 break;
705         }
706         case ATOMIC_UNLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (!act->same_thread(prev) && prev->is_failed_trylock())
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_WAIT: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                         if (!act->same_thread(prev) && prev->is_notify())
726                                 return prev;
727                 }
728                 break;
729         }
730
731         case ATOMIC_NOTIFY_ALL:
732         case ATOMIC_NOTIFY_ONE: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_wait())
739                                 return prev;
740                 }
741                 break;
742         }
743         default:
744                 break;
745         }
746         return NULL;
747 }
748
749 /** This method finds backtracking points against which we should try to
750  * reorder the parameter ModelAction.
751  *
752  * @param act The ModelAction to find backtracking points for.
753  */
754 void ModelChecker::set_backtracking(ModelAction *act)
755 {
756         Thread *t = get_thread(act);
757         ModelAction *prev = get_last_conflict(act);
758         if (prev == NULL)
759                 return;
760
761         Node *node = prev->get_node()->get_parent();
762
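        /* If the current action's thread is enabled at the conflict point, it
         * is the only backtracking candidate; otherwise, consider every
         * thread. */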
763         int low_tid, high_tid;
764         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
765                 low_tid = id_to_int(act->get_tid());
766                 high_tid = low_tid + 1;
767         } else {
768                 low_tid = 0;
769                 high_tid = get_num_threads();
770         }
771
772         for (int i = low_tid; i < high_tid; i++) {
773                 thread_id_t tid = int_to_id(i);
774
775                 /* Make sure this thread can be enabled here. */
776                 if (i >= node->get_num_threads())
777                         break;
778
779                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
780                 if (node->enabled_status(tid) != THREAD_ENABLED)
781                         continue;
782
783                 /* Check if this has been explored already */
784                 if (node->has_been_explored(tid))
785                         continue;
786
787                 /* See if fairness allows */
788                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
789                         bool unfair = false;
790                         for (int t = 0; t < node->get_num_threads(); t++) {
791                                 thread_id_t tother = int_to_id(t);
792                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
793                                         unfair = true;
794                                         break;
795                                 }
796                         }
797                         if (unfair)
798                                 continue;
799                 }
800                 /* Cache the latest backtracking point */
801                 set_latest_backtrack(prev);
802
803                 /* If this is a new backtracking point, mark the tree */
804                 if (!node->set_backtrack(tid))
805                         continue;
806                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
807                                         id_to_int(prev->get_tid()),
808                                         id_to_int(t->get_id()));
809                 if (DBG_ENABLED()) {
810                         prev->print();
811                         act->print();
812                 }
813         }
814 }
815
816 /**
817  * @brief Cache a backtracking point as the "most recent", if eligible
818  *
819  * Note that this does not prepare the NodeStack for this backtracking
820  * operation; it only caches the action on a per-execution basis.
821  *
822  * @param act The operation at which we should explore a different next action
823  * (i.e., backtracking point)
824  * @return True, if this action is now the most recent backtracking point;
825  * false otherwise
826  */
827 bool ModelChecker::set_latest_backtrack(ModelAction *act)
828 {
829         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
830                 priv->next_backtrack = act;
831                 return true;
832         }
833         return false;
834 }
835
836 /**
837  * Returns last backtracking point. The model checker will explore a different
838  * path for this point in the next execution.
839  * @return The ModelAction at which the next execution should diverge.
840  */
841 ModelAction * ModelChecker::get_next_backtrack()
842 {
843         ModelAction *next = priv->next_backtrack;
844         priv->next_backtrack = NULL;
845         return next;
846 }
847
848 /**
849  * Processes a read model action.
850  * @param curr is the read model action to process.
851  * @return True if processing this read updates the mo_graph.
852  */
853 bool ModelChecker::process_read(ModelAction *curr)
854 {
855         Node *node = curr->get_node();
856         uint64_t value = VALUE_NONE;
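        /* Loop until a read-from choice sticks; the only repeated case is
         * READ_FROM_PAST, when the recency check fails and another past write
         * remains to be tried. */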
857         while (true) {
858                 bool updated = false;
859                 switch (node->get_read_from_status()) {
860                 case READ_FROM_PAST: {
861                         const ModelAction *rf = node->get_read_from_past();
862                         ASSERT(rf);
863
864                         mo_graph->startChanges();
865
866                         ASSERT(!is_infeasible());
867                         if (!check_recency(curr, rf)) {
868                                 if (node->increment_read_from()) {
869                                         mo_graph->rollbackChanges();
870                                         continue;
871                                 } else {
872                                         priv->too_many_reads = true;
873                                 }
874                         }
875
876                         updated = r_modification_order(curr, rf);
877                         value = rf->get_value();
878                         read_from(curr, rf);
879                         mo_graph->commitChanges();
880                         mo_check_promises(curr, true);
881                         break;
882                 }
883                 case READ_FROM_PROMISE: {
884                         Promise *promise = curr->get_node()->get_read_from_promise();
885                         promise->add_reader(curr);
886                         value = promise->get_value();
887                         curr->set_read_from_promise(promise);
888                         mo_graph->startChanges();
889                         if (!check_recency(curr, promise))
890                                 priv->too_many_reads = true;
891                         updated = r_modification_order(curr, promise);
892                         mo_graph->commitChanges();
893                         break;
894                 }
895                 case READ_FROM_FUTURE: {
896                         /* Read from future value */
897                         struct future_value fv = node->get_future_value();
898                         Promise *promise = new Promise(curr, fv);
899                         value = fv.value;
900                         curr->set_read_from_promise(promise);
901                         promises->push_back(promise);
902                         mo_graph->startChanges();
903                         updated = r_modification_order(curr, promise);
904                         mo_graph->commitChanges();
905                         break;
906                 }
907                 default:
908                         ASSERT(false);
909                 }
910                 get_thread(curr)->set_return_value(value);
911                 return updated;
912         }
913 }
914
915 /**
916  * Processes a lock, trylock, unlock, wait, or notify model action.
917  * @param curr The mutex-related model action to process.
918  *
919  * The try lock operation checks whether the lock is taken.  If not,
920  * it falls to the normal lock operation case.  If so, it returns
921  * fail.
922  *
923  * The lock operation has already been checked that it is enabled, so
924  * it just grabs the lock and synchronizes with the previous unlock.
925  *
926  * The unlock operation has to re-enable all of the threads that are
927  * waiting on the lock.
928  *
929  * @return True if synchronization was updated; false otherwise
930  */
931 bool ModelChecker::process_mutex(ModelAction *curr)
932 {
933         std::mutex *mutex = NULL;
934         struct std::mutex_state *state = NULL;
935
936         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
937                 mutex = (std::mutex *)curr->get_location();
938                 state = mutex->get_state();
939         } else if (curr->is_wait()) {
940                 mutex = (std::mutex *)curr->get_value();
941                 state = mutex->get_state();
942         }
943
944         switch (curr->get_type()) {
945         case ATOMIC_TRYLOCK: {
946                 bool success = !state->islocked;
947                 curr->set_try_lock(success);
948                 if (!success) {
949                         get_thread(curr)->set_return_value(0);
950                         break;
951                 }
952                 get_thread(curr)->set_return_value(1);
953         }
954                 //otherwise fall into the lock case
955         case ATOMIC_LOCK: {
956                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
957                         assert_bug("Lock access before initialization");
958                 state->islocked = true;
959                 ModelAction *unlock = get_last_unlock(curr);
960                 //synchronize with the previous unlock statement
961                 if (unlock != NULL) {
962                         curr->synchronize_with(unlock);
963                         return true;
964                 }
965                 break;
966         }
967         case ATOMIC_UNLOCK: {
968                 //unlock the lock
969                 state->islocked = false;
970                 //wake up the other threads
971                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
972                 //activate all the waiting threads
973                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
974                         scheduler->wake(get_thread(*rit));
975                 }
976                 waiters->clear();
977                 break;
978         }
979         case ATOMIC_WAIT: {
980                 //unlock the lock
981                 state->islocked = false;
982                 //wake up the other threads
983                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
984                 //activate all the waiting threads
985                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
986                         scheduler->wake(get_thread(*rit));
987                 }
988                 waiters->clear();
989                 //check whether we should go to sleep or not... simulate spurious wake-ups
990                 if (curr->get_node()->get_misc() == 0) {
991                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
992                         //disable us
993                         scheduler->sleep(get_thread(curr));
994                 }
995                 break;
996         }
997         case ATOMIC_NOTIFY_ALL: {
998                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
999                 //activate all the waiting threads
1000                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1001                         scheduler->wake(get_thread(*rit));
1002                 }
1003                 waiters->clear();
1004                 break;
1005         }
1006         case ATOMIC_NOTIFY_ONE: {
1007                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1008                 int wakeupthread = curr->get_node()->get_misc();
1009                 action_list_t::iterator it = waiters->begin();
1010                 advance(it, wakeupthread);
1011                 scheduler->wake(get_thread(*it));
1012                 waiters->erase(it);
1013                 break;
1014         }
1015
1016         default:
1017                 ASSERT(0);
1018         }
1019         return false;
1020 }
1021
1022 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1023 {
1024         /* Do more ambitious checks now that mo is more complete */
1025         if (mo_may_allow(writer, reader)) {
1026                 Node *node = reader->get_node();
1027
1028                 /* Find an ancestor thread which exists at the time of the reader */
1029                 Thread *write_thread = get_thread(writer);
1030                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1031                         write_thread = write_thread->get_parent();
1032
1033                 struct future_value fv = {
1034                         writer->get_write_value(),
1035                         writer->get_seq_number() + params.maxfuturedelay,
1036                         write_thread->get_id(),
1037                 };
1038                 if (node->add_future_value(fv))
1039                         set_latest_backtrack(reader);
1040         }
1041 }
1042
1043 /**
1044  * Process a write ModelAction
1045  * @param curr The ModelAction to process
1046  * @return True if the mo_graph was updated or promises were resolved
1047  */
1048 bool ModelChecker::process_write(ModelAction *curr)
1049 {
1050         /* Readers to which we may send our future value */
1051         std::vector< ModelAction *, ModelAlloc<ModelAction *> > send_fv;
1052
1053         bool updated_mod_order = w_modification_order(curr, &send_fv);
1054         int promise_idx = get_promise_to_resolve(curr);
1055         const ModelAction *earliest_promise_reader;
1056         bool updated_promises = false;
1057
1058         if (promise_idx >= 0) {
1059                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1060                 updated_promises = resolve_promise(curr, promise_idx);
1061         } else
1062                 earliest_promise_reader = NULL;
1063
1064         /* Don't send future values to reads after the Promise we resolve */
1065         for (unsigned int i = 0; i < send_fv.size(); i++) {
1066                 ModelAction *read = send_fv[i];
1067                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1068                         futurevalues->push_back(PendingFutureValue(curr, read));
1069         }
1070
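        /* Once no promises remain outstanding, flush the queued future values
         * to their readers */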
1071         if (promises->size() == 0) {
1072                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1073                         struct PendingFutureValue pfv = (*futurevalues)[i];
1074                         add_future_value(pfv.writer, pfv.act);
1075                 }
1076                 futurevalues->clear();
1077         }
1078
1079         mo_graph->commitChanges();
1080         mo_check_promises(curr, false);
1081
1082         get_thread(curr)->set_return_value(VALUE_NONE);
1083         return updated_mod_order || updated_promises;
1084 }
1085
1086 /**
1087  * Process a fence ModelAction
1088  * @param curr The ModelAction to process
1089  * @return True if synchronization was updated
1090  */
1091 bool ModelChecker::process_fence(ModelAction *curr)
1092 {
1093         /*
1094          * fence-relaxed: no-op
1095          * fence-release: only log the occurrence (not in this function), for
1096          *   use in later synchronization
1097          * fence-acquire (this function): search for hypothetical release
1098          *   sequences
1099          */
1100         bool updated = false;
1101         if (curr->is_acquire()) {
1102                 action_list_t *list = action_trace;
1103                 action_list_t::reverse_iterator rit;
1104                 /* Find X : is_read(X) && X --sb-> curr */
1105                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1106                         ModelAction *act = *rit;
1107                         if (act == curr)
1108                                 continue;
1109                         if (act->get_tid() != curr->get_tid())
1110                                 continue;
1111                         /* Stop at the beginning of the thread */
1112                         if (act->is_thread_start())
1113                                 break;
1114                         /* Stop once we reach a prior fence-acquire */
1115                         if (act->is_fence() && act->is_acquire())
1116                                 break;
1117                         if (!act->is_read())
1118                                 continue;
1119                         /* read-acquire will find its own release sequences */
1120                         if (act->is_acquire())
1121                                 continue;
1122
1123                         /* Establish hypothetical release sequences */
1124                         rel_heads_list_t release_heads;
1125                         get_release_seq_heads(curr, act, &release_heads);
1126                         for (unsigned int i = 0; i < release_heads.size(); i++)
1127                                 if (!curr->synchronize_with(release_heads[i]))
1128                                         set_bad_synchronization();
1129                         if (release_heads.size() != 0)
1130                                 updated = true;
1131                 }
1132         }
1133         return updated;
1134 }
1135
1136 /**
1137  * @brief Process the current action for thread-related activity
1138  *
1139  * Performs current-action processing for a THREAD_* ModelAction. Processing
1140  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1141  * synchronization, etc.  This function is a no-op for non-THREAD actions
1142  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1143  *
1144  * @param curr The current action
1145  * @return True if synchronization was updated or a thread completed
1146  */
1147 bool ModelChecker::process_thread_action(ModelAction *curr)
1148 {
1149         bool updated = false;
1150
1151         switch (curr->get_type()) {
1152         case THREAD_CREATE: {
1153                 thrd_t *thrd = (thrd_t *)curr->get_location();
1154                 struct thread_params *params = (struct thread_params *)curr->get_value();
1155                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1156                 add_thread(th);
1157                 th->set_creation(curr);
1158                 /* Promises can be satisfied by children */
1159                 for (unsigned int i = 0; i < promises->size(); i++) {
1160                         Promise *promise = (*promises)[i];
1161                         if (promise->thread_is_available(curr->get_tid()))
1162                                 promise->add_thread(th->get_id());
1163                 }
1164                 break;
1165         }
1166         case THREAD_JOIN: {
1167                 Thread *blocking = curr->get_thread_operand();
1168                 ModelAction *act = get_last_action(blocking->get_id());
1169                 curr->synchronize_with(act);
1170                 updated = true; /* trigger rel-seq checks */
1171                 break;
1172         }
1173         case THREAD_FINISH: {
1174                 Thread *th = get_thread(curr);
1175                 while (!th->wait_list_empty()) {
1176                         ModelAction *act = th->pop_wait_list();
1177                         scheduler->wake(get_thread(act));
1178                 }
1179                 th->complete();
1180                 /* Completed thread can't satisfy promises */
1181                 for (unsigned int i = 0; i < promises->size(); i++) {
1182                         Promise *promise = (*promises)[i];
1183                         if (promise->thread_is_available(th->get_id()))
1184                                 if (promise->eliminate_thread(th->get_id()))
1185                                         priv->failed_promise = true;
1186                 }
1187                 updated = true; /* trigger rel-seq checks */
1188                 break;
1189         }
1190         case THREAD_START: {
1191                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1192                 break;
1193         }
1194         default:
1195                 break;
1196         }
1197
1198         return updated;
1199 }
1200
1201 /**
1202  * @brief Process the current action for release sequence fixup activity
1203  *
1204  * Performs model-checker release sequence fixups for the current action,
1205  * forcing a single pending release sequence to break (with a given, potential
1206  * "loose" write) or to complete (i.e., synchronize). If a pending release
1207  * sequence forms a complete release sequence, then we must perform the fixup
1208  * synchronization, mo_graph additions, etc.
1209  *
1210  * @param curr The current action; must be a release sequence fixup action
1211  * @param work_queue The work queue to which to add work items as they are
1212  * generated
1213  */
1214 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1215 {
1216         const ModelAction *write = curr->get_node()->get_relseq_break();
1217         struct release_seq *sequence = pending_rel_seqs->back();
1218         pending_rel_seqs->pop_back();
1219         ASSERT(sequence);
1220         ModelAction *acquire = sequence->acquire;
1221         const ModelAction *rf = sequence->rf;
1222         const ModelAction *release = sequence->release;
1223         ASSERT(acquire);
1224         ASSERT(release);
1225         ASSERT(rf);
1226         ASSERT(release->same_thread(rf));
1227
1228         if (write == NULL) {
1229                 /**
1230                  * @todo Forcing a synchronization requires that we set
1231                  * modification order constraints. For instance, we can't allow
1232                  * a fixup sequence in which two separate read-acquire
1233                  * operations read from the same sequence, where the first one
1234                  * synchronizes and the other doesn't. Essentially, we can't
1235                  * allow any writes to insert themselves between 'release' and
1236                  * 'rf'
1237                  */
1238
1239                 /* Must synchronize */
1240                 if (!acquire->synchronize_with(release)) {
1241                         set_bad_synchronization();
1242                         return;
1243                 }
1244                 /* Re-check all pending release sequences */
1245                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1246                 /* Re-check act for mo_graph edges */
1247                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1248
1249                 /* propagate synchronization to later actions */
1250                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1251                 for (; (*rit) != acquire; rit++) {
1252                         ModelAction *propagate = *rit;
1253                         if (acquire->happens_before(propagate)) {
1254                                 propagate->synchronize_with(acquire);
1255                                 /* Re-check 'propagate' for mo_graph edges */
1256                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1257                         }
1258                 }
1259         } else {
1260                 /* Break release sequence with new edges:
1261                  *   release --mo--> write --mo--> rf */
1262                 mo_graph->addEdge(release, write);
1263                 mo_graph->addEdge(write, rf);
1264         }
1265
1266         /* See if we have realized a data race */
1267         checkDataRaces();
1268 }
1269
1270 /**
1271  * Initialize the current action by performing one or more of the following
1272  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1273  * in the NodeStack, manipulating backtracking sets, allocating and
1274  * initializing clock vectors, and computing the promises to fulfill.
1275  *
1276  * @param curr The current action, as passed from the user context; may be
1277  * freed/invalidated after the execution of this function, with a different
1278  * action "returned" in its place (pass-by-reference)
1279  * @return True if curr is a newly-explored action; false otherwise
1280  */
1281 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1282 {
1283         ModelAction *newcurr;
1284
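        /* The RMW/RMWC completes an earlier RMWR, so it is folded into that
         * existing action and the freshly-created one is discarded. */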
1285         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1286                 newcurr = process_rmw(*curr);
1287                 delete *curr;
1288
1289                 if (newcurr->is_rmw())
1290                         compute_promises(newcurr);
1291
1292                 *curr = newcurr;
1293                 return false;
1294         }
1295
1296         (*curr)->set_seq_number(get_next_seq_num());
1297
1298         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1299         if (newcurr) {
1300                 /* First restore type and order in case of RMW operation */
1301                 if ((*curr)->is_rmwr())
1302                         newcurr->copy_typeandorder(*curr);
1303
1304                 ASSERT((*curr)->get_location() == newcurr->get_location());
1305                 newcurr->copy_from_new(*curr);
1306
1307                 /* Discard duplicate ModelAction; use action from NodeStack */
1308                 delete *curr;
1309
1310                 /* Always compute new clock vector */
1311                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1312
1313                 *curr = newcurr;
1314                 return false; /* Action was explored previously */
1315         } else {
1316                 newcurr = *curr;
1317
1318                 /* Always compute new clock vector */
1319                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1320
1321                 /* Assign most recent release fence */
1322                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1323
1324                 /*
1325                  * Perform one-time actions when pushing new ModelAction onto
1326                  * NodeStack
1327                  */
1328                 if (newcurr->is_write())
1329                         compute_promises(newcurr);
1330                 else if (newcurr->is_relseq_fixup())
1331                         compute_relseq_breakwrites(newcurr);
1332                 else if (newcurr->is_wait())
1333                         newcurr->get_node()->set_misc_max(2);
1334                 else if (newcurr->is_notify_one()) {
1335                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1336                 }
1337                 return true; /* This was a new ModelAction */
1338         }
1339 }
1340
1341 /**
1342  * @brief Establish reads-from relation between two actions
1343  *
1344  * Perform basic operations involved with establishing a concrete rf relation,
1345  * including setting the ModelAction data and checking for release sequences.
1346  *
1347  * @param act The action that is reading (must be a read)
1348  * @param rf The action from which we are reading (must be a write)
1349  *
1350  * @return True if this read established synchronization
1351  */
1352 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1353 {
1354         ASSERT(rf);
1355         act->set_read_from(rf);
1356         if (act->is_acquire()) {
1357                 rel_heads_list_t release_heads;
1358                 get_release_seq_heads(act, act, &release_heads);
1359                 int num_heads = release_heads.size();
1360                 for (unsigned int i = 0; i < release_heads.size(); i++)
1361                         if (!act->synchronize_with(release_heads[i])) {
1362                                 set_bad_synchronization();
1363                                 num_heads--;
1364                         }
1365                 return num_heads > 0;
1366         }
1367         return false;
1368 }
1369
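/*
 * For illustration, a minimal (hypothetical) message-passing test in which
 * read_from() establishes synchronization.  Standard C++11 syntax is used
 * purely as a sketch; the names are not taken from this source tree.  When
 * the acquire load reads from the release store, the acquire action
 * synchronizes with the head of the release sequence, so the relaxed access
 * to 'data' is ordered and the assertion cannot fail.
 *
 *   #include <atomic>
 *   #include <cassert>
 *   #include <thread>
 *
 *   std::atomic<int> data(0);
 *   std::atomic<int> flag(0);
 *
 *   static void producer()
 *   {
 *           data.store(42, std::memory_order_relaxed);
 *           flag.store(1, std::memory_order_release);  // release head
 *   }
 *
 *   static void consumer()
 *   {
 *           if (flag.load(std::memory_order_acquire) == 1)  // acquire read
 *                   assert(data.load(std::memory_order_relaxed) == 42);
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(producer), t2(consumer);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */
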
1370 /**
1371  * Check promises and eliminate potentially-satisfying threads when a thread is
1372  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1373  * no longer satisfy a promise generated from that thread.
1374  *
1375  * @param blocker The thread on which a thread is waiting
1376  * @param waiting The waiting thread
1377  */
1378 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1379 {
1380         for (unsigned int i = 0; i < promises->size(); i++) {
1381                 Promise *promise = (*promises)[i];
1382                 if (!promise->thread_is_available(waiting->get_id()))
1383                         continue;
1384                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1385                         ModelAction *reader = promise->get_reader(j);
1386                         if (reader->get_tid() != blocker->get_id())
1387                                 continue;
1388                         if (promise->eliminate_thread(waiting->get_id())) {
1389                                 /* Promise has failed */
1390                                 priv->failed_promise = true;
1391                         } else {
1392                                 /* Only eliminate the 'waiting' thread once */
1393                                 return;
1394                         }
1395                 }
1396         }
1397 }
1398
1399 /**
1400  * @brief Check whether a model action is enabled.
1401  *
1402  * Checks whether a lock or join operation would be successful (i.e., whether
1403  * the lock is currently unlocked, or whether the joined thread has already
1404  * completed). If not, the action is placed in the appropriate waiter list.
1405  *
1406  * @param curr is the ModelAction to check whether it is enabled.
1407  * @return a bool that indicates whether the action is enabled.
1408  */
1409 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1410         if (curr->is_lock()) {
1411                 std::mutex *lock = (std::mutex *)curr->get_location();
1412                 struct std::mutex_state *state = lock->get_state();
1413                 if (state->islocked) {
1414                         //Stick the action in the appropriate waiting queue
1415                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1416                         return false;
1417                 }
1418         } else if (curr->get_type() == THREAD_JOIN) {
1419                 Thread *blocking = (Thread *)curr->get_location();
1420                 if (!blocking->is_complete()) {
1421                         blocking->push_wait_list(curr);
1422                         thread_blocking_check_promises(blocking, get_thread(curr));
1423                         return false;
1424                 }
1425         }
1426
1427         return true;
1428 }
1429
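/*
 * For illustration, a minimal (hypothetical) test in which an action can be
 * disabled.  Standard C++11 syntax is used purely as a sketch; the names are
 * not taken from this source tree.  If the scheduler runs the second locker
 * while the mutex is held, check_action_enabled() parks the lock action on
 * the mutex's waiter list; similarly, the join in main() is disabled until
 * the joined thread completes.
 *
 *   #include <mutex>
 *   #include <thread>
 *
 *   std::mutex m;
 *   int counter = 0;
 *
 *   static void worker()
 *   {
 *           m.lock();     // disabled (and queued) while m is held
 *           counter++;
 *           m.unlock();
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(worker), t2(worker);
 *           t1.join();    // disabled until t1 is complete
 *           t2.join();
 *           return 0;
 *   }
 */
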
1430 /**
1431  * This is the heart of the model checker routine. It performs model-checking
1432  * actions corresponding to a given "current action." Among other processes, it
1433  * calculates reads-from relationships, updates synchronization clock vectors,
1434  * forms a memory_order constraints graph, and handles replay/backtrack
1435  * execution when running permutations of previously-observed executions.
1436  *
1437  * @param curr The current action to process
1438  * @return The ModelAction that is actually executed; may be different than
1439  * curr; may be NULL, if the current action is not enabled to run
1440  */
1441 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1442 {
1443         ASSERT(curr);
1444         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1445
1446         if (!check_action_enabled(curr)) {
1447                 /* Make the execution look like we chose to run this action
1448                  * much later, when a lock/join can succeed */
1449                 get_thread(curr)->set_pending(curr);
1450                 scheduler->sleep(get_thread(curr));
1451                 return NULL;
1452         }
1453
1454         bool newly_explored = initialize_curr_action(&curr);
1455
1456         DBG();
1457         if (DBG_ENABLED())
1458                 curr->print();
1459
1460         wake_up_sleeping_actions(curr);
1461
1462         /* Add the action to lists before any other model-checking tasks */
1463         if (!second_part_of_rmw)
1464                 add_action_to_lists(curr);
1465
1466         /* Build may_read_from set for newly-created actions */
1467         if (newly_explored && curr->is_read())
1468                 build_may_read_from(curr);
1469
1470         /* Initialize work_queue with the "current action" work */
1471         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1472         while (!work_queue.empty() && !has_asserted()) {
1473                 WorkQueueEntry work = work_queue.front();
1474                 work_queue.pop_front();
1475
1476                 switch (work.type) {
1477                 case WORK_CHECK_CURR_ACTION: {
1478                         ModelAction *act = work.action;
1479                         bool update = false; /* update this location's release seq's */
1480                         bool update_all = false; /* update all release seq's */
1481
1482                         if (process_thread_action(curr))
1483                                 update_all = true;
1484
1485                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1486                                 update = true;
1487
1488                         if (act->is_write() && process_write(act))
1489                                 update = true;
1490
1491                         if (act->is_fence() && process_fence(act))
1492                                 update_all = true;
1493
1494                         if (act->is_mutex_op() && process_mutex(act))
1495                                 update_all = true;
1496
1497                         if (act->is_relseq_fixup())
1498                                 process_relseq_fixup(curr, &work_queue);
1499
1500                         if (update_all)
1501                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1502                         else if (update)
1503                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1504                         break;
1505                 }
1506                 case WORK_CHECK_RELEASE_SEQ:
1507                         resolve_release_sequences(work.location, &work_queue);
1508                         break;
1509                 case WORK_CHECK_MO_EDGES: {
1510                         /** @todo Complete verification of work_queue */
1511                         ModelAction *act = work.action;
1512                         bool updated = false;
1513
1514                         if (act->is_read()) {
1515                                 const ModelAction *rf = act->get_reads_from();
1516                                 const Promise *promise = act->get_reads_from_promise();
1517                                 if (rf) {
1518                                         if (r_modification_order(act, rf))
1519                                                 updated = true;
1520                                 } else if (promise) {
1521                                         if (r_modification_order(act, promise))
1522                                                 updated = true;
1523                                 }
1524                         }
1525                         if (act->is_write()) {
1526                                 if (w_modification_order(act, NULL))
1527                                         updated = true;
1528                         }
1529                         mo_graph->commitChanges();
1530
1531                         if (updated)
1532                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1533                         break;
1534                 }
1535                 default:
1536                         ASSERT(false);
1537                         break;
1538                 }
1539         }
1540
1541         check_curr_backtracking(curr);
1542         set_backtracking(curr);
1543         return curr;
1544 }
1545
1546 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1547 {
1548         Node *currnode = curr->get_node();
1549         Node *parnode = currnode->get_parent();
1550
1551         if ((parnode && !parnode->backtrack_empty()) ||
1552                          !currnode->misc_empty() ||
1553                          !currnode->read_from_empty() ||
1554                          !currnode->promise_empty() ||
1555                          !currnode->relseq_break_empty()) {
1556                 set_latest_backtrack(curr);
1557         }
1558 }
1559
1560 bool ModelChecker::promises_expired() const
1561 {
1562         for (unsigned int i = 0; i < promises->size(); i++) {
1563                 Promise *promise = (*promises)[i];
1564                 if (promise->get_expiration() < priv->used_sequence_numbers)
1565                         return true;
1566         }
1567         return false;
1568 }
1569
1570 /**
1571  * This is the strongest feasibility check available.
1572  * @return whether the current trace (partial or complete) must be a prefix of
1573  * a feasible trace.
1574  */
1575 bool ModelChecker::isfeasibleprefix() const
1576 {
1577         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1578 }
1579
1580 /**
1581  * Print diagnostic information about an infeasible execution
1582  * @param prefix A string to prefix the output with; if NULL, then a default
1583  * message prefix will be provided
1584  */
1585 void ModelChecker::print_infeasibility(const char *prefix) const
1586 {
1587         char buf[160]; /* large enough to hold every infeasibility message at once */
1588         char *ptr = buf;
1589         if (mo_graph->checkForCycles())
1590                 ptr += sprintf(ptr, "[mo cycle]");
1591         if (priv->failed_promise)
1592                 ptr += sprintf(ptr, "[failed promise]");
1593         if (priv->too_many_reads)
1594                 ptr += sprintf(ptr, "[too many reads]");
1595         if (priv->no_valid_reads)
1596                 ptr += sprintf(ptr, "[no valid reads-from]");
1597         if (priv->bad_synchronization)
1598                 ptr += sprintf(ptr, "[bad sw ordering]");
1599         if (promises_expired())
1600                 ptr += sprintf(ptr, "[promise expired]");
1601         if (promises->size() != 0)
1602                 ptr += sprintf(ptr, "[unresolved promise]");
1603         if (ptr != buf)
1604                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1605 }
1606
1607 /**
1608  * Returns whether the current completed trace is feasible, except for pending
1609  * release sequences.
1610  */
1611 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1612 {
1613         return !is_infeasible() && promises->size() == 0;
1614 }
1615
1616 /**
1617  * Check if the current partial trace is infeasible. Does not check any
1618  * end-of-execution flags, which might additionally rule out the execution. Thus,
1619  * this is useful only for ruling an execution infeasible, not for certifying it feasible.
1620  * @return whether the current partial trace is infeasible.
1621  */
1622 bool ModelChecker::is_infeasible() const
1623 {
1624         return mo_graph->checkForCycles() ||
1625                 priv->no_valid_reads ||
1626                 priv->failed_promise ||
1627                 priv->too_many_reads ||
1628                 priv->bad_synchronization ||
1629                 promises_expired();
1630 }
1631
1632 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1633 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1634         ModelAction *lastread = get_last_action(act->get_tid());
1635         lastread->process_rmw(act);
1636         if (act->is_rmw()) {
1637                 if (lastread->get_reads_from())
1638                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1639                 else
1640                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1641                 mo_graph->commitChanges();
1642         }
1643         return lastread;
1644 }
1645
1646 /**
1647  * A helper function for ModelChecker::check_recency, to check whether the current
1648  * thread could have read from a different write/promise for the past 'params.maxreads'
1649  * steps and whether that write/promise should have become visible by now (i.e., is
1650  * ordered later in the modification order). This helps model memory liveness.
1651  *
1652  * @param curr The current action. Must be a read.
1653  * @param rf The write/promise from which we plan to read
1654  * @param other_rf The write/promise from which we may read
1655  * @return True if we were able to read from other_rf for params.maxreads steps
1656  */
1657 template <typename T, typename U>
1658 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1659 {
1660         /* Need a different write/promise */
1661         if (other_rf->equals(rf))
1662                 return false;
1663
1664         /* Only look for "newer" writes/promises */
1665         if (!mo_graph->checkReachable(rf, other_rf))
1666                 return false;
1667
1668         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1669         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1670         action_list_t::reverse_iterator rit = list->rbegin();
1671         ASSERT((*rit) == curr);
1672         /* Skip past curr */
1673         rit++;
1674
1675         /* Does this write/promise work for everyone? */
1676         for (int i = 0; i < params.maxreads; i++, rit++) {
1677                 ModelAction *act = *rit;
1678                 if (!act->may_read_from(other_rf))
1679                         return false;
1680         }
1681         return true;
1682 }
1683
1684 /**
1685  * Checks whether a thread has read from the same write or Promise too many
1686  * times without seeing the effects of a later write/Promise.
1687  *
1688  * Basic idea:
1689  * 1) there must be a different write/promise that we could read from,
1690  * 2) we must have read from the same write/promise in excess of maxreads times,
1691  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1692  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1693  *
1694  * If so, we decide that the execution is no longer feasible.
1695  *
1696  * @param curr The current action. Must be a read.
1697  * @param rf The ModelAction/Promise from which we might read.
1698  * @return True if the read should succeed; false otherwise
1699  */
1700 template <typename T>
1701 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1702 {
1703         if (!params.maxreads)
1704                 return true;
1705
1706         //NOTE: The next check is just an optimization; it is not strictly necessary.
1707         if (curr->get_node()->get_read_from_past_size() +
1708                         curr->get_node()->get_read_from_promise_size() <= 1)
1709                 return true;
1710
1711         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1712         int tid = id_to_int(curr->get_tid());
1713         ASSERT(tid < (int)thrd_lists->size());
1714         action_list_t *list = &(*thrd_lists)[tid];
1715         action_list_t::reverse_iterator rit = list->rbegin();
1716         ASSERT((*rit) == curr);
1717         /* Skip past curr */
1718         rit++;
1719
1720         action_list_t::reverse_iterator ritcopy = rit;
1721         /* See if we have enough reads from the same value */
1722         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1723                 if (ritcopy == list->rend())
1724                         return true;
1725                 ModelAction *act = *ritcopy;
1726                 if (!act->is_read())
1727                         return true;
1728                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1729                         return true;
1730                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1731                         return true;
1732                 if (act->get_node()->get_read_from_past_size() +
1733                                 act->get_node()->get_read_from_promise_size() <= 1)
1734                         return true;
1735         }
1736         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1737                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1738                 if (should_read_instead(curr, rf, write))
1739                         return false; /* liveness failure */
1740         }
1741         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1742                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1743                 if (should_read_instead(curr, rf, promise))
1744                         return false; /* liveness failure */
1745         }
1746         return true;
1747 }
1748
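/*
 * For illustration, a minimal (hypothetical) spin-wait test that the maxreads
 * machinery above is designed to bound.  Standard C++11 syntax is used purely
 * as a sketch; the names are not taken from this source tree.  Once the store
 * of 1 exists, an execution in which the waiter keeps re-reading the stale 0
 * more than params.maxreads times, even though the newer store is available,
 * is treated as a liveness failure rather than explored indefinitely.
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> ready(0);
 *
 *   static void waiter()
 *   {
 *           while (ready.load(std::memory_order_relaxed) == 0)
 *                   ;  // each iteration is a fresh relaxed load
 *   }
 *
 *   static void setter()
 *   {
 *           ready.store(1, std::memory_order_relaxed);
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(waiter), t2(setter);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */
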
1749 /**
1750  * Updates the mo_graph with the constraints imposed from the current
1751  * read.
1752  *
1753  * Basic idea is the following: Go through each other thread and find
1754  * the last action that happened before our read.  Two cases:
1755  *
1756  * (1) The action is a write => that write must either occur before
1757  * the write we read from or be the write we read from.
1758  *
1759  * (2) The action is a read => the write from which that action read
1760  * must occur before the write we read from or be the same write.
1761  *
1762  * @param curr The current action. Must be a read.
1763  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1764  * @return True if modification order edges were added; false otherwise
1765  */
1766 template <typename rf_type>
1767 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1768 {
1769         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1770         unsigned int i;
1771         bool added = false;
1772         ASSERT(curr->is_read());
1773
1774         /* Last SC fence in the current thread */
1775         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1776
1777         /* Iterate over all threads */
1778         for (i = 0; i < thrd_lists->size(); i++) {
1779                 /* Last SC fence in thread i */
1780                 ModelAction *last_sc_fence_thread_local = NULL;
1781                 if (int_to_id((int)i) != curr->get_tid())
1782                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1783
1784                 /* Last SC fence in thread i, before last SC fence in current thread */
1785                 ModelAction *last_sc_fence_thread_before = NULL;
1786                 if (last_sc_fence_local)
1787                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1788
1789                 /* Iterate over actions in thread, starting from most recent */
1790                 action_list_t *list = &(*thrd_lists)[i];
1791                 action_list_t::reverse_iterator rit;
1792                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1793                         ModelAction *act = *rit;
1794
1795                         if (act->is_write() && !act->equals(rf) && act != curr) {
1796                                 /* C++, Section 29.3 statement 5 */
1797                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1798                                                 *act < *last_sc_fence_thread_local) {
1799                                         added = mo_graph->addEdge(act, rf) || added;
1800                                         break;
1801                                 }
1802                                 /* C++, Section 29.3 statement 4 */
1803                                 else if (act->is_seqcst() && last_sc_fence_local &&
1804                                                 *act < *last_sc_fence_local) {
1805                                         added = mo_graph->addEdge(act, rf) || added;
1806                                         break;
1807                                 }
1808                                 /* C++, Section 29.3 statement 6 */
1809                                 else if (last_sc_fence_thread_before &&
1810                                                 *act < *last_sc_fence_thread_before) {
1811                                         added = mo_graph->addEdge(act, rf) || added;
1812                                         break;
1813                                 }
1814                         }
1815
1816                         /*
1817                          * Include at most one act per-thread that "happens
1818                          * before" curr. Don't consider reflexively.
1819                          */
1820                         if (act->happens_before(curr) && act != curr) {
1821                                 if (act->is_write()) {
1822                                         if (!act->equals(rf)) {
1823                                                 added = mo_graph->addEdge(act, rf) || added;
1824                                         }
1825                                 } else {
1826                                         const ModelAction *prevrf = act->get_reads_from();
1827                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1828                                         if (prevrf) {
1829                                                 if (!prevrf->equals(rf))
1830                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1831                                         } else if (!prevrf_promise->equals(rf)) {
1832                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1833                                         }
1834                                 }
1835                                 break;
1836                         }
1837                 }
1838         }
1839
1840         /*
1841          * All compatible, thread-exclusive promises must be ordered after any
1842          * concrete loads from the same thread
1843          */
1844         for (unsigned int i = 0; i < promises->size(); i++)
1845                 if ((*promises)[i]->is_compatible_exclusive(curr))
1846                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1847
1848         return added;
1849 }
1850
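/*
 * For illustration, a minimal (hypothetical) example of case (1) above.
 * Standard C++11 syntax is used purely as a sketch; the names are not taken
 * from this source tree.  If the load R reads 2 (from W2), then W1
 * happens-before R but is not the store R read from, so
 * r_modification_order() adds the edge W1 --mo--> W2.
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   static void thread_a()
 *   {
 *           x.store(1, std::memory_order_relaxed);       // W1
 *           int r = x.load(std::memory_order_relaxed);   // R
 *           (void)r;
 *   }
 *
 *   static void thread_b()
 *   {
 *           x.store(2, std::memory_order_relaxed);       // W2
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(thread_a), t2(thread_b);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */
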
1851 /**
1852  * Updates the mo_graph with the constraints imposed from the current write.
1853  *
1854  * Basic idea is the following: Go through each other thread and find
1855  * the latest action that happened before our write.  Two cases:
1856  *
1857  * (1) The action is a write => that write must occur before
1858  * the current write
1859  *
1860  * (2) The action is a read => the write from which that action read
1861  * must occur before the current write.
1862  *
1863  * This method also handles two other issues:
1864  *
1865  * (I) Sequential Consistency: Making sure that if the current write is
1866  * seq_cst, that it occurs after the previous seq_cst write.
1867  *
1868  * (II) Sending the write back to non-synchronizing reads.
1869  *
1870  * @param curr The current action. Must be a write.
1871  * @param send_fv A vector for stashing reads to which we may pass our future
1872  * value. If NULL, then don't record any future values.
1873  * @return True if modification order edges were added; false otherwise
1874  */
1875 bool ModelChecker::w_modification_order(ModelAction *curr, std::vector< ModelAction *, ModelAlloc<ModelAction *> > *send_fv)
1876 {
1877         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1878         unsigned int i;
1879         bool added = false;
1880         ASSERT(curr->is_write());
1881
1882         if (curr->is_seqcst()) {
1883                 /* We have to at least see the last sequentially consistent write,
1884                          so we are initialized. */
1885                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1886                 if (last_seq_cst != NULL) {
1887                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1888                 }
1889         }
1890
1891         /* Last SC fence in the current thread */
1892         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1893
1894         /* Iterate over all threads */
1895         for (i = 0; i < thrd_lists->size(); i++) {
1896                 /* Last SC fence in thread i, before last SC fence in current thread */
1897                 ModelAction *last_sc_fence_thread_before = NULL;
1898                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1899                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1900
1901                 /* Iterate over actions in thread, starting from most recent */
1902                 action_list_t *list = &(*thrd_lists)[i];
1903                 action_list_t::reverse_iterator rit;
1904                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1905                         ModelAction *act = *rit;
1906                         if (act == curr) {
1907                                 /*
1908                                  * 1) If RMW and it actually read from something, then we
1909                                  * already have all relevant edges, so just skip to next
1910                                  * thread.
1911                                  *
1912                                  * 2) If RMW and it didn't read from anything, we should add
1913                                  * whatever edge we can get to speed up convergence.
1914                                  *
1915                                  * 3) If normal write, we need to look at earlier actions, so
1916                                  * continue processing list.
1917                                  */
1918                                 if (curr->is_rmw()) {
1919                                         if (curr->get_reads_from() != NULL)
1920                                                 break;
1921                                         else
1922                                                 continue;
1923                                 } else
1924                                         continue;
1925                         }
1926
1927                         /* C++, Section 29.3 statement 7 */
1928                         if (last_sc_fence_thread_before && act->is_write() &&
1929                                         *act < *last_sc_fence_thread_before) {
1930                                 added = mo_graph->addEdge(act, curr) || added;
1931                                 break;
1932                         }
1933
1934                         /*
1935                          * Include at most one act per-thread that "happens
1936                          * before" curr
1937                          */
1938                         if (act->happens_before(curr)) {
1939                                 /*
1940                                  * Note: if act is RMW, just add edge:
1941                                  *   act --mo--> curr
1942                                  * The following edge should be handled elsewhere:
1943                                  *   readfrom(act) --mo--> act
1944                                  */
1945                                 if (act->is_write())
1946                                         added = mo_graph->addEdge(act, curr) || added;
1947                                 else if (act->is_read()) {
1948                                         //if previous read accessed a null, just keep going
1949                                         if (act->get_reads_from() == NULL)
1950                                                 continue;
1951                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1952                                 }
1953                                 break;
1954                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1955                                                      !act->same_thread(curr)) {
1956                                 /* We have an action that:
1957                                    (1) did not happen before us
1958                                    (2) is a read and we are a write
1959                                    (3) cannot synchronize with us
1960                                    (4) is in a different thread
1961                                    =>
1962                                    that read could potentially read from our write.  Note that
1963                                    these checks are overly conservative at this point, we'll
1964                                    do more checks before actually removing the
1965                                    pendingfuturevalue.
1966
1967                                  */
1968                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1969                                         if (!is_infeasible())
1970                                                 send_fv->push_back(act);
1971                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1972                                                 add_future_value(curr, act);
1973                                 }
1974                         }
1975                 }
1976         }
1977
1978         /*
1979          * All compatible, thread-exclusive promises must be ordered after any
1980          * concrete stores to the same thread, or else they can be merged with
1981          * this store later
1982          */
1983         for (unsigned int i = 0; i < promises->size(); i++)
1984                 if ((*promises)[i]->is_compatible_exclusive(curr))
1985                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1986
1987         return added;
1988 }
1989
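/*
 * For illustration, a minimal (hypothetical) example of the seq_cst case
 * handled above.  Standard C++11 syntax is used purely as a sketch; the names
 * are not taken from this source tree.  When W2 is processed and W1 is the
 * most recent earlier seq_cst store to x in the trace,
 * w_modification_order() adds W1 --mo--> W2, keeping x's modification order
 * consistent with the seq_cst total order.
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   static void thread_a()
 *   {
 *           x.store(1, std::memory_order_seq_cst);   // W1
 *   }
 *
 *   static void thread_b()
 *   {
 *           x.store(2, std::memory_order_seq_cst);   // W2
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(thread_a), t2(thread_b);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */
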
1990 /** Arbitrary reads from the future are not allowed.  Section 29.3
1991  * part 9 places some constraints.  This method checks one consequence of that
1992  * constraint.  Others require compiler support. */
1993 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1994 {
1995         if (!writer->is_rmw())
1996                 return true;
1997
1998         if (!reader->is_rmw())
1999                 return true;
2000
2001         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2002                 if (search == reader)
2003                         return false;
2004                 if (search->get_tid() == reader->get_tid() &&
2005                                 search->happens_before(reader))
2006                         break;
2007         }
2008
2009         return true;
2010 }
2011
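/*
 * For illustration, a minimal (hypothetical) pair of RMWs constrained by the
 * check above.  Standard C++11 syntax is used purely as a sketch; the names
 * are not taken from this source tree.  The two fetch_add operations may read
 * from the initial value or from each other, but not from each other at the
 * same time: that would place the reader on the writer's reads-from chain,
 * which thin_air_constraint_may_allow() rejects when considering future
 * values.
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   static void inc()
 *   {
 *           x.fetch_add(1, std::memory_order_relaxed);
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(inc), t2(inc);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */
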
2012 /**
2013  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2014  * some constraints. This method checks the following constraint (others
2015  * require compiler support):
2016  *
2017  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2018  */
2019 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2020 {
2021         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2022         unsigned int i;
2023         /* Iterate over all threads */
2024         for (i = 0; i < thrd_lists->size(); i++) {
2025                 const ModelAction *write_after_read = NULL;
2026
2027                 /* Iterate over actions in thread, starting from most recent */
2028                 action_list_t *list = &(*thrd_lists)[i];
2029                 action_list_t::reverse_iterator rit;
2030                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2031                         ModelAction *act = *rit;
2032
2033                         /* Don't disallow due to act == reader */
2034                         if (!reader->happens_before(act) || reader == act)
2035                                 break;
2036                         else if (act->is_write())
2037                                 write_after_read = act;
2038                         else if (act->is_read() && act->get_reads_from() != NULL)
2039                                 write_after_read = act->get_reads_from();
2040                 }
2041
2042                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2043                         return false;
2044         }
2045         return true;
2046 }
2047
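/*
 * For illustration, a minimal (hypothetical) instance of the constraint
 * above.  Standard C++11 syntax is used purely as a sketch; the names are not
 * taken from this source tree.  Here X --hb--> Y (sequenced-before in the
 * same thread), so if Y --mo--> Z were established, X would not be allowed to
 * read from Z: a read that happens-before the store Y must read a value that
 * precedes Y in x's modification order, and mo_may_allow() rejects the
 * pairing.
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x(0);
 *
 *   static void thread_a()
 *   {
 *           int r = x.load(std::memory_order_relaxed);   // X (reader)
 *           x.store(1, std::memory_order_relaxed);       // Y
 *           (void)r;
 *   }
 *
 *   static void thread_b()
 *   {
 *           x.store(2, std::memory_order_relaxed);       // Z (writer)
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(thread_a), t2(thread_b);
 *           t1.join();
 *           t2.join();
 *           return 0;
 *   }
 */
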
2048 /**
2049  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2050  * The ModelAction under consideration is expected to be taking part in
2051  * release/acquire synchronization as an object of the "reads from" relation.
2052  * Note that this can only provide release sequence support for RMW chains
2053  * which do not read from the future, as those actions cannot be traced until
2054  * their "promise" is fulfilled. Similarly, we may not even establish the
2055  * presence of a release sequence with certainty, as some modification order
2056  * constraints may be decided further in the future. Thus, this function
2057  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2058  * and a boolean representing certainty.
2059  *
2060  * @param rf The action that might be part of a release sequence. Must be a
2061  * write.
2062  * @param release_heads A pass-by-reference style return parameter. After
2063  * execution of this function, release_heads will contain the heads of all the
2064  * relevant release sequences, if any exist with certainty
2065  * @param pending A pass-by-reference style return parameter which is only used
2066  * when returning false (i.e., uncertain). Returns most information regarding
2067  * an uncertain release sequence, including any write operations that might
2068  * break the sequence.
2069  * @return true, if the ModelChecker is certain that release_heads is complete;
2070  * false otherwise
2071  */
2072 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2073                 rel_heads_list_t *release_heads,
2074                 struct release_seq *pending) const
2075 {
2076         /* Only check for release sequences if there are no cycles */
2077         if (mo_graph->checkForCycles())
2078                 return false;
2079
2080         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2081                 ASSERT(rf->is_write());
2082
2083                 if (rf->is_release())
2084                         release_heads->push_back(rf);
2085                 else if (rf->get_last_fence_release())
2086                         release_heads->push_back(rf->get_last_fence_release());
2087                 if (!rf->is_rmw())
2088                         break; /* End of RMW chain */
2089
2090                 /** @todo Need to be smarter here...  In the linux lock
2091                  * example, this will run to the beginning of the program for
2092                  * every acquire. */
2093                 /** @todo The way to be smarter here is to keep going until 1
2094                  * thread has a release preceded by an acquire and you've seen
2095                  *       both. */
2096
2097                 /* acq_rel RMW is a sufficient stopping condition */
2098                 if (rf->is_acquire() && rf->is_release())
2099                         return true; /* complete */
2100         }
2101         if (!rf) {
2102                 /* read from future: need to settle this later */
2103                 pending->rf = NULL;
2104                 return false; /* incomplete */
2105         }
2106
2107         if (rf->is_release())
2108                 return true; /* complete */
2109
2110         /* else relaxed write
2111          * - check for fence-release in the same thread (29.8, stmt. 3)
2112          * - check modification order for contiguous subsequence
2113          *   -> rf must be same thread as release */
2114
2115         const ModelAction *fence_release = rf->get_last_fence_release();
2116         /* Synchronize with a fence-release unconditionally; we don't need to
2117          * find any more "contiguous subsequence..." for it */
2118         if (fence_release)
2119                 release_heads->push_back(fence_release);
2120
2121         int tid = id_to_int(rf->get_tid());
2122         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2123         action_list_t *list = &(*thrd_lists)[tid];
2124         action_list_t::const_reverse_iterator rit;
2125
2126         /* Find rf in the thread list */
2127         rit = std::find(list->rbegin(), list->rend(), rf);
2128         ASSERT(rit != list->rend());
2129
2130         /* Find the last {write,fence}-release */
2131         for (; rit != list->rend(); rit++) {
2132                 if (fence_release && *(*rit) < *fence_release)
2133                         break;
2134                 if ((*rit)->is_release())
2135                         break;
2136         }
2137         if (rit == list->rend()) {
2138                 /* No write-release in this thread */
2139                 return true; /* complete */
2140         } else if (fence_release && *(*rit) < *fence_release) {
2141                 /* The fence-release is more recent (and so, "stronger") than
2142                  * the most recent write-release */
2143                 return true; /* complete */
2144         } /* else, need to establish contiguous release sequence */
2145         ModelAction *release = *rit;
2146
2147         ASSERT(rf->same_thread(release));
2148
2149         pending->writes.clear();
2150
2151         bool certain = true;
2152         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2153                 if (id_to_int(rf->get_tid()) == (int)i)
2154                         continue;
2155                 list = &(*thrd_lists)[i];
2156
2157                 /* Can we ensure no future writes from this thread may break
2158                  * the release seq? */
2159                 bool future_ordered = false;
2160
2161                 ModelAction *last = get_last_action(int_to_id(i));
2162                 Thread *th = get_thread(int_to_id(i));
2163                 if ((last && rf->happens_before(last)) ||
2164                                 !is_enabled(th) ||
2165                                 th->is_complete())
2166                         future_ordered = true;
2167
2168                 ASSERT(!th->is_model_thread() || future_ordered);
2169
2170                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2171                         const ModelAction *act = *rit;
2172                         /* Reach synchronization -> this thread is complete */
2173                         if (act->happens_before(release))
2174                                 break;
2175                         if (rf->happens_before(act)) {
2176                                 future_ordered = true;
2177                                 continue;
2178                         }
2179
2180                         /* Only non-RMW writes can break release sequences */
2181                         if (!act->is_write() || act->is_rmw())
2182                                 continue;
2183
2184                         /* Check modification order */
2185                         if (mo_graph->checkReachable(rf, act)) {
2186                                 /* rf --mo--> act */
2187                                 future_ordered = true;
2188                                 continue;
2189                         }
2190                         if (mo_graph->checkReachable(act, release))
2191                                 /* act --mo--> release */
2192                                 break;
2193                         if (mo_graph->checkReachable(release, act) &&
2194                                       mo_graph->checkReachable(act, rf)) {
2195                                 /* release --mo-> act --mo--> rf */
2196                                 return true; /* complete */
2197                         }
2198                         /* act may break release sequence */
2199                         pending->writes.push_back(act);
2200                         certain = false;
2201                 }
2202                 if (!future_ordered)
2203                         certain = false; /* This thread is uncertain */
2204         }
2205
2206         if (certain) {
2207                 release_heads->push_back(release);
2208                 pending->writes.clear();
2209         } else {
2210                 pending->release = release;
2211                 pending->rf = rf;
2212         }
2213         return certain;
2214 }
2215
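/*
 * For illustration, a minimal (hypothetical) release-sequence test.  Standard
 * C++11 syntax is used purely as a sketch; the names are not taken from this
 * source tree.  The relaxed RMW in middle() continues the release sequence
 * headed by the release store, so an acquire load that reads 2 (the RMW's
 * result) still synchronizes with the release head and the assertion holds.
 * A plain relaxed store to 'sync' by another thread is the kind of write that
 * could break the sequence, which is what the 'pending' bookkeeping above
 * tracks.
 *
 *   #include <atomic>
 *   #include <cassert>
 *   #include <thread>
 *
 *   std::atomic<int> data(0);
 *   std::atomic<int> sync(0);
 *
 *   static void releaser()
 *   {
 *           data.store(1, std::memory_order_relaxed);
 *           sync.store(1, std::memory_order_release);      // release head
 *   }
 *
 *   static void middle()
 *   {
 *           sync.fetch_add(1, std::memory_order_relaxed);  // RMW: stays in the sequence
 *   }
 *
 *   static void acquirer()
 *   {
 *           if (sync.load(std::memory_order_acquire) == 2)
 *                   assert(data.load(std::memory_order_relaxed) == 1);
 *   }
 *
 *   int main()
 *   {
 *           std::thread t1(releaser), t2(middle), t3(acquirer);
 *           t1.join();
 *           t2.join();
 *           t3.join();
 *           return 0;
 *   }
 */
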
2216 /**
2217  * An interface for getting the release sequence head(s) with which a
2218  * given ModelAction must synchronize. This function only returns a non-empty
2219  * result when it can locate a release sequence head with certainty. Otherwise,
2220  * it may mark the internal state of the ModelChecker so that it will handle
2221  * the release sequence at a later time, causing @a acquire to update its
2222  * synchronization at some later point in execution.
2223  *
2224  * @param acquire The 'acquire' action that may synchronize with a release
2225  * sequence
2226  * @param read The read action that may read from a release sequence; this may
2227  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2228  * when 'acquire' is a fence-acquire)
2229  * @param release_heads A pass-by-reference return parameter. Will be filled
2230  * with the head(s) of the release sequence(s), if they exist with certainty.
2231  * @see ModelChecker::release_seq_heads
2232  */
2233 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2234                 ModelAction *read, rel_heads_list_t *release_heads)
2235 {
2236         const ModelAction *rf = read->get_reads_from();
2237         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2238         sequence->acquire = acquire;
2239         sequence->read = read;
2240
2241         if (!release_seq_heads(rf, release_heads, sequence)) {
2242                 /* add act to 'lazy checking' list */
2243                 pending_rel_seqs->push_back(sequence);
2244         } else {
2245                 snapshot_free(sequence);
2246         }
2247 }
2248
2249 /**
2250  * Attempt to resolve all stashed operations that might synchronize with a
2251  * release sequence for a given location. This implements the "lazy" portion of
2252  * determining whether or not a release sequence was contiguous, since not all
2253  * modification order information is present at the time an action occurs.
2254  *
2255  * @param location The location/object that should be checked for release
2256  * sequence resolutions. A NULL value means to check all locations.
2257  * @param work_queue The work queue to which to add work items as they are
2258  * generated
2259  * @return True if any updates occurred (new synchronization, new mo_graph
2260  * edges)
2261  */
2262 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2263 {
2264         bool updated = false;
2265         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2266         while (it != pending_rel_seqs->end()) {
2267                 struct release_seq *pending = *it;
2268                 ModelAction *acquire = pending->acquire;
2269                 const ModelAction *read = pending->read;
2270
2271                 /* Only resolve sequences on the given location, if provided */
2272                 if (location && read->get_location() != location) {
2273                         it++;
2274                         continue;
2275                 }
2276
2277                 const ModelAction *rf = read->get_reads_from();
2278                 rel_heads_list_t release_heads;
2279                 bool complete;
2280                 complete = release_seq_heads(rf, &release_heads, pending);
2281                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2282                         if (!acquire->has_synchronized_with(release_heads[i])) {
2283                                 if (acquire->synchronize_with(release_heads[i]))
2284                                         updated = true;
2285                                 else
2286                                         set_bad_synchronization();
2287                         }
2288                 }
2289
2290                 if (updated) {
2291                         /* Re-check all pending release sequences */
2292                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2293                         /* Re-check read-acquire for mo_graph edges */
2294                         if (acquire->is_read())
2295                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2296
2297                         /* propagate synchronization to later actions */
2298                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2299                         for (; (*rit) != acquire; rit++) {
2300                                 ModelAction *propagate = *rit;
2301                                 if (acquire->happens_before(propagate)) {
2302                                         propagate->synchronize_with(acquire);
2303                                         /* Re-check 'propagate' for mo_graph edges */
2304                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2305                                 }
2306                         }
2307                 }
2308                 if (complete) {
2309                         it = pending_rel_seqs->erase(it);
2310                         snapshot_free(pending);
2311                 } else {
2312                         it++;
2313                 }
2314         }
2315
2316         // If we resolved promises or release sequences, see if we have realized a data race.
2317         checkDataRaces();
2318
2319         return updated;
2320 }
2321
2322 /**
2323  * Performs various bookkeeping operations for the current ModelAction. For
2324  * instance, adds action to the per-object, per-thread action vector and to the
2325  * action trace list of all thread actions.
2326  *
2327  * @param act is the ModelAction to add.
2328  */
2329 void ModelChecker::add_action_to_lists(ModelAction *act)
2330 {
2331         int tid = id_to_int(act->get_tid());
2332         ModelAction *uninit = NULL;
2333         int uninit_id = -1;
2334         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2335         if (list->empty() && act->is_atomic_var()) {
2336                 uninit = new_uninitialized_action(act->get_location());
2337                 uninit_id = id_to_int(uninit->get_tid());
2338                 list->push_back(uninit);
2339         }
2340         list->push_back(act);
2341
2342         action_trace->push_back(act);
2343         if (uninit)
2344                 action_trace->push_front(uninit);
2345
2346         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2347         if (tid >= (int)vec->size())
2348                 vec->resize(priv->next_thread_id);
2349         (*vec)[tid].push_back(act);
2350         if (uninit)
2351                 (*vec)[uninit_id].push_front(uninit);
2352
2353         if ((int)thrd_last_action->size() <= tid)
2354                 thrd_last_action->resize(get_num_threads());
2355         (*thrd_last_action)[tid] = act;
2356         if (uninit)
2357                 (*thrd_last_action)[uninit_id] = uninit;
2358
2359         if (act->is_fence() && act->is_release()) {
2360                 if ((int)thrd_last_fence_release->size() <= tid)
2361                         thrd_last_fence_release->resize(get_num_threads());
2362                 (*thrd_last_fence_release)[tid] = act;
2363         }
2364
2365         if (act->is_wait()) {
2366                 void *mutex_loc = (void *) act->get_value();
2367                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2368
2369                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2370                 if (tid >= (int)vec->size())
2371                         vec->resize(priv->next_thread_id);
2372                 (*vec)[tid].push_back(act);
2373         }
2374 }
2375
2376 /**
2377  * @brief Get the last action performed by a particular Thread
2378  * @param tid The thread ID of the Thread in question
2379  * @return The last action in the thread
2380  */
2381 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2382 {
2383         int threadid = id_to_int(tid);
2384         if (threadid < (int)thrd_last_action->size())
2385                 return (*thrd_last_action)[id_to_int(tid)];
2386         else
2387                 return NULL;
2388 }
2389
2390 /**
2391  * @brief Get the last fence release performed by a particular Thread
2392  * @param tid The thread ID of the Thread in question
2393  * @return The last fence release in the thread, if one exists; NULL otherwise
2394  */
2395 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2396 {
2397         int threadid = id_to_int(tid);
2398         if (threadid < (int)thrd_last_fence_release->size())
2399                 return (*thrd_last_fence_release)[id_to_int(tid)];
2400         else
2401                 return NULL;
2402 }
2403
2404 /**
2405  * Gets the last memory_order_seq_cst write (in the total global sequence)
2406  * performed on a particular object (i.e., memory location), not including the
2407  * current action.
2408  * @param curr The current ModelAction; also denotes the object location to
2409  * check
2410  * @return The last seq_cst write
2411  */
2412 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2413 {
2414         void *location = curr->get_location();
2415         action_list_t *list = get_safe_ptr_action(obj_map, location);
2416         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2417         action_list_t::reverse_iterator rit;
2418         for (rit = list->rbegin(); rit != list->rend(); rit++)
2419                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2420                         return *rit;
2421         return NULL;
2422 }
2423
2424 /**
2425  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2426  * performed in a particular thread, prior to a particular fence.
2427  * @param tid The ID of the thread to check
2428  * @param before_fence The fence from which to begin the search; if NULL, then
2429  * search for the most recent fence in the thread.
2430  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2431  */
2432 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2433 {
2434         /* All fences should have NULL location */
2435         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2436         action_list_t::reverse_iterator rit = list->rbegin();
2437
2438         if (before_fence) {
2439                 for (; rit != list->rend(); rit++)
2440                         if (*rit == before_fence)
2441                                 break;
2442
2443                 ASSERT(*rit == before_fence);
2444                 rit++;
2445         }
2446
2447         for (; rit != list->rend(); rit++)
2448                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2449                         return *rit;
2450         return NULL;
2451 }
2452
2453 /**
2454  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2455  * location). This function identifies the mutex according to the current
2456  * action, which is presumed to operate on the same mutex.
2457  * @param curr The current ModelAction; also denotes the object location to
2458  * check
2459  * @return The last unlock operation
2460  */
2461 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2462 {
2463         void *location = curr->get_location();
2464         action_list_t *list = get_safe_ptr_action(obj_map, location);
2465         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2466         action_list_t::reverse_iterator rit;
2467         for (rit = list->rbegin(); rit != list->rend(); rit++)
2468                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2469                         return *rit;
2470         return NULL;
2471 }
2472
2473 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2474 {
2475         ModelAction *parent = get_last_action(tid);
2476         if (!parent)
2477                 parent = get_thread(tid)->get_creation();
2478         return parent;
2479 }
2480
2481 /**
2482  * Returns the clock vector for a given thread.
2483  * @param tid The thread whose clock vector we want
2484  * @return Desired clock vector
2485  */
2486 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2487 {
2488         return get_parent_action(tid)->get_cv();
2489 }
2490
2491 /**
2492  * @brief Find the promise, if any, to resolve for the current action
2493  * @param curr The current ModelAction. Should be a write.
2494  * @return The (non-negative) index for the Promise to resolve, if any;
2495  * otherwise -1
2496  */
2497 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2498 {
2499         for (unsigned int i = 0; i < promises->size(); i++)
2500                 if (curr->get_node()->get_promise(i))
2501                         return i;
2502         return -1;
2503 }
2504
2505 /**
2506  * Resolve a Promise with a current write.
2507  * @param write The ModelAction that is fulfilling Promises
2508  * @param promise_idx The index corresponding to the promise
2509  * @return True if the Promise was successfully resolved; false otherwise
2510  */
2511 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2512 {
2513         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2514         Promise *promise = (*promises)[promise_idx];
2515
2516         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2517                 ModelAction *read = promise->get_reader(i);
2518                 read_from(read, write);
2519                 actions_to_check.push_back(read);
2520         }
2521         /* Make sure the promise's value matches the write's value */
2522         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2523         if (!mo_graph->resolvePromise(promise, write))
2524                 priv->failed_promise = true;
2525
2526         promises->erase(promises->begin() + promise_idx);
2527         delete promise;
2528
2529         //Check whether reading these writes has made threads unable to
2530         //resolve promises
2531
2532         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2533                 ModelAction *read = actions_to_check[i];
2534                 mo_check_promises(read, true);
2535         }
2536
2537         return true;
2538 }
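
/*
 * Illustrative sketch (an assumption about the caller, which lives earlier in
 * this file): a write is typically matched against outstanding promises as
 *
 *   int idx = get_promise_to_resolve(curr);
 *   if (idx >= 0)
 *           resolve_promise(curr, idx);
 */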
2539
2540 /**
2541  * Compute the set of promises that could potentially be satisfied by this
2542  * action. Note that the set computation actually appears in the Node, not in
2543  * ModelChecker.
2544  * @param curr The ModelAction that may satisfy promises
2545  */
2546 void ModelChecker::compute_promises(ModelAction *curr)
2547 {
2548         for (unsigned int i = 0; i < promises->size(); i++) {
2549                 Promise *promise = (*promises)[i];
2550                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2551                         continue;
2552
2553                 bool satisfy = true;
2554                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2555                         const ModelAction *act = promise->get_reader(j);
2556                         if (act->happens_before(curr) ||
2557                                         act->could_synchronize_with(curr)) {
2558                                 satisfy = false;
2559                                 break;
2560                         }
2561                 }
2562                 if (satisfy)
2563                         curr->get_node()->set_promise(i);
2564         }
2565 }
2566
2567 /** Checks promises in response to a change in a thread's ClockVector. */
2568 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2569 {
2570         for (unsigned int i = 0; i < promises->size(); i++) {
2571                 Promise *promise = (*promises)[i];
2572                 if (!promise->thread_is_available(tid))
2573                         continue;
2574                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2575                         const ModelAction *act = promise->get_reader(j);
2576                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2577                                         merge_cv->synchronized_since(act)) {
2578                                 if (promise->eliminate_thread(tid)) {
2579                                         /* Promise has failed */
2580                                         priv->failed_promise = true;
2581                                         return;
2582                                 }
2583                         }
2584                 }
2585         }
2586 }
2587
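/**
 * @brief Record a failed promise if any outstanding promise can no longer be
 * satisfied (e.g., after the last thread that could satisfy it is disabled)
 */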
2588 void ModelChecker::check_promises_thread_disabled()
2589 {
2590         for (unsigned int i = 0; i < promises->size(); i++) {
2591                 Promise *promise = (*promises)[i];
2592                 if (promise->has_failed()) {
2593                         priv->failed_promise = true;
2594                         return;
2595                 }
2596         }
2597 }
2598
2599 /**
2600  * @brief Checks promises in response to addition to modification order for
2601  * threads.
2602  *
2603  * We test whether threads are still available for satisfying promises after an
2604  * addition to our modification order constraints. Those that are unavailable
2605  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2606  * that promise has failed.
2607  *
2608  * @param act The ModelAction which updated the modification order
2609  * @param is_read_check Should be true if act is a read and we must check for
2610  * updates to the store from which it read (there is a distinction here for
2611  * RMW's, which are both a load and a store)
2612  */
2613 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2614 {
2615         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2616
2617         for (unsigned int i = 0; i < promises->size(); i++) {
2618                 Promise *promise = (*promises)[i];
2619
2620                 // Is this promise on the same location?
2621                 if (!promise->same_location(write))
2622                         continue;
2623
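                /* If some reader of this promise happens before the updating
                 * action, check (once) whether the new modification-order
                 * constraints have made the promise impossible to satisfy */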
2624                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2625                         const ModelAction *pread = promise->get_reader(j);
2626                         if (!pread->happens_before(act))
2627                                 continue;
2628                         if (mo_graph->checkPromise(write, promise)) {
2629                                 priv->failed_promise = true;
2630                                 return;
2631                         }
2632                         break;
2633                 }
2634
2635                 // Don't do any lookups twice for the same thread
2636                 if (!promise->thread_is_available(act->get_tid()))
2637                         continue;
2638
2639                 if (mo_graph->checkReachable(promise, write)) {
2640                         if (mo_graph->checkPromise(write, promise)) {
2641                                 priv->failed_promise = true;
2642                                 return;
2643                         }
2644                 }
2645         }
2646 }
2647
2648 /**
2649  * Compute the set of writes that may break the current pending release
2650  * sequence. This information is extracted from previous release sequence
2651  * calculations.
2652  *
2653  * @param curr The current ModelAction. Must be a release sequence fixup
2654  * action.
2655  */
2656 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2657 {
2658         if (pending_rel_seqs->empty())
2659                 return;
2660
2661         struct release_seq *pending = pending_rel_seqs->back();
2662         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2663                 const ModelAction *write = pending->writes[i];
2664                 curr->get_node()->add_relseq_break(write);
2665         }
2666
2667         /* NULL means don't break the sequence; just synchronize */
2668         curr->get_node()->add_relseq_break(NULL);
2669 }
2670
2671 /**
2672  * Build up an initial set of all past writes that this 'read' action may read
2673  * from, as well as any previously-observed future values that must still be valid.
2674  *
2675  * @param curr is the current ModelAction that we are exploring; it must be a
2676  * 'read' operation.
2677  */
2678 void ModelChecker::build_may_read_from(ModelAction *curr)
2679 {
2680         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2681         unsigned int i;
2682         ASSERT(curr->is_read());
2683
2684         ModelAction *last_sc_write = NULL;
2685
2686         if (curr->is_seqcst())
2687                 last_sc_write = get_last_seq_cst_write(curr);
2688
2689         /* Iterate over all threads */
2690         for (i = 0; i < thrd_lists->size(); i++) {
2691                 /* Iterate over actions in thread, starting from most recent */
2692                 action_list_t *list = &(*thrd_lists)[i];
2693                 action_list_t::reverse_iterator rit;
2694                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2695                         ModelAction *act = *rit;
2696
2697                         /* Only consider 'write' actions */
2698                         if (!act->is_write() || act == curr)
2699                                 continue;
2700
2701                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2702                         bool allow_read = true;
2703
2704                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2705                                 allow_read = false;
2706                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2707                                 allow_read = false;
2708
2709                         if (allow_read) {
2710                                 /* Only add feasible reads */
2711                                 mo_graph->startChanges();
2712                                 r_modification_order(curr, act);
2713                                 if (!is_infeasible())
2714                                         curr->get_node()->add_read_from_past(act);
2715                                 mo_graph->rollbackChanges();
2716                         }
2717
2718                         /* Include at most one act per-thread that "happens before" curr */
2719                         if (act->happens_before(curr))
2720                                 break;
2721                 }
2722         }
2723
2724         /* Inherit existing, promised future values */
2725         for (i = 0; i < promises->size(); i++) {
2726                 const Promise *promise = (*promises)[i];
2727                 const ModelAction *promise_read = promise->get_reader(0);
2728                 if (promise_read->same_var(curr)) {
2729                         /* Only add feasible future-values */
2730                         mo_graph->startChanges();
2731                         r_modification_order(curr, promise);
2732                         if (!is_infeasible())
2733                                 curr->get_node()->add_read_from_promise(promise_read);
2734                         mo_graph->rollbackChanges();
2735                 }
2736         }
2737
2738         /* We may find no valid may-read-from only if the execution is doomed */
2739         if (!curr->get_node()->read_from_size()) {
2740                 priv->no_valid_reads = true;
2741                 set_assert();
2742         }
2743
2744         if (DBG_ENABLED()) {
2745                 model_print("Reached read action:\n");
2746                 curr->print();
2747                 model_print("Printing read_from_past\n");
2748                 curr->get_node()->print_read_from_past();
2749                 model_print("End printing read_from_past\n");
2750         }
2751 }
2752
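/**
 * @brief Check whether a sleeping thread should consider reading from a write
 *
 * Walks backwards along the reads-from chain of RMW operations, accepting the
 * write if the chain reaches an uninitialized store or a release store
 * performed while the reading thread was on the sleep set.
 * @param curr The read action of the (sleeping) thread
 * @param write The candidate write
 * @return True if the sleeping thread may read from this write
 */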
2753 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2754 {
2755         for ( ; write != NULL; write = write->get_reads_from()) {
2756                 /* UNINIT actions don't have a Node, and they never sleep */
2757                 if (write->is_uninitialized())
2758                         return true;
2759                 Node *prevnode = write->get_node()->get_parent();
2760
2761                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2762                 if (write->is_release() && thread_sleep)
2763                         return true;
2764                 if (!write->is_rmw())
2765                         return false;
2766         }
2767         return true;
2768 }
2769
2770 /**
2771  * @brief Create a new action representing an uninitialized atomic
2772  * @param location The memory location of the atomic object
2773  * @return A pointer to a new ModelAction
2774  */
2775 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2776 {
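        /* Allocate from the snapshotting heap and construct in place */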
2777         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2778         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2779         act->create_cv(NULL);
2780         return act;
2781 }
2782
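/**
 * @brief Print a list of ModelActions, followed by a simple hash of the list
 * for quickly comparing executions
 */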
2783 static void print_list(action_list_t *list)
2784 {
2785         action_list_t::iterator it;
2786
2787         model_print("---------------------------------------------------------------------\n");
2788
2789         unsigned int hash = 0;
2790
2791         for (it = list->begin(); it != list->end(); it++) {
2792                 (*it)->print();
2793                 hash = hash^(hash<<3)^((*it)->hash());
2794         }
2795         model_print("HASH %u\n", hash);
2796         model_print("---------------------------------------------------------------------\n");
2797 }
2798
2799 #if SUPPORT_MOD_ORDER_DUMP
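/**
 * @brief Dump the current execution as a Graphviz "dot" graph
 *
 * Emits the modification-order nodes along with reads-from ("rf") and
 * sequenced-before ("sb") edges for the trace.
 * @param filename The output file name (".dot" is appended)
 */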
2800 void ModelChecker::dumpGraph(char *filename) const
2801 {
2802         char buffer[200];
2803         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2804         FILE *file = fopen(buffer, "w");
2805         fprintf(file, "digraph %s {\n", filename);
2806         mo_graph->dumpNodes(file);
2807         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2808
2809         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2810                 ModelAction *act = *it;
2811                 if (act->is_read()) {
2812                         mo_graph->dot_print_node(file, act);
2813                         if (act->get_reads_from())
2814                                 mo_graph->dot_print_edge(file,
2815                                                 act->get_reads_from(),
2816                                                 act,
2817                                                 "label=\"rf\", color=red, weight=2");
2818                         else
2819                                 mo_graph->dot_print_edge(file,
2820                                                 act->get_reads_from_promise(),
2821                                                 act,
2822                                                 "label=\"rf\", color=red");
2823                 }
2824                 if (thread_array[id_to_int(act->get_tid())]) {
2825                         mo_graph->dot_print_edge(file,
2826                                         thread_array[id_to_int(act->get_tid())],
2827                                         act,
2828                                         "label=\"sb\", color=blue, weight=400");
2829                 }
2830
2831                 thread_array[id_to_int(act->get_tid())] = act;
2832         }
2833         fprintf(file, "}\n");
2834         model_free(thread_array);
2835         fclose(file);
2836 }
2837 #endif
2838
2839 /** @brief Prints an execution trace summary. */
2840 void ModelChecker::print_summary() const
2841 {
2842 #if SUPPORT_MOD_ORDER_DUMP
2843         char buffername[100];
2844         sprintf(buffername, "exec%04u", stats.num_total);
2845         mo_graph->dumpGraphToFile(buffername);
2846         sprintf(buffername, "graph%04u", stats.num_total);
2847         dumpGraph(buffername);
2848 #endif
2849
2850         model_print("Execution %d:", stats.num_total);
2851         if (isfeasibleprefix()) {
2852                 if (scheduler->all_threads_sleeping())
2853                         model_print(" SLEEP-SET REDUNDANT");
2854                 model_print("\n");
2855         } else
2856                 print_infeasibility(" INFEASIBLE");
2857         print_list(action_trace);
2858         model_print("\n");
2859 }
2860
2861 /**
2862  * Add a Thread to the system for the first time. Should only be called once
2863  * per thread.
2864  * @param t The Thread to add
2865  */
2866 void ModelChecker::add_thread(Thread *t)
2867 {
2868         thread_map->put(id_to_int(t->get_id()), t);
2869         scheduler->add_thread(t);
2870 }
2871
2872 /**
2873  * Removes a thread from the scheduler.
2874  * @param t The Thread to remove
2875  */
2876 void ModelChecker::remove_thread(Thread *t)
2877 {
2878         scheduler->remove_thread(t);
2879 }
2880
2881 /**
2882  * @brief Get a Thread reference by its ID
2883  * @param tid The Thread's ID
2884  * @return A Thread reference
2885  */
2886 Thread * ModelChecker::get_thread(thread_id_t tid) const
2887 {
2888         return thread_map->get(id_to_int(tid));
2889 }
2890
2891 /**
2892  * @brief Get a reference to the Thread in which a ModelAction was executed
2893  * @param act The ModelAction
2894  * @return A Thread reference
2895  */
2896 Thread * ModelChecker::get_thread(const ModelAction *act) const
2897 {
2898         return get_thread(act->get_tid());
2899 }
2900
2901 /**
2902  * @brief Get a Promise's "promise number"
2903  *
2904  * A "promise number" is an index number that is unique to a promise, valid
2905  * only for a specific snapshot of an execution trace. Promises may come and go
2906  * as they are generated and resolved, so an index only retains meaning for the
2907  * current snapshot.
2908  *
2909  * @param promise The Promise to check
2910  * @return The promise index, if the promise still is valid; otherwise -1
2911  */
2912 int ModelChecker::get_promise_number(const Promise *promise) const
2913 {
2914         for (unsigned int i = 0; i < promises->size(); i++)
2915                 if ((*promises)[i] == promise)
2916                         return i;
2917         /* Not found */
2918         return -1;
2919 }
2920
2921 /**
2922  * @brief Check if a Thread is currently enabled
2923  * @param t The Thread to check
2924  * @return True if the Thread is currently enabled
2925  */
2926 bool ModelChecker::is_enabled(Thread *t) const
2927 {
2928         return scheduler->is_enabled(t);
2929 }
2930
2931 /**
2932  * @brief Check if a Thread is currently enabled
2933  * @param tid The ID of the Thread to check
2934  * @return True if the Thread is currently enabled
2935  */
2936 bool ModelChecker::is_enabled(thread_id_t tid) const
2937 {
2938         return scheduler->is_enabled(tid);
2939 }
2940
2941 /**
2942  * Switch from a model-checker context to a user-thread context. This is the
2943  * complement of ModelChecker::switch_to_master and must be called from the
2944  * model-checker context
2945  *
2946  * @param thread The user-thread to switch to
2947  */
2948 void ModelChecker::switch_from_master(Thread *thread)
2949 {
2950         scheduler->set_current_thread(thread);
2951         Thread::swap(&system_context, thread);
2952 }
2953
2954 /**
2955  * Switch from a user-context to the "master thread" context (a.k.a. system
2956  * context). This switch is made with the intention of exploring a particular
2957  * model-checking action (described by a ModelAction object). Must be called
2958  * from a user-thread context.
2959  *
2960  * @param act The current action that will be explored. May be NULL only if
2961  * trace is exiting via an assertion (see ModelChecker::set_assert and
2962  * ModelChecker::has_asserted).
2963  * @return The value returned by the current action
2964  */
2965 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2966 {
2967         DBG();
2968         Thread *old = thread_current();
2969         ASSERT(!old->get_pending());
2970         old->set_pending(act);
2971         if (Thread::swap(old, &system_context) < 0) {
2972                 perror("swap threads");
2973                 exit(EXIT_FAILURE);
2974         }
2975         return old->get_return_value();
2976 }
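
/*
 * Illustrative sketch (an assumption; the real wrappers live outside this
 * file): a user-thread atomic operation typically funnels its ModelAction
 * through switch_to_master(), e.g.:
 *
 *   uint64_t example_atomic_load(void *obj, memory_order ord)
 *   {
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, ord, obj));
 *   }
 */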
2977
2978 /**
2979  * Takes the next step in the execution, if possible.
2980  * @param curr The current step to take
2981  * @return The next Thread to run, if any; NULL if this execution
2982  * should terminate
2983  */
2984 Thread * ModelChecker::take_step(ModelAction *curr)
2985 {
2986         Thread *curr_thrd = get_thread(curr);
2987         ASSERT(curr_thrd->get_state() == THREAD_READY);
2988
2989         curr = check_current_action(curr);
2990
2991         /* Infeasible -> don't take any more steps */
2992         if (is_infeasible())
2993                 return NULL;
2994         else if (isfeasibleprefix() && have_bug_reports()) {
2995                 set_assert();
2996                 return NULL;
2997         }
2998
2999         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3000                 return NULL;
3001
3002         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3003                 scheduler->remove_thread(curr_thrd);
3004
3005         Thread *next_thrd = get_next_thread(curr);
3006
3007         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3008                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3009
3010         return next_thrd;
3011 }
3012
3013 /** Wrapper to run the user's main function, with appropriate arguments */
3014 void user_main_wrapper(void *)
3015 {
3016         user_main(model->params.argc, model->params.argv);
3017 }
3018
3019 /** @brief Run ModelChecker for the user program */
3020 void ModelChecker::run()
3021 {
3022         do {
3023                 thrd_t user_thread;
3024                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3025                 add_thread(t);
3026
3027                 do {
3028                         /*
3029                          * Stash next pending action(s) for thread(s). There
3030                          * should only need to stash one thread's action--the
3031                          * thread which just took a step--plus the first step
3032                          * for any newly-created thread
3033                          */
3034                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3035                                 thread_id_t tid = int_to_id(i);
3036                                 Thread *thr = get_thread(tid);
3037                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3038                                         switch_from_master(thr);
3039                                 }
3040                         }
3041
3042                         /* Catch assertions from prior take_step or from
3043                          * between-ModelAction bugs (e.g., data races) */
3044                         if (has_asserted())
3045                                 break;
3046
3047                         /* Consume the next action for a Thread */
3048                         ModelAction *curr = t->get_pending();
3049                         t->set_pending(NULL);
3050                         t = take_step(curr);
3051                 } while (t && !t->is_model_thread());
3052
3053                 /*
3054                  * Launch end-of-execution release sequence fixups only when
3055                  * the execution is otherwise feasible AND there are:
3056                  *
3057                  * (1) pending release sequences
3058                  * (2) pending assertions that could be invalidated by a change
3059                  * in clock vectors (i.e., data races)
3060                  * (3) no pending promises
3061                  */
3062                 while (!pending_rel_seqs->empty() &&
3063                                 is_feasible_prefix_ignore_relseq() &&
3064                                 !unrealizedraces.empty()) {
3065                         model_print("*** WARNING: release sequence fixup action "
3066                                         "(%zu pending release sequence(s)) ***\n",
3067                                         pending_rel_seqs->size());
3068                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3069                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3070                                         model_thread);
3071                         take_step(fixup);
3072                 }
3073         } while (next_execution());
3074
3075         model_print("******* Model-checking complete: *******\n");
3076         print_stats();
3077 }