[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
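                /* The two-byte "%s" in fmt is replaced by str, so this
                 * allocation leaves room for the terminating NUL */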
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
        delete futurevalues;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
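/* Look up the action list for a memory location, lazily allocating an empty
 * list on first access so callers never receive NULL */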
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
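/* Same as get_safe_ptr_action(), but for the per-thread vector-of-lists maps */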
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores the user program to its initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: the current ModelAction. If non-NULL, it may guide
218  * the choice of the next thread (e.g., THREAD_CREATE should be followed by
219  * THREAD_START, or ATOMIC_RMWR by ATOMIC_{RMW,RMWC})
220  * @return The next thread to run, if any exists; NULL if no threads remain
221  * to be executed
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
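                /* Try the next unexplored alternative at this node, in order:
                 * misc value, promise set, read-from choice, release-sequence
                 * break, and finally a different thread (backtracking) */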
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep set
299  * will be. This method marks the pending action of each thread in the
300  * sleep set with the sleep flag.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* A sleeping fence-acquire may be awoken by any release (store-release or fence-release) */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can awake load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
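        /* Deadlock iff no thread is enabled while some user thread still has
         * a pending (blocked) action */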
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else {
487                 stats.num_redundant++;
488
489                 /**
490                  * @todo We can violate this ASSERT() when fairness/sleep sets
491                  * conflict to cause an execution to terminate, e.g. with:
492                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
493                  */
494                 //ASSERT(scheduler->all_threads_sleeping());
495         }
496 }
497
498 /** @brief Print execution stats */
499 void ModelChecker::print_stats() const
500 {
501         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
502         model_print("Number of redundant executions: %d\n", stats.num_redundant);
503         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
504         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
505         model_print("Total executions: %d\n", stats.num_total);
506         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
507 }
508
509 /**
510  * @brief End-of-execution print
511  * @param printbugs Should any existing bugs be printed?
512  */
513 void ModelChecker::print_execution(bool printbugs) const
514 {
515         print_program_output();
516
517         if (DBG_ENABLED() || params.verbose) {
518                 model_print("Earliest divergence point since last feasible execution:\n");
519                 if (earliest_diverge)
520                         earliest_diverge->print();
521                 else
522                         model_print("(Not set)\n");
523
524                 model_print("\n");
525                 print_stats();
526         }
527
528         /* Don't print invalid bugs */
529         if (printbugs)
530                 print_bugs();
531
532         model_print("\n");
533         print_summary();
534 }
535
536 /**
537  * Queries the model-checker for more executions to explore and, if one
538  * exists, resets the model-checker state to execute a new execution.
539  *
540  * @return If there are more executions to explore, return true. Otherwise,
541  * return false.
542  */
543 bool ModelChecker::next_execution()
544 {
545         DBG();
546         /* Is this execution a feasible execution that's worth bug-checking? */
547         bool complete = isfeasibleprefix() && (is_complete_execution() ||
548                         have_bug_reports());
549
550         /* End-of-execution bug checks */
551         if (complete) {
552                 if (is_deadlocked())
553                         assert_bug("Deadlock detected");
554
555                 checkDataRaces();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
580
581 /**
582  * @brief Find the last fence-related backtracking conflict for a ModelAction
583  *
584  * This function performs the search for the most recent conflicting action
585  * against which we should perform backtracking, as affected by fence
586  * operations. This includes pairs of potentially-synchronizing actions which
587  * occur due to fence-acquire or fence-release, and hence should be explored in
588  * the opposite execution order.
589  *
590  * @param act The current action
591  * @return The most recent action which conflicts with act due to fences
592  */
593 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
594 {
595         /* Only perform release/acquire fence backtracking for stores */
596         if (!act->is_write())
597                 return NULL;
598
599         /* Find a fence-release (or, act is a release) */
600         ModelAction *last_release;
601         if (act->is_release())
602                 last_release = act;
603         else
604                 last_release = get_last_fence_release(act->get_tid());
605         if (!last_release)
606                 return NULL;
607
608         /* Skip past the release */
609         action_list_t *list = action_trace;
610         action_list_t::reverse_iterator rit;
611         for (rit = list->rbegin(); rit != list->rend(); rit++)
612                 if (*rit == last_release)
613                         break;
614         ASSERT(rit != list->rend());
615
616         /* Find a prior:
617          *   load-acquire
618          * or
619          *   load --sb-> fence-acquire */
620         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
621         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
622         bool found_acquire_fences = false;
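        /* Walk backward (newest to oldest) from the release: for each other
         * thread, record its most recent fence-acquire and whether that thread
         * also performed a plain (non-acquire) load of the same location */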
623         for ( ; rit != list->rend(); rit++) {
624                 ModelAction *prev = *rit;
625                 if (act->same_thread(prev))
626                         continue;
627
628                 int tid = id_to_int(prev->get_tid());
629
630                 if (prev->is_read() && act->same_var(prev)) {
631                         if (prev->is_acquire()) {
632                                 /* Found most recent load-acquire, don't need
633                                  * to search for more fences */
634                                 if (!found_acquire_fences)
635                                         return NULL;
636                         } else {
637                                 prior_loads[tid] = prev;
638                         }
639                 }
640                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
641                         found_acquire_fences = true;
642                         acquire_fences[tid] = prev;
643                 }
644         }
645
646         ModelAction *latest_backtrack = NULL;
647         for (unsigned int i = 0; i < acquire_fences.size(); i++)
648                 if (acquire_fences[i] && prior_loads[i])
649                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
650                                 latest_backtrack = acquire_fences[i];
651         return latest_backtrack;
652 }
653
654 /**
655  * @brief Find the last backtracking conflict for a ModelAction
656  *
657  * This function performs the search for the most recent conflicting action
658  * against which we should perform backtracking. This primarily includes pairs of
659  * synchronizing actions which should be explored in the opposite execution
660  * order.
661  *
662  * @param act The current action
663  * @return The most recent action which conflicts with act
664  */
665 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
666 {
667         switch (act->get_type()) {
668         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
669         case ATOMIC_READ:
670         case ATOMIC_WRITE:
671         case ATOMIC_RMW: {
672                 ModelAction *ret = NULL;
673
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (prev->could_synchronize_with(act)) {
680                                 ret = prev;
681                                 break;
682                         }
683                 }
684
685                 ModelAction *ret2 = get_last_fence_conflict(act);
686                 if (!ret2)
687                         return ret;
688                 if (!ret)
689                         return ret2;
690                 if (*ret < *ret2)
691                         return ret2;
692                 return ret;
693         }
694         case ATOMIC_LOCK:
695         case ATOMIC_TRYLOCK: {
696                 /* linear search: from most recent to oldest */
697                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
698                 action_list_t::reverse_iterator rit;
699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
700                         ModelAction *prev = *rit;
701                         if (act->is_conflicting_lock(prev))
702                                 return prev;
703                 }
704                 break;
705         }
706         case ATOMIC_UNLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (!act->same_thread(prev) && prev->is_failed_trylock())
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_WAIT: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                         if (!act->same_thread(prev) && prev->is_notify())
726                                 return prev;
727                 }
728                 break;
729         }
730
731         case ATOMIC_NOTIFY_ALL:
732         case ATOMIC_NOTIFY_ONE: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_wait())
739                                 return prev;
740                 }
741                 break;
742         }
743         default:
744                 break;
745         }
746         return NULL;
747 }
748
749 /** This method finds backtracking points against which we should try to
750  * reorder the given ModelAction.
751  *
752  * @param act The ModelAction for which to find backtracking points.
753  */
754 void ModelChecker::set_backtracking(ModelAction *act)
755 {
756         Thread *t = get_thread(act);
757         ModelAction *prev = get_last_conflict(act);
758         if (prev == NULL)
759                 return;
760
761         Node *node = prev->get_node()->get_parent();
762
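        /* If this action's thread is already enabled at the conflict point,
         * only that thread needs a backtracking choice; otherwise consider
         * every thread at that node */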
763         int low_tid, high_tid;
764         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
765                 low_tid = id_to_int(act->get_tid());
766                 high_tid = low_tid + 1;
767         } else {
768                 low_tid = 0;
769                 high_tid = get_num_threads();
770         }
771
772         for (int i = low_tid; i < high_tid; i++) {
773                 thread_id_t tid = int_to_id(i);
774
775                 /* Make sure this thread can be enabled here. */
776                 if (i >= node->get_num_threads())
777                         break;
778
779                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
780                 if (node->enabled_status(tid) != THREAD_ENABLED)
781                         continue;
782
783                 /* Check if this has been explored already */
784                 if (node->has_been_explored(tid))
785                         continue;
786
787                 /* See if fairness allows */
788                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
789                         bool unfair = false;
790                         for (int t = 0; t < node->get_num_threads(); t++) {
791                                 thread_id_t tother = int_to_id(t);
792                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
793                                         unfair = true;
794                                         break;
795                                 }
796                         }
797                         if (unfair)
798                                 continue;
799                 }
800                 /* Cache the latest backtracking point */
801                 set_latest_backtrack(prev);
802
803                 /* If this is a new backtracking point, mark the tree */
804                 if (!node->set_backtrack(tid))
805                         continue;
806                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
807                                         id_to_int(prev->get_tid()),
808                                         id_to_int(t->get_id()));
809                 if (DBG_ENABLED()) {
810                         prev->print();
811                         act->print();
812                 }
813         }
814 }
815
816 /**
817  * @brief Cache a backtracking point as the "most recent", if eligible
818  *
819  * Note that this does not prepare the NodeStack for this backtracking
820  * operation; it only caches the action on a per-execution basis
821  *
822  * @param act The operation at which we should explore a different next action
823  * (i.e., backtracking point)
824  * @return True, if this action is now the most recent backtracking point;
825  * false otherwise
826  */
827 bool ModelChecker::set_latest_backtrack(ModelAction *act)
828 {
829         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
830                 priv->next_backtrack = act;
831                 return true;
832         }
833         return false;
834 }
835
836 /**
837  * Returns last backtracking point. The model checker will explore a different
838  * path for this point in the next execution.
839  * @return The ModelAction at which the next execution should diverge.
840  */
841 ModelAction * ModelChecker::get_next_backtrack()
842 {
843         ModelAction *next = priv->next_backtrack;
844         priv->next_backtrack = NULL;
845         return next;
846 }
847
848 /**
849  * Processes a read model action.
850  * @param curr is the read model action to process.
851  * @return True if processing this read updates the mo_graph.
852  */
853 bool ModelChecker::process_read(ModelAction *curr)
854 {
855         Node *node = curr->get_node();
856         uint64_t value = VALUE_NONE;
857         bool updated = false;
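        /* Choose a read-from source for 'curr' (a past write, a promise, or a
         * future value); for past writes, retry with another candidate if the
         * current choice makes the execution infeasible */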
858         while (true) {
859                 switch (node->get_read_from_status()) {
860                 case READ_FROM_PAST: {
861                         const ModelAction *rf = node->get_read_from_past();
862                         ASSERT(rf);
863
864                         mo_graph->startChanges();
865                         value = rf->get_value();
866                         check_recency(curr, rf);
867                         bool r_status = r_modification_order(curr, rf);
868
869                         if (is_infeasible() && node->increment_read_from()) {
870                                 mo_graph->rollbackChanges();
871                                 priv->too_many_reads = false;
872                                 continue;
873                         }
874
875                         read_from(curr, rf);
876                         mo_graph->commitChanges();
877                         mo_check_promises(curr, true);
878
879                         updated |= r_status;
880                         break;
881                 }
882                 case READ_FROM_PROMISE: {
883                         Promise *promise = curr->get_node()->get_read_from_promise();
884                         promise->add_reader(curr);
885                         value = promise->get_value();
886                         curr->set_read_from_promise(promise);
887                         mo_graph->startChanges();
888                         updated = r_modification_order(curr, promise);
889                         mo_graph->commitChanges();
890                         break;
891                 }
892                 case READ_FROM_FUTURE: {
893                         /* Read from future value */
894                         struct future_value fv = node->get_future_value();
895                         Promise *promise = new Promise(curr, fv);
896                         value = fv.value;
897                         curr->set_read_from_promise(promise);
898                         promises->push_back(promise);
899                         mo_graph->startChanges();
900                         updated = r_modification_order(curr, promise);
901                         mo_graph->commitChanges();
902                         break;
903                 }
904                 default:
905                         ASSERT(false);
906                 }
907                 get_thread(curr)->set_return_value(value);
908                 return updated;
909         }
910 }
911
912 /**
913  * Processes a lock, trylock, unlock, wait, or notify model action.
914  * @param curr The mutex-related model action to process.
915  *
916  * The trylock operation checks whether the lock is already taken. If not,
917  * it falls through to the normal lock case. If so, it returns
918  * failure.
919  *
920  * The lock operation has already been checked that it is enabled, so
921  * it just grabs the lock and synchronizes with the previous unlock.
922  *
923  * The unlock operation has to re-enable all of the threads that are
924  * waiting on the lock.
925  *
926  * @return True if synchronization was updated; false otherwise
927  */
928 bool ModelChecker::process_mutex(ModelAction *curr)
929 {
930         std::mutex *mutex = NULL;
931         struct std::mutex_state *state = NULL;
932
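        /* For wait operations the mutex is passed in the action's value field;
         * for lock/trylock/unlock it is the action's location */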
933         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
934                 mutex = (std::mutex *)curr->get_location();
935                 state = mutex->get_state();
936         } else if (curr->is_wait()) {
937                 mutex = (std::mutex *)curr->get_value();
938                 state = mutex->get_state();
939         }
940
941         switch (curr->get_type()) {
942         case ATOMIC_TRYLOCK: {
943                 bool success = !state->islocked;
944                 curr->set_try_lock(success);
945                 if (!success) {
946                         get_thread(curr)->set_return_value(0);
947                         break;
948                 }
949                 get_thread(curr)->set_return_value(1);
950         }
951                 // otherwise, fall through to the lock case
952         case ATOMIC_LOCK: {
953                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
954                         assert_bug("Lock access before initialization");
955                 state->islocked = true;
956                 ModelAction *unlock = get_last_unlock(curr);
957                 //synchronize with the previous unlock statement
958                 if (unlock != NULL) {
959                         curr->synchronize_with(unlock);
960                         return true;
961                 }
962                 break;
963         }
964         case ATOMIC_UNLOCK: {
965                 //unlock the lock
966                 state->islocked = false;
967                 //wake up the other threads
968                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
969                 //activate all the waiting threads
970                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
971                         scheduler->wake(get_thread(*rit));
972                 }
973                 waiters->clear();
974                 break;
975         }
976         case ATOMIC_WAIT: {
977                 //unlock the lock
978                 state->islocked = false;
979                 //wake up the other threads
980                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
981                 //activate all the waiting threads
982                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
983                         scheduler->wake(get_thread(*rit));
984                 }
985                 waiters->clear();
986                 // check whether we should go to sleep or not... simulates a spurious wakeup
987                 if (curr->get_node()->get_misc() == 0) {
988                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
989                         //disable us
990                         scheduler->sleep(get_thread(curr));
991                 }
992                 break;
993         }
994         case ATOMIC_NOTIFY_ALL: {
995                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
996                 //activate all the waiting threads
997                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
998                         scheduler->wake(get_thread(*rit));
999                 }
1000                 waiters->clear();
1001                 break;
1002         }
1003         case ATOMIC_NOTIFY_ONE: {
1004                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1005                 int wakeupthread = curr->get_node()->get_misc();
1006                 action_list_t::iterator it = waiters->begin();
1007                 advance(it, wakeupthread);
1008                 scheduler->wake(get_thread(*it));
1009                 waiters->erase(it);
1010                 break;
1011         }
1012
1013         default:
1014                 ASSERT(0);
1015         }
1016         return false;
1017 }
1018
1019 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1020 {
1021         /* Do more ambitious checks now that mo is more complete */
1022         if (mo_may_allow(writer, reader)) {
1023                 Node *node = reader->get_node();
1024
1025                 /* Find an ancestor thread which exists at the time of the reader */
1026                 Thread *write_thread = get_thread(writer);
1027                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1028                         write_thread = write_thread->get_parent();
1029
1030                 struct future_value fv = {
1031                         writer->get_write_value(),
1032                         writer->get_seq_number() + params.maxfuturedelay,
1033                         write_thread->get_id(),
1034                 };
1035                 if (node->add_future_value(fv))
1036                         set_latest_backtrack(reader);
1037         }
1038 }
1039
1040 /**
1041  * Process a write ModelAction
1042  * @param curr The ModelAction to process
1043  * @return True if the mo_graph was updated or promises were resolved
1044  */
1045 bool ModelChecker::process_write(ModelAction *curr)
1046 {
1047         bool updated_mod_order = w_modification_order(curr);
1048         int promise_idx = get_promise_to_resolve(curr);
1049         bool updated_promises = false;
1050
1051         if (promise_idx >= 0)
1052                 updated_promises = resolve_promise(curr, promise_idx);
1053
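        /* Only hand out queued future values once no promises remain
         * outstanding; by then the modification order is more complete, so
         * mo_may_allow() can filter them more precisely */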
1054         if (promises->size() == 0) {
1055                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1056                         struct PendingFutureValue pfv = (*futurevalues)[i];
1057                         add_future_value(pfv.writer, pfv.act);
1058                 }
1059                 futurevalues->clear();
1060         }
1061
1062         mo_graph->commitChanges();
1063         mo_check_promises(curr, false);
1064
1065         get_thread(curr)->set_return_value(VALUE_NONE);
1066         return updated_mod_order || updated_promises;
1067 }
1068
1069 /**
1070  * Process a fence ModelAction
1071  * @param curr The ModelAction to process
1072  * @return True if synchronization was updated
1073  */
1074 bool ModelChecker::process_fence(ModelAction *curr)
1075 {
1076         /*
1077          * fence-relaxed: no-op
1078  * fence-release: only log the occurrence (not in this function), for
1079          *   use in later synchronization
1080          * fence-acquire (this function): search for hypothetical release
1081          *   sequences
1082          */
1083         bool updated = false;
1084         if (curr->is_acquire()) {
1085                 action_list_t *list = action_trace;
1086                 action_list_t::reverse_iterator rit;
1087                 /* Find X : is_read(X) && X --sb-> curr */
1088                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1089                         ModelAction *act = *rit;
1090                         if (act == curr)
1091                                 continue;
1092                         if (act->get_tid() != curr->get_tid())
1093                                 continue;
1094                         /* Stop at the beginning of the thread */
1095                         if (act->is_thread_start())
1096                                 break;
1097                         /* Stop once we reach a prior fence-acquire */
1098                         if (act->is_fence() && act->is_acquire())
1099                                 break;
1100                         if (!act->is_read())
1101                                 continue;
1102                         /* read-acquire will find its own release sequences */
1103                         if (act->is_acquire())
1104                                 continue;
1105
1106                         /* Establish hypothetical release sequences */
1107                         rel_heads_list_t release_heads;
1108                         get_release_seq_heads(curr, act, &release_heads);
1109                         for (unsigned int i = 0; i < release_heads.size(); i++)
1110                                 if (!curr->synchronize_with(release_heads[i]))
1111                                         set_bad_synchronization();
1112                         if (release_heads.size() != 0)
1113                                 updated = true;
1114                 }
1115         }
1116         return updated;
1117 }
1118
1119 /**
1120  * @brief Process the current action for thread-related activity
1121  *
1122  * Performs current-action processing for a THREAD_* ModelAction. Processing
1123  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1124  * synchronization, etc.  This function is a no-op for non-THREAD actions
1125  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1126  *
1127  * @param curr The current action
1128  * @return True if synchronization was updated or a thread completed
1129  */
1130 bool ModelChecker::process_thread_action(ModelAction *curr)
1131 {
1132         bool updated = false;
1133
1134         switch (curr->get_type()) {
1135         case THREAD_CREATE: {
1136                 thrd_t *thrd = (thrd_t *)curr->get_location();
1137                 struct thread_params *params = (struct thread_params *)curr->get_value();
1138                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1139                 add_thread(th);
1140                 th->set_creation(curr);
1141                 /* Promises can be satisfied by children */
1142                 for (unsigned int i = 0; i < promises->size(); i++) {
1143                         Promise *promise = (*promises)[i];
1144                         if (promise->thread_is_available(curr->get_tid()))
1145                                 promise->add_thread(th->get_id());
1146                 }
1147                 break;
1148         }
1149         case THREAD_JOIN: {
1150                 Thread *blocking = curr->get_thread_operand();
1151                 ModelAction *act = get_last_action(blocking->get_id());
1152                 curr->synchronize_with(act);
1153                 updated = true; /* trigger rel-seq checks */
1154                 break;
1155         }
1156         case THREAD_FINISH: {
1157                 Thread *th = get_thread(curr);
1158                 while (!th->wait_list_empty()) {
1159                         ModelAction *act = th->pop_wait_list();
1160                         scheduler->wake(get_thread(act));
1161                 }
1162                 th->complete();
1163                 /* Completed thread can't satisfy promises */
1164                 for (unsigned int i = 0; i < promises->size(); i++) {
1165                         Promise *promise = (*promises)[i];
1166                         if (promise->thread_is_available(th->get_id()))
1167                                 if (promise->eliminate_thread(th->get_id()))
1168                                         priv->failed_promise = true;
1169                 }
1170                 updated = true; /* trigger rel-seq checks */
1171                 break;
1172         }
1173         case THREAD_START: {
1174                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1175                 break;
1176         }
1177         default:
1178                 break;
1179         }
1180
1181         return updated;
1182 }
1183
1184 /**
1185  * @brief Process the current action for release sequence fixup activity
1186  *
1187  * Performs model-checker release sequence fixups for the current action,
1188  * forcing a single pending release sequence to break (with a given, potential
1189  * "loose" write) or to complete (i.e., synchronize). If a pending release
1190  * sequence forms a complete release sequence, then we must perform the fixup
1191  * synchronization, mo_graph additions, etc.
1192  *
1193  * @param curr The current action; must be a release sequence fixup action
1194  * @param work_queue The work queue to which to add work items as they are
1195  * generated
1196  */
1197 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1198 {
1199         const ModelAction *write = curr->get_node()->get_relseq_break();
1200         struct release_seq *sequence = pending_rel_seqs->back();
1201         pending_rel_seqs->pop_back();
1202         ASSERT(sequence);
1203         ModelAction *acquire = sequence->acquire;
1204         const ModelAction *rf = sequence->rf;
1205         const ModelAction *release = sequence->release;
1206         ASSERT(acquire);
1207         ASSERT(release);
1208         ASSERT(rf);
1209         ASSERT(release->same_thread(rf));
1210
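        /* A NULL 'break' write means this node chose to let the release
         * sequence complete (synchronize); a non-NULL write forces the
         * sequence to break at that write */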
1211         if (write == NULL) {
1212                 /**
1213                  * @todo Forcing a synchronization requires that we set
1214                  * modification order constraints. For instance, we can't allow
1215                  * a fixup sequence in which two separate read-acquire
1216                  * operations read from the same sequence, where the first one
1217                  * synchronizes and the other doesn't. Essentially, we can't
1218                  * allow any writes to insert themselves between 'release' and
1219                  * 'rf'
1220                  */
1221
1222                 /* Must synchronize */
1223                 if (!acquire->synchronize_with(release)) {
1224                         set_bad_synchronization();
1225                         return;
1226                 }
1227                 /* Re-check all pending release sequences */
1228                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1229                 /* Re-check act for mo_graph edges */
1230                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1231
1232                 /* propagate synchronization to later actions */
1233                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1234                 for (; (*rit) != acquire; rit++) {
1235                         ModelAction *propagate = *rit;
1236                         if (acquire->happens_before(propagate)) {
1237                                 propagate->synchronize_with(acquire);
1238                                 /* Re-check 'propagate' for mo_graph edges */
1239                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1240                         }
1241                 }
1242         } else {
1243                 /* Break release sequence with new edges:
1244                  *   release --mo--> write --mo--> rf */
1245                 mo_graph->addEdge(release, write);
1246                 mo_graph->addEdge(write, rf);
1247         }
1248
1249         /* See if we have realized a data race */
1250         checkDataRaces();
1251 }
1252
1253 /**
1254  * Initialize the current action by performing one or more of the following
1255  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1256  * in the NodeStack, manipulating backtracking sets, allocating and
1257  * initializing clock vectors, and computing the promises to fulfill.
1258  *
1259  * @param curr The current action, as passed from the user context; may be
1260  * freed/invalidated after the execution of this function, with a different
1261  * action "returned" its place (pass-by-reference)
1262  * @return True if curr is a newly-explored action; false otherwise
1263  */
1264 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1265 {
1266         ModelAction *newcurr;
1267
1268         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1269                 newcurr = process_rmw(*curr);
1270                 delete *curr;
1271
1272                 if (newcurr->is_rmw())
1273                         compute_promises(newcurr);
1274
1275                 *curr = newcurr;
1276                 return false;
1277         }
1278
1279         (*curr)->set_seq_number(get_next_seq_num());
1280
1281         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1282         if (newcurr) {
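                /* Non-NULL: this position in the NodeStack was explored
                 * before, so we are replaying; reuse the stored action */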
1283                 /* First restore type and order in case of RMW operation */
1284                 if ((*curr)->is_rmwr())
1285                         newcurr->copy_typeandorder(*curr);
1286
1287                 ASSERT((*curr)->get_location() == newcurr->get_location());
1288                 newcurr->copy_from_new(*curr);
1289
1290                 /* Discard duplicate ModelAction; use action from NodeStack */
1291                 delete *curr;
1292
1293                 /* Always compute new clock vector */
1294                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1295
1296                 *curr = newcurr;
1297                 return false; /* Action was explored previously */
1298         } else {
1299                 newcurr = *curr;
1300
1301                 /* Always compute new clock vector */
1302                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1303
1304                 /* Assign most recent release fence */
1305                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1306
1307                 /*
1308                  * Perform one-time actions when pushing new ModelAction onto
1309                  * NodeStack
1310                  */
1311                 if (newcurr->is_write())
1312                         compute_promises(newcurr);
1313                 else if (newcurr->is_relseq_fixup())
1314                         compute_relseq_breakwrites(newcurr);
1315                 else if (newcurr->is_wait())
1316                         newcurr->get_node()->set_misc_max(2);
1317                 else if (newcurr->is_notify_one()) {
1318                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1319                 }
1320                 return true; /* This was a new ModelAction */
1321         }
1322 }
1323
1324 /**
1325  * @brief Establish reads-from relation between two actions
1326  *
1327  * Perform basic operations involved with establishing a concrete rf relation,
1328  * including setting the ModelAction data and checking for release sequences.
1329  *
1330  * @param act The action that is reading (must be a read)
1331  * @param rf The action from which we are reading (must be a write)
1332  *
1333  * @return True if this read established synchronization
1334  */
1335 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1336 {
1337         ASSERT(rf);
1338         act->set_read_from(rf);
1339         if (act->is_acquire()) {
1340                 rel_heads_list_t release_heads;
1341                 get_release_seq_heads(act, act, &release_heads);
1342                 int num_heads = release_heads.size();
1343                 for (unsigned int i = 0; i < release_heads.size(); i++)
1344                         if (!act->synchronize_with(release_heads[i])) {
1345                                 set_bad_synchronization();
1346                                 num_heads--;
1347                         }
1348                 return num_heads > 0;
1349         }
1350         return false;
1351 }
1352
1353 /**
1354  * Check promises and eliminate potentially-satisfying threads when a thread is
1355  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1356  * no longer satisfy a promise generated from that thread.
1357  *
1358  * @param blocker The thread on which a thread is waiting
1359  * @param waiting The waiting thread
1360  */
1361 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1362 {
1363         for (unsigned int i = 0; i < promises->size(); i++) {
1364                 Promise *promise = (*promises)[i];
1365                 if (!promise->thread_is_available(waiting->get_id()))
1366                         continue;
1367                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1368                         ModelAction *reader = promise->get_reader(j);
1369                         if (reader->get_tid() != blocker->get_id())
1370                                 continue;
1371                         if (promise->eliminate_thread(waiting->get_id())) {
1372                                 /* Promise has failed */
1373                                 priv->failed_promise = true;
1374                         } else {
1375                                 /* Only eliminate the 'waiting' thread once */
1376                                 return;
1377                         }
1378                 }
1379         }
1380 }
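
/*
 * Illustrative sketch (hypothetical scenario, not checker code): suppose a
 * load in thread B is currently reading from a Promise, and thread A then
 * joins B. A is blocked until B completes, so no future store by A can be
 * read by B's pending load; A is eliminated from that Promise:
 *
 *     // Thread B:
 *     int v = x.load(std::memory_order_relaxed); // may read a promised future value
 *
 *     // Thread A:
 *     thrd_join(B);                              // A now waits on B...
 *     x.store(1, std::memory_order_relaxed);     // ...so this store can no longer
 *                                                //    satisfy the promise for B's load
 */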
1381
1382 /**
1383  * @brief Check whether a model action is enabled.
1384  *
1385  * Checks whether a lock or join operation would succeed (i.e., the lock is
1386  * not already held, or the thread being joined has already completed). If it
1387  * would not succeed, the action is placed on a waiter list.
1388  *
1389  * @param curr The ModelAction to check
1390  * @return True if the action is enabled; false otherwise
1391  */
1392 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1393         if (curr->is_lock()) {
1394                 std::mutex *lock = (std::mutex *)curr->get_location();
1395                 struct std::mutex_state *state = lock->get_state();
1396                 if (state->islocked) {
1397                         //Stick the action in the appropriate waiting queue
1398                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1399                         return false;
1400                 }
1401         } else if (curr->get_type() == THREAD_JOIN) {
1402                 Thread *blocking = (Thread *)curr->get_location();
1403                 if (!blocking->is_complete()) {
1404                         blocking->push_wait_list(curr);
1405                         thread_blocking_check_promises(blocking, get_thread(curr));
1406                         return false;
1407                 }
1408         }
1409
1410         return true;
1411 }
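
/*
 * Illustrative sketch (user-level code with hypothetical names): if Thread 2
 * attempts m.lock() while Thread 1 still holds m, the lock action is not
 * enabled; it is queued on the lock's waiter list and Thread 2 sleeps until
 * the matching unlock wakes it:
 *
 *     std::mutex m;
 *
 *     // Thread 1:
 *     m.lock();
 *     // ... critical section ...
 *     m.unlock();
 *
 *     // Thread 2:
 *     m.lock();    // disabled while Thread 1 holds m
 *     m.unlock();
 */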
1412
1413 /**
1414  * This is the heart of the model checker routine. It performs model-checking
1415  * actions corresponding to a given "current action." Among other tasks, it
1416  * calculates reads-from relationships, updates synchronization clock vectors,
1417  * forms a memory_order constraints graph, and handles replay/backtrack
1418  * execution when running permutations of previously-observed executions.
1419  *
1420  * @param curr The current action to process
1421  * @return The ModelAction that is actually executed; may differ from curr;
1422  * may be NULL if the current action is not enabled to run
1423  */
1424 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1425 {
1426         ASSERT(curr);
1427         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1428
1429         if (!check_action_enabled(curr)) {
1430                 /* Make the execution look like we chose to run this action
1431                  * much later, when a lock/join can succeed */
1432                 get_thread(curr)->set_pending(curr);
1433                 scheduler->sleep(get_thread(curr));
1434                 return NULL;
1435         }
1436
1437         bool newly_explored = initialize_curr_action(&curr);
1438
1439         DBG();
1440         if (DBG_ENABLED())
1441                 curr->print();
1442
1443         wake_up_sleeping_actions(curr);
1444
1445         /* Add the action to lists before any other model-checking tasks */
1446         if (!second_part_of_rmw)
1447                 add_action_to_lists(curr);
1448
1449         /* Build may_read_from set for newly-created actions */
1450         if (newly_explored && curr->is_read())
1451                 build_may_read_from(curr);
1452
1453         /* Initialize work_queue with the "current action" work */
1454         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1455         while (!work_queue.empty() && !has_asserted()) {
1456                 WorkQueueEntry work = work_queue.front();
1457                 work_queue.pop_front();
1458
1459                 switch (work.type) {
1460                 case WORK_CHECK_CURR_ACTION: {
1461                         ModelAction *act = work.action;
1462                         bool update = false; /* update this location's release seq's */
1463                         bool update_all = false; /* update all release seq's */
1464
1465                         if (process_thread_action(curr))
1466                                 update_all = true;
1467
1468                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1469                                 update = true;
1470
1471                         if (act->is_write() && process_write(act))
1472                                 update = true;
1473
1474                         if (act->is_fence() && process_fence(act))
1475                                 update_all = true;
1476
1477                         if (act->is_mutex_op() && process_mutex(act))
1478                                 update_all = true;
1479
1480                         if (act->is_relseq_fixup())
1481                                 process_relseq_fixup(curr, &work_queue);
1482
1483                         if (update_all)
1484                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1485                         else if (update)
1486                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1487                         break;
1488                 }
1489                 case WORK_CHECK_RELEASE_SEQ:
1490                         resolve_release_sequences(work.location, &work_queue);
1491                         break;
1492                 case WORK_CHECK_MO_EDGES: {
1493                         /** @todo Complete verification of work_queue */
1494                         ModelAction *act = work.action;
1495                         bool updated = false;
1496
1497                         if (act->is_read()) {
1498                                 const ModelAction *rf = act->get_reads_from();
1499                                 const Promise *promise = act->get_reads_from_promise();
1500                                 if (rf) {
1501                                         if (r_modification_order(act, rf))
1502                                                 updated = true;
1503                                 } else if (promise) {
1504                                         if (r_modification_order(act, promise))
1505                                                 updated = true;
1506                                 }
1507                         }
1508                         if (act->is_write()) {
1509                                 if (w_modification_order(act))
1510                                         updated = true;
1511                         }
1512                         mo_graph->commitChanges();
1513
1514                         if (updated)
1515                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1516                         break;
1517                 }
1518                 default:
1519                         ASSERT(false);
1520                         break;
1521                 }
1522         }
1523
1524         check_curr_backtracking(curr);
1525         set_backtracking(curr);
1526         return curr;
1527 }
1528
1529 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1530 {
1531         Node *currnode = curr->get_node();
1532         Node *parnode = currnode->get_parent();
1533
1534         if ((parnode && !parnode->backtrack_empty()) ||
1535                          !currnode->misc_empty() ||
1536                          !currnode->read_from_empty() ||
1537                          !currnode->promise_empty() ||
1538                          !currnode->relseq_break_empty()) {
1539                 set_latest_backtrack(curr);
1540         }
1541 }
1542
1543 bool ModelChecker::promises_expired() const
1544 {
1545         for (unsigned int i = 0; i < promises->size(); i++) {
1546                 Promise *promise = (*promises)[i];
1547                 if (promise->get_expiration() < priv->used_sequence_numbers)
1548                         return true;
1549         }
1550         return false;
1551 }
1552
1553 /**
1554  * This is the strongest feasibility check available.
1555  * @return whether the current trace (partial or complete) must be a prefix of
1556  * a feasible trace.
1557  */
1558 bool ModelChecker::isfeasibleprefix() const
1559 {
1560         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1561 }
1562
1563 /**
1564  * Print diagnostic information about an infeasible execution
1565  * @param prefix A string to prefix the output with; if NULL, then a default
1566  * message prefix will be provided
1567  */
1568 void ModelChecker::print_infeasibility(const char *prefix) const
1569 {
1570 	char buf[160]; /* large enough to hold all infeasibility messages */
1571         char *ptr = buf;
1572         if (mo_graph->checkForCycles())
1573                 ptr += sprintf(ptr, "[mo cycle]");
1574         if (priv->failed_promise)
1575                 ptr += sprintf(ptr, "[failed promise]");
1576         if (priv->too_many_reads)
1577                 ptr += sprintf(ptr, "[too many reads]");
1578         if (priv->no_valid_reads)
1579                 ptr += sprintf(ptr, "[no valid reads-from]");
1580         if (priv->bad_synchronization)
1581                 ptr += sprintf(ptr, "[bad sw ordering]");
1582         if (promises_expired())
1583                 ptr += sprintf(ptr, "[promise expired]");
1584         if (promises->size() != 0)
1585                 ptr += sprintf(ptr, "[unresolved promise]");
1586         if (ptr != buf)
1587                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1588 }
1589
1590 /**
1591  * Returns whether the current completed trace is feasible, except for pending
1592  * release sequences.
1593  */
1594 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1595 {
1596         return !is_infeasible() && promises->size() == 0;
1597 }
1598
1599 /**
1600  * Check if the current partial trace is infeasible. Does not check any
1601  * end-of-execution flags, which might rule out the execution. Thus, this is
1602  * useful only for ruling an execution as infeasible.
1603  * @return whether the current partial trace is infeasible.
1604  */
1605 bool ModelChecker::is_infeasible() const
1606 {
1607         return mo_graph->checkForCycles() ||
1608                 priv->no_valid_reads ||
1609                 priv->failed_promise ||
1610                 priv->too_many_reads ||
1611                 priv->bad_synchronization ||
1612                 promises_expired();
1613 }
1614
1615 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1616 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1617         ModelAction *lastread = get_last_action(act->get_tid());
1618         lastread->process_rmw(act);
1619         if (act->is_rmw()) {
1620                 if (lastread->get_reads_from())
1621                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1622                 else
1623                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1624                 mo_graph->commitChanges();
1625         }
1626         return lastread;
1627 }
1628
1629 /**
1630  * Checks whether a thread has read from the same write too many times
1631  * without seeing the effects of a later write.
1632  *
1633  * Basic idea:
1634  * 1) there must be a different write that we could read from that would satisfy the modification order,
1635  * 2) we must have read from the same value in excess of maxreads times, and
1636  * 3) that other write must have been in the reads_from set for maxreads times.
1637  *
1638  * If so, we decide that the execution is no longer feasible.
1639  */
1640 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1641 {
1642         if (params.maxreads != 0) {
1643                 if (curr->get_node()->get_read_from_past_size() <= 1)
1644                         return;
1645                 //Must make sure that execution is currently feasible...  We could
1646 		//accidentally clear the infeasibility flags by rolling back
1647                 if (is_infeasible())
1648                         return;
1649                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1650                 int tid = id_to_int(curr->get_tid());
1651
1652                 /* Skip checks */
1653                 if ((int)thrd_lists->size() <= tid)
1654                         return;
1655                 action_list_t *list = &(*thrd_lists)[tid];
1656
1657                 action_list_t::reverse_iterator rit = list->rbegin();
1658                 /* Skip past curr */
1659                 for (; (*rit) != curr; rit++)
1660                         ;
1661                 /* go past curr now */
1662                 rit++;
1663
1664                 action_list_t::reverse_iterator ritcopy = rit;
1665                 //See if we have enough reads from the same value
1666                 int count = 0;
1667                 for (; count < params.maxreads; rit++, count++) {
1668                         if (rit == list->rend())
1669                                 return;
1670                         ModelAction *act = *rit;
1671                         if (!act->is_read())
1672                                 return;
1673
1674                         if (act->get_reads_from() != rf)
1675                                 return;
1676                         if (act->get_node()->get_read_from_past_size() <= 1)
1677                                 return;
1678                 }
1679                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1680                         /* Get write */
1681                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1682
1683                         /* Need a different write */
1684                         if (write == rf)
1685                                 continue;
1686
1687                         /* Test to see whether this is a feasible write to read from */
1688                         /** NOTE: all members of read-from set should be
1689                          *  feasible, so we no longer check it here **/
1690
1691                         rit = ritcopy;
1692
1693                         bool feasiblewrite = true;
1694 			//now we need to see if this write works for everyone
1695
1696                         for (int loop = count; loop > 0; loop--, rit++) {
1697                                 ModelAction *act = *rit;
1698                                 bool foundvalue = false;
1699                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1700                                         if (act->get_node()->get_read_from_past(j) == write) {
1701                                                 foundvalue = true;
1702                                                 break;
1703                                         }
1704                                 }
1705                                 if (!foundvalue) {
1706                                         feasiblewrite = false;
1707                                         break;
1708                                 }
1709                         }
1710                         if (feasiblewrite) {
1711                                 priv->too_many_reads = true;
1712                                 return;
1713                         }
1714                 }
1715         }
1716 }
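
/*
 * Illustrative sketch (user-level C++11 code, hypothetical): the maxreads
 * bound typically cuts off executions in which a spin loop keeps re-reading
 * a stale value even though a newer store could be read instead:
 *
 *     std::atomic<bool> done(false);
 *
 *     // Thread 1:
 *     done.store(true, std::memory_order_relaxed);
 *
 *     // Thread 2:
 *     while (!done.load(std::memory_order_relaxed))
 *             ;   // once this load has returned 'false' more than
 *                 // params.maxreads times while the store of 'true' was in
 *                 // every read's read-from set, the execution is flagged
 *                 // too_many_reads and treated as infeasible
 */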
1717
1718 /**
1719  * Updates the mo_graph with the constraints imposed from the current
1720  * read.
1721  *
1722  * Basic idea is the following: Go through each other thread and find
1723  * the last action that happened before our read.  Two cases:
1724  *
1725  * (1) The action is a write => that write must either occur before
1726  * the write we read from or be the write we read from.
1727  *
1728  * (2) The action is a read => the write that that action read from
1729  * must occur before the write we read from or be the same write.
1730  *
1731  * @param curr The current action. Must be a read.
1732  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1733  * @return True if modification order edges were added; false otherwise
1734  */
1735 template <typename rf_type>
1736 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1737 {
1738         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1739         unsigned int i;
1740         bool added = false;
1741         ASSERT(curr->is_read());
1742
1743         /* Last SC fence in the current thread */
1744         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1745
1746         /* Iterate over all threads */
1747         for (i = 0; i < thrd_lists->size(); i++) {
1748                 /* Last SC fence in thread i */
1749                 ModelAction *last_sc_fence_thread_local = NULL;
1750                 if (int_to_id((int)i) != curr->get_tid())
1751                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1752
1753                 /* Last SC fence in thread i, before last SC fence in current thread */
1754                 ModelAction *last_sc_fence_thread_before = NULL;
1755                 if (last_sc_fence_local)
1756                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1757
1758                 /* Iterate over actions in thread, starting from most recent */
1759                 action_list_t *list = &(*thrd_lists)[i];
1760                 action_list_t::reverse_iterator rit;
1761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1762                         ModelAction *act = *rit;
1763
1764                         if (act->is_write() && !act->equals(rf) && act != curr) {
1765                                 /* C++, Section 29.3 statement 5 */
1766                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1767                                                 *act < *last_sc_fence_thread_local) {
1768                                         added = mo_graph->addEdge(act, rf) || added;
1769                                         break;
1770                                 }
1771                                 /* C++, Section 29.3 statement 4 */
1772                                 else if (act->is_seqcst() && last_sc_fence_local &&
1773                                                 *act < *last_sc_fence_local) {
1774                                         added = mo_graph->addEdge(act, rf) || added;
1775                                         break;
1776                                 }
1777                                 /* C++, Section 29.3 statement 6 */
1778                                 else if (last_sc_fence_thread_before &&
1779                                                 *act < *last_sc_fence_thread_before) {
1780                                         added = mo_graph->addEdge(act, rf) || added;
1781                                         break;
1782                                 }
1783                         }
1784
1785                         /*
1786                          * Include at most one act per-thread that "happens
1787                          * before" curr. Don't consider reflexively.
1788                          */
1789                         if (act->happens_before(curr) && act != curr) {
1790                                 if (act->is_write()) {
1791                                         if (!act->equals(rf)) {
1792                                                 added = mo_graph->addEdge(act, rf) || added;
1793                                         }
1794                                 } else {
1795                                         const ModelAction *prevrf = act->get_reads_from();
1796                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1797                                         if (prevrf) {
1798                                                 if (!prevrf->equals(rf))
1799                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1800                                         } else if (!prevrf_promise->equals(rf)) {
1801                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1802                                         }
1803                                 }
1804                                 break;
1805                         }
1806                 }
1807         }
1808
1809         /*
1810          * All compatible, thread-exclusive promises must be ordered after any
1811          * concrete loads from the same thread
1812          */
1813         for (unsigned int i = 0; i < promises->size(); i++)
1814                 if ((*promises)[i]->is_compatible_exclusive(curr))
1815                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1816
1817         return added;
1818 }
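
/*
 * Illustrative sketch (hypothetical history, not checker code) of case (2)
 * above: if an earlier read R1 happens before the current read R2 (e.g., R1
 * is sequenced before R2 in the same thread), the write R1 observed cannot
 * be mo-later than the write R2 reads from:
 *
 *     W1: x.store(1, std::memory_order_relaxed);
 *     R1: x.load(std::memory_order_relaxed);    // reads from W1
 *     W2: x.store(2, std::memory_order_relaxed);
 *     R2: x.load(std::memory_order_relaxed);    // reads from W2 (== rf)
 *
 * Here r_modification_order(R2, W2) adds the edge W1 --mo--> W2, because a
 * read that happens before R2 already observed W1.
 */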
1819
1820 /**
1821  * Updates the mo_graph with the constraints imposed from the current write.
1822  *
1823  * Basic idea is the following: Go through each other thread and find
1824  * the latest action that happened before our write.  Two cases:
1825  *
1826  * (1) The action is a write => that write must occur before
1827  * the current write
1828  *
1829  * (2) The action is a read => the write that that action read from
1830  * must occur before the current write.
1831  *
1832  * This method also handles two other issues:
1833  *
1834  * (I) Sequential Consistency: Making sure that if the current write is
1835  * seq_cst, it occurs after the previous seq_cst write.
1836  *
1837  * (II) Sending the write back to non-synchronizing reads.
1838  *
1839  * @param curr The current action. Must be a write.
1840  * @return True if modification order edges were added; false otherwise
1841  */
1842 bool ModelChecker::w_modification_order(ModelAction *curr)
1843 {
1844         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1845         unsigned int i;
1846         bool added = false;
1847         ASSERT(curr->is_write());
1848
1849         if (curr->is_seqcst()) {
1850                 /* We have to at least see the last sequentially consistent write,
1851                          so we are initialized. */
1852                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1853                 if (last_seq_cst != NULL) {
1854                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1855                 }
1856         }
1857
1858         /* Last SC fence in the current thread */
1859         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1860
1861         /* Iterate over all threads */
1862         for (i = 0; i < thrd_lists->size(); i++) {
1863                 /* Last SC fence in thread i, before last SC fence in current thread */
1864                 ModelAction *last_sc_fence_thread_before = NULL;
1865                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1866                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1867
1868                 /* Iterate over actions in thread, starting from most recent */
1869                 action_list_t *list = &(*thrd_lists)[i];
1870                 action_list_t::reverse_iterator rit;
1871                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1872                         ModelAction *act = *rit;
1873                         if (act == curr) {
1874                                 /*
1875                                  * 1) If RMW and it actually read from something, then we
1876                                  * already have all relevant edges, so just skip to next
1877                                  * thread.
1878                                  *
1879 				 * 2) If RMW and it didn't read from anything, we should add
1880                                  * whatever edge we can get to speed up convergence.
1881                                  *
1882                                  * 3) If normal write, we need to look at earlier actions, so
1883                                  * continue processing list.
1884                                  */
1885                                 if (curr->is_rmw()) {
1886                                         if (curr->get_reads_from() != NULL)
1887                                                 break;
1888                                         else
1889                                                 continue;
1890                                 } else
1891                                         continue;
1892                         }
1893
1894                         /* C++, Section 29.3 statement 7 */
1895                         if (last_sc_fence_thread_before && act->is_write() &&
1896                                         *act < *last_sc_fence_thread_before) {
1897                                 added = mo_graph->addEdge(act, curr) || added;
1898                                 break;
1899                         }
1900
1901                         /*
1902                          * Include at most one act per-thread that "happens
1903                          * before" curr
1904                          */
1905                         if (act->happens_before(curr)) {
1906                                 /*
1907                                  * Note: if act is RMW, just add edge:
1908                                  *   act --mo--> curr
1909                                  * The following edge should be handled elsewhere:
1910                                  *   readfrom(act) --mo--> act
1911                                  */
1912                                 if (act->is_write())
1913                                         added = mo_graph->addEdge(act, curr) || added;
1914                                 else if (act->is_read()) {
1915                                         //if previous read accessed a null, just keep going
1916                                         if (act->get_reads_from() == NULL)
1917                                                 continue;
1918                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1919                                 }
1920                                 break;
1921                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1922                                                      !act->same_thread(curr)) {
1923                                 /* We have an action that:
1924                                    (1) did not happen before us
1925                                    (2) is a read and we are a write
1926                                    (3) cannot synchronize with us
1927                                    (4) is in a different thread
1928                                    =>
1929                                    that read could potentially read from our write.  Note that
1930                                    these checks are overly conservative at this point, we'll
1931                                    do more checks before actually removing the
1932                                    pendingfuturevalue.
1933
1934                                  */
1935                                 if (thin_air_constraint_may_allow(curr, act)) {
1936                                         if (!is_infeasible())
1937                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1938                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1939                                                 add_future_value(curr, act);
1940                                 }
1941                         }
1942                 }
1943         }
1944
1945         /*
1946          * All compatible, thread-exclusive promises must be ordered after any
1947  * concrete stores from the same thread, or else they can be merged with
1948          * this store later
1949          */
1950         for (unsigned int i = 0; i < promises->size(); i++)
1951                 if ((*promises)[i]->is_compatible_exclusive(curr))
1952                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1953
1954         return added;
1955 }
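
/*
 * Illustrative sketch (hypothetical history, not checker code) of case (2)
 * above: a read that happens before the current write pins the write it
 * observed earlier in modification order:
 *
 *     W1: x.store(1, std::memory_order_relaxed);
 *     R1: x.load(std::memory_order_relaxed);     // reads from W1
 *     W2: x.store(2, std::memory_order_relaxed); // current write; R1 happens before it
 *
 * Here w_modification_order(W2) adds the edge W1 --mo--> W2, since W2 was
 * issued after a read that already observed W1.
 */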
1956
1957 /** Arbitrary reads from the future are not allowed.  Section 29.3
1958  * part 9 places some constraints.  This method checks one result of that
1959  * constraint.  Others require compiler support. */
1960 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1961 {
1962         if (!writer->is_rmw())
1963                 return true;
1964
1965         if (!reader->is_rmw())
1966                 return true;
1967
1968         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1969                 if (search == reader)
1970                         return false;
1971                 if (search->get_tid() == reader->get_tid() &&
1972                                 search->happens_before(reader))
1973                         break;
1974         }
1975
1976         return true;
1977 }
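
/*
 * Illustrative sketch (hypothetical, not checker code): the shape rejected
 * here is a cycle of RMWs justifying each other through the future, e.g. two
 * relaxed fetch_add operations where each would read the value produced by
 * the other:
 *
 *     A: x.fetch_add(1, std::memory_order_relaxed);   // thread 1
 *     B: x.fetch_add(1, std::memory_order_relaxed);   // thread 2
 *
 * If A already reads (transitively, along its reads-from chain) from B, then
 * letting B read from A would make the two results appear out of thin air,
 * so thin_air_constraint_may_allow(A, B) returns false.
 */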
1978
1979 /**
1980  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1981  * some constraints. This method checks the following constraint (others
1982  * require compiler support):
1983  *
1984  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1985  */
1986 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1987 {
1988         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1989         unsigned int i;
1990         /* Iterate over all threads */
1991         for (i = 0; i < thrd_lists->size(); i++) {
1992                 const ModelAction *write_after_read = NULL;
1993
1994                 /* Iterate over actions in thread, starting from most recent */
1995                 action_list_t *list = &(*thrd_lists)[i];
1996                 action_list_t::reverse_iterator rit;
1997                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1998                         ModelAction *act = *rit;
1999
2000                         /* Don't disallow due to act == reader */
2001                         if (!reader->happens_before(act) || reader == act)
2002                                 break;
2003                         else if (act->is_write())
2004                                 write_after_read = act;
2005                         else if (act->is_read() && act->get_reads_from() != NULL)
2006                                 write_after_read = act->get_reads_from();
2007                 }
2008
2009                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2010                         return false;
2011         }
2012         return true;
2013 }
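
/*
 * Illustrative sketch (hypothetical history, not checker code) of the
 * constraint above, with writer == Z and reader == X:
 *
 *     X: r = x.load(std::memory_order_relaxed);   // proposed to read from Z
 *     Y: x.store(1, std::memory_order_relaxed);   // X happens before Y
 *     Z: x.store(2, std::memory_order_relaxed);   // Y --mo--> Z
 *
 * Because X happens before the write Y and Y is mo-ordered before Z, letting
 * X read from Z would violate coherence, so mo_may_allow(Z, X) returns
 * false.
 */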
2014
2015 /**
2016  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2017  * The ModelAction under consideration is expected to be taking part in
2018  * release/acquire synchronization as an object of the "reads from" relation.
2019  * Note that this can only provide release sequence support for RMW chains
2020  * which do not read from the future, as those actions cannot be traced until
2021  * their "promise" is fulfilled. Similarly, we may not even establish the
2022  * presence of a release sequence with certainty, as some modification order
2023  * constraints may be decided further in the future. Thus, this function
2024  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2025  * and a boolean representing certainty.
2026  *
2027  * @param rf The action that might be part of a release sequence. Must be a
2028  * write.
2029  * @param release_heads A pass-by-reference style return parameter. After
2030  * execution of this function, release_heads will contain the heads of all the
2031  * relevant release sequences, if any exist with certainty
2032  * @param pending A pass-by-reference style return parameter which is only used
2033  * when returning false (i.e., uncertain). Returns the information gathered
2034  * about an uncertain release sequence, including any write operations that might
2035  * break the sequence.
2036  * @return true, if the ModelChecker is certain that release_heads is complete;
2037  * false otherwise
2038  */
2039 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2040                 rel_heads_list_t *release_heads,
2041                 struct release_seq *pending) const
2042 {
2043         /* Only check for release sequences if there are no cycles */
2044         if (mo_graph->checkForCycles())
2045                 return false;
2046
2047         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2048                 ASSERT(rf->is_write());
2049
2050                 if (rf->is_release())
2051                         release_heads->push_back(rf);
2052                 else if (rf->get_last_fence_release())
2053                         release_heads->push_back(rf->get_last_fence_release());
2054                 if (!rf->is_rmw())
2055                         break; /* End of RMW chain */
2056
2057                 /** @todo Need to be smarter here...  In the linux lock
2058                  * example, this will run to the beginning of the program for
2059                  * every acquire. */
2060                 /** @todo The way to be smarter here is to keep going until 1
2061                  * thread has a release preceded by an acquire and you've seen
2062                  *       both. */
2063
2064                 /* acq_rel RMW is a sufficient stopping condition */
2065                 if (rf->is_acquire() && rf->is_release())
2066                         return true; /* complete */
2067 	}
2068         if (!rf) {
2069                 /* read from future: need to settle this later */
2070                 pending->rf = NULL;
2071                 return false; /* incomplete */
2072         }
2073
2074         if (rf->is_release())
2075                 return true; /* complete */
2076
2077         /* else relaxed write
2078          * - check for fence-release in the same thread (29.8, stmt. 3)
2079          * - check modification order for contiguous subsequence
2080          *   -> rf must be same thread as release */
2081
2082         const ModelAction *fence_release = rf->get_last_fence_release();
2083         /* Synchronize with a fence-release unconditionally; we don't need to
2084          * find any more "contiguous subsequence..." for it */
2085         if (fence_release)
2086                 release_heads->push_back(fence_release);
2087
2088         int tid = id_to_int(rf->get_tid());
2089         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2090         action_list_t *list = &(*thrd_lists)[tid];
2091         action_list_t::const_reverse_iterator rit;
2092
2093         /* Find rf in the thread list */
2094         rit = std::find(list->rbegin(), list->rend(), rf);
2095         ASSERT(rit != list->rend());
2096
2097         /* Find the last {write,fence}-release */
2098         for (; rit != list->rend(); rit++) {
2099                 if (fence_release && *(*rit) < *fence_release)
2100                         break;
2101                 if ((*rit)->is_release())
2102                         break;
2103         }
2104         if (rit == list->rend()) {
2105                 /* No write-release in this thread */
2106                 return true; /* complete */
2107         } else if (fence_release && *(*rit) < *fence_release) {
2108                 /* The fence-release is more recent (and so, "stronger") than
2109                  * the most recent write-release */
2110                 return true; /* complete */
2111         } /* else, need to establish contiguous release sequence */
2112         ModelAction *release = *rit;
2113
2114         ASSERT(rf->same_thread(release));
2115
2116         pending->writes.clear();
2117
2118         bool certain = true;
2119         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2120                 if (id_to_int(rf->get_tid()) == (int)i)
2121                         continue;
2122                 list = &(*thrd_lists)[i];
2123
2124                 /* Can we ensure no future writes from this thread may break
2125                  * the release seq? */
2126                 bool future_ordered = false;
2127
2128                 ModelAction *last = get_last_action(int_to_id(i));
2129                 Thread *th = get_thread(int_to_id(i));
2130                 if ((last && rf->happens_before(last)) ||
2131                                 !is_enabled(th) ||
2132                                 th->is_complete())
2133                         future_ordered = true;
2134
2135                 ASSERT(!th->is_model_thread() || future_ordered);
2136
2137                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2138                         const ModelAction *act = *rit;
2139                         /* Reach synchronization -> this thread is complete */
2140                         if (act->happens_before(release))
2141                                 break;
2142                         if (rf->happens_before(act)) {
2143                                 future_ordered = true;
2144                                 continue;
2145                         }
2146
2147                         /* Only non-RMW writes can break release sequences */
2148                         if (!act->is_write() || act->is_rmw())
2149                                 continue;
2150
2151                         /* Check modification order */
2152                         if (mo_graph->checkReachable(rf, act)) {
2153                                 /* rf --mo--> act */
2154                                 future_ordered = true;
2155                                 continue;
2156                         }
2157                         if (mo_graph->checkReachable(act, release))
2158                                 /* act --mo--> release */
2159                                 break;
2160                         if (mo_graph->checkReachable(release, act) &&
2161                                       mo_graph->checkReachable(act, rf)) {
2162                                 /* release --mo-> act --mo--> rf */
2163                                 return true; /* complete */
2164                         }
2165                         /* act may break release sequence */
2166                         pending->writes.push_back(act);
2167                         certain = false;
2168                 }
2169                 if (!future_ordered)
2170                         certain = false; /* This thread is uncertain */
2171         }
2172
2173         if (certain) {
2174                 release_heads->push_back(release);
2175                 pending->writes.clear();
2176         } else {
2177                 pending->release = release;
2178                 pending->rf = rf;
2179         }
2180         return certain;
2181 }
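
/*
 * Illustrative sketch (user-level C++11 code, hypothetical): a release
 * sequence headed by a release store and continued by an RMW, which is the
 * case this function handles by walking the reads-from chain:
 *
 *     // Thread 1:
 *     x.store(1, std::memory_order_release);        // release head
 *
 *     // Thread 2:
 *     x.fetch_add(1, std::memory_order_relaxed);    // RMW continues the sequence
 *
 *     // Thread 3:
 *     if (x.load(std::memory_order_acquire) == 2)   // reads from the RMW...
 *             ;                                     // ...still synchronizes with Thread 1's store
 *
 * For a plain relaxed store in the same thread as the release head, the
 * "contiguous subsequence" logic above must additionally rule out writes
 * from other threads landing between the release and rf in modification
 * order.
 */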
2182
2183 /**
2184  * An interface for getting the release sequence head(s) with which a
2185  * given ModelAction must synchronize. This function only returns a non-empty
2186  * result when it can locate a release sequence head with certainty. Otherwise,
2187  * it may mark the internal state of the ModelChecker so that it will handle
2188  * the release sequence at a later time, causing @a acquire to update its
2189  * synchronization at some later point in execution.
2190  *
2191  * @param acquire The 'acquire' action that may synchronize with a release
2192  * sequence
2193  * @param read The read action that may read from a release sequence; this may
2194  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2195  * when 'acquire' is a fence-acquire)
2196  * @param release_heads A pass-by-reference return parameter. Will be filled
2197  * with the head(s) of the release sequence(s), if they exist with certainty.
2198  * @see ModelChecker::release_seq_heads
2199  */
2200 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2201                 ModelAction *read, rel_heads_list_t *release_heads)
2202 {
2203         const ModelAction *rf = read->get_reads_from();
2204         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2205         sequence->acquire = acquire;
2206         sequence->read = read;
2207
2208         if (!release_seq_heads(rf, release_heads, sequence)) {
2209                 /* add act to 'lazy checking' list */
2210                 pending_rel_seqs->push_back(sequence);
2211         } else {
2212                 snapshot_free(sequence);
2213         }
2214 }
2215
2216 /**
2217  * Attempt to resolve all stashed operations that might synchronize with a
2218  * release sequence for a given location. This implements the "lazy" portion of
2219  * determining whether or not a release sequence was contiguous, since not all
2220  * modification order information is present at the time an action occurs.
2221  *
2222  * @param location The location/object that should be checked for release
2223  * sequence resolutions. A NULL value means to check all locations.
2224  * @param work_queue The work queue to which to add work items as they are
2225  * generated
2226  * @return True if any updates occurred (new synchronization, new mo_graph
2227  * edges)
2228  */
2229 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2230 {
2231         bool updated = false;
2232         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2233         while (it != pending_rel_seqs->end()) {
2234                 struct release_seq *pending = *it;
2235                 ModelAction *acquire = pending->acquire;
2236                 const ModelAction *read = pending->read;
2237
2238                 /* Only resolve sequences on the given location, if provided */
2239                 if (location && read->get_location() != location) {
2240                         it++;
2241                         continue;
2242                 }
2243
2244                 const ModelAction *rf = read->get_reads_from();
2245                 rel_heads_list_t release_heads;
2246                 bool complete;
2247                 complete = release_seq_heads(rf, &release_heads, pending);
2248                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2249                         if (!acquire->has_synchronized_with(release_heads[i])) {
2250                                 if (acquire->synchronize_with(release_heads[i]))
2251                                         updated = true;
2252                                 else
2253                                         set_bad_synchronization();
2254                         }
2255                 }
2256
2257                 if (updated) {
2258                         /* Re-check all pending release sequences */
2259                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2260                         /* Re-check read-acquire for mo_graph edges */
2261                         if (acquire->is_read())
2262                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2263
2264                         /* propagate synchronization to later actions */
2265                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2266                         for (; (*rit) != acquire; rit++) {
2267                                 ModelAction *propagate = *rit;
2268                                 if (acquire->happens_before(propagate)) {
2269                                         propagate->synchronize_with(acquire);
2270                                         /* Re-check 'propagate' for mo_graph edges */
2271                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2272                                 }
2273                         }
2274                 }
2275                 if (complete) {
2276                         it = pending_rel_seqs->erase(it);
2277                         snapshot_free(pending);
2278                 } else {
2279                         it++;
2280                 }
2281         }
2282
2283 	// If we resolved promises or release sequences, see if we have realized a data race.
2284         checkDataRaces();
2285
2286         return updated;
2287 }
2288
2289 /**
2290  * Performs various bookkeeping operations for the current ModelAction. For
2291  * instance, adds action to the per-object, per-thread action vector and to the
2292  * action trace list of all thread actions.
2293  *
2294  * @param act is the ModelAction to add.
2295  */
2296 void ModelChecker::add_action_to_lists(ModelAction *act)
2297 {
2298         int tid = id_to_int(act->get_tid());
2299         ModelAction *uninit = NULL;
2300         int uninit_id = -1;
2301         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2302         if (list->empty() && act->is_atomic_var()) {
2303                 uninit = new_uninitialized_action(act->get_location());
2304                 uninit_id = id_to_int(uninit->get_tid());
2305                 list->push_back(uninit);
2306         }
2307         list->push_back(act);
2308
2309         action_trace->push_back(act);
2310         if (uninit)
2311                 action_trace->push_front(uninit);
2312
2313         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2314         if (tid >= (int)vec->size())
2315                 vec->resize(priv->next_thread_id);
2316         (*vec)[tid].push_back(act);
2317         if (uninit)
2318                 (*vec)[uninit_id].push_front(uninit);
2319
2320         if ((int)thrd_last_action->size() <= tid)
2321                 thrd_last_action->resize(get_num_threads());
2322         (*thrd_last_action)[tid] = act;
2323         if (uninit)
2324                 (*thrd_last_action)[uninit_id] = uninit;
2325
2326         if (act->is_fence() && act->is_release()) {
2327                 if ((int)thrd_last_fence_release->size() <= tid)
2328                         thrd_last_fence_release->resize(get_num_threads());
2329                 (*thrd_last_fence_release)[tid] = act;
2330         }
2331
2332         if (act->is_wait()) {
2333                 void *mutex_loc = (void *) act->get_value();
2334                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2335
2336                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2337                 if (tid >= (int)vec->size())
2338                         vec->resize(priv->next_thread_id);
2339                 (*vec)[tid].push_back(act);
2340         }
2341 }
2342
2343 /**
2344  * @brief Get the last action performed by a particular Thread
2345  * @param tid The thread ID of the Thread in question
2346  * @return The last action in the thread
2347  */
2348 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2349 {
2350         int threadid = id_to_int(tid);
2351         if (threadid < (int)thrd_last_action->size())
2352                 return (*thrd_last_action)[id_to_int(tid)];
2353         else
2354                 return NULL;
2355 }
2356
2357 /**
2358  * @brief Get the last fence release performed by a particular Thread
2359  * @param tid The thread ID of the Thread in question
2360  * @return The last fence release in the thread, if one exists; NULL otherwise
2361  */
2362 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2363 {
2364         int threadid = id_to_int(tid);
2365         if (threadid < (int)thrd_last_fence_release->size())
2366                 return (*thrd_last_fence_release)[id_to_int(tid)];
2367         else
2368                 return NULL;
2369 }
2370
2371 /**
2372  * Gets the last memory_order_seq_cst write (in the total global sequence)
2373  * performed on a particular object (i.e., memory location), not including the
2374  * current action.
2375  * @param curr The current ModelAction; also denotes the object location to
2376  * check
2377  * @return The last seq_cst write
2378  */
2379 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2380 {
2381         void *location = curr->get_location();
2382         action_list_t *list = get_safe_ptr_action(obj_map, location);
2383         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2384         action_list_t::reverse_iterator rit;
2385         for (rit = list->rbegin(); rit != list->rend(); rit++)
2386                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2387                         return *rit;
2388         return NULL;
2389 }
2390
2391 /**
2392  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2393  * performed in a particular thread, prior to a particular fence.
2394  * @param tid The ID of the thread to check
2395  * @param before_fence The fence from which to begin the search; if NULL, then
2396  * search for the most recent fence in the thread.
2397  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2398  */
2399 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2400 {
2401         /* All fences should have NULL location */
2402         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2403         action_list_t::reverse_iterator rit = list->rbegin();
2404
2405         if (before_fence) {
2406                 for (; rit != list->rend(); rit++)
2407                         if (*rit == before_fence)
2408                                 break;
2409
2410 		ASSERT(rit != list->rend() && *rit == before_fence);
2411                 rit++;
2412         }
2413
2414         for (; rit != list->rend(); rit++)
2415                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2416                         return *rit;
2417         return NULL;
2418 }
2419
2420 /**
2421  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2422  * location). This function identifies the mutex according to the current
2423  * action, which is presumed to operate on the same mutex.
2424  * @param curr The current ModelAction; also denotes the object location to
2425  * check
2426  * @return The last unlock operation
2427  */
2428 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2429 {
2430         void *location = curr->get_location();
2431         action_list_t *list = get_safe_ptr_action(obj_map, location);
2432         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2433         action_list_t::reverse_iterator rit;
2434         for (rit = list->rbegin(); rit != list->rend(); rit++)
2435                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2436                         return *rit;
2437         return NULL;
2438 }
2439
2440 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2441 {
2442         ModelAction *parent = get_last_action(tid);
2443         if (!parent)
2444                 parent = get_thread(tid)->get_creation();
2445         return parent;
2446 }
2447
2448 /**
2449  * Returns the clock vector for a given thread.
2450  * @param tid The thread whose clock vector we want
2451  * @return Desired clock vector
2452  */
2453 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2454 {
2455         return get_parent_action(tid)->get_cv();
2456 }
2457
2458 /**
2459  * @brief Find the promise, if any, to resolve for the current action
2460  * @param curr The current ModelAction. Should be a write.
2461  * @return The (non-negative) index for the Promise to resolve, if any;
2462  * otherwise -1
2463  */
2464 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2465 {
2466         for (unsigned int i = 0; i < promises->size(); i++)
2467                 if (curr->get_node()->get_promise(i))
2468                         return i;
2469         return -1;
2470 }
2471
2472 /**
2473  * Resolve a Promise with a current write.
2474  * @param write The ModelAction that is fulfilling Promises
2475  * @param promise_idx The index corresponding to the promise
2476  * @return True if the Promise was successfully resolved; false otherwise
2477  */
2478 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2479 {
2480         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2481         promise_list_t mustResolve;
2482         Promise *promise = (*promises)[promise_idx];
2483
2484         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2485                 ModelAction *read = promise->get_reader(i);
2486                 read_from(read, write);
2487                 actions_to_check.push_back(read);
2488         }
2489         /* Make sure the promise's value matches the write's value */
2490         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2491         mo_graph->resolvePromise(promise, write, &mustResolve);
2492
2493         promises->erase(promises->begin() + promise_idx);
2494
2495         /** @todo simplify the 'mustResolve' stuff */
2496         ASSERT(mustResolve.size() <= 1);
2497
2498         if (!mustResolve.empty() && mustResolve[0] != promise)
2499                 priv->failed_promise = true;
2500         delete promise;
2501
2502         //Check whether reading these writes has made threads unable to
2503         //resolve promises
2504
2505         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2506                 ModelAction *read = actions_to_check[i];
2507                 mo_check_promises(read, true);
2508         }
2509
2510         return true;
2511 }
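
/*
 * Illustrative sketch (hypothetical, not checker code): promises cover
 * load-buffering style executions in which a load observes a store that has
 * not been executed yet:
 *
 *     // Thread 1:
 *     r1 = x.load(std::memory_order_relaxed);  // may read 1 via a Promise
 *     y.store(1, std::memory_order_relaxed);
 *
 *     // Thread 2:
 *     r2 = y.load(std::memory_order_relaxed);
 *     x.store(1, std::memory_order_relaxed);   // later resolves that Promise
 *
 * To explore the outcome r1 == 1, the checker first lets Thread 1's load
 * read from a Promise; when Thread 2's store of the matching value is
 * actually executed, resolve_promise() binds the load's reads-from to it.
 */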
2512
2513 /**
2514  * Compute the set of promises that could potentially be satisfied by this
2515  * action. Note that the set computation actually appears in the Node, not in
2516  * ModelChecker.
2517  * @param curr The ModelAction that may satisfy promises
2518  */
2519 void ModelChecker::compute_promises(ModelAction *curr)
2520 {
2521         for (unsigned int i = 0; i < promises->size(); i++) {
2522                 Promise *promise = (*promises)[i];
2523                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2524                         continue;
2525
2526                 bool satisfy = true;
2527                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2528                         const ModelAction *act = promise->get_reader(j);
2529                         if (act->happens_before(curr) ||
2530                                         act->could_synchronize_with(curr)) {
2531                                 satisfy = false;
2532                                 break;
2533                         }
2534                 }
2535                 if (satisfy)
2536                         curr->get_node()->set_promise(i);
2537         }
2538 }
2539
2540 /** Checks promises in response to a change in a thread's ClockVector. */
2541 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2542 {
2543         for (unsigned int i = 0; i < promises->size(); i++) {
2544                 Promise *promise = (*promises)[i];
2545                 if (!promise->thread_is_available(tid))
2546                         continue;
2547                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2548                         const ModelAction *act = promise->get_reader(j);
2549                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2550                                         merge_cv->synchronized_since(act)) {
2551                                 if (promise->eliminate_thread(tid)) {
2552                                         /* Promise has failed */
2553                                         priv->failed_promise = true;
2554                                         return;
2555                                 }
2556                         }
2557                 }
2558         }
2559 }
2560
2561 void ModelChecker::check_promises_thread_disabled()
2562 {
2563         for (unsigned int i = 0; i < promises->size(); i++) {
2564                 Promise *promise = (*promises)[i];
2565                 if (promise->has_failed()) {
2566                         priv->failed_promise = true;
2567                         return;
2568                 }
2569         }
2570 }
2571
2572 /**
2573  * @brief Checks promises in response to addition to modification order for
2574  * threads.
2575  *
2576  * We test whether threads are still available for satisfying promises after an
2577  * addition to our modification order constraints. Those that are unavailable
2578  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2579  * that promise has failed.
2580  *
2581  * @param act The ModelAction which updated the modification order
2582  * @param is_read_check Should be true if act is a read and we must check for
2583  * updates to the store from which it read (there is a distinction here for
2584  * RMW's, which are both a load and a store)
2585  */
2586 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2587 {
2588         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2589
2590         for (unsigned int i = 0; i < promises->size(); i++) {
2591                 Promise *promise = (*promises)[i];
2592
2593                 // Is this promise on the same location?
2594                 if (!promise->same_location(write))
2595                         continue;
2596
2597                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2598                         const ModelAction *pread = promise->get_reader(j);
2599                         if (!pread->happens_before(act))
2600                                 continue;
2601                         if (mo_graph->checkPromise(write, promise)) {
2602                                 priv->failed_promise = true;
2603                                 return;
2604                         }
2605                         break;
2606                 }
2607
2608                 // Don't do any lookups twice for the same thread
2609                 if (!promise->thread_is_available(act->get_tid()))
2610                         continue;
2611
2612                 if (mo_graph->checkReachable(promise, write)) {
2613                         if (mo_graph->checkPromise(write, promise)) {
2614                                 priv->failed_promise = true;
2615                                 return;
2616                         }
2617                 }
2618         }
2619 }
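
/*
 * Note (wiring partly assumed): whenever the checks above set
 * priv->failed_promise, this execution is treated as infeasible;
 * take_step() below consults is_infeasible() and stops scheduling further
 * steps for the trace.
 */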
2620
2621 /**
2622  * Compute the set of writes that may break the current pending release
2623  * sequence. This information is extracted from previous release sequence
2624  * calculations.
2625  *
2626  * @param curr The current ModelAction. Must be a release sequence fixup
2627  * action.
2628  */
2629 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2630 {
2631         if (pending_rel_seqs->empty())
2632                 return;
2633
2634         struct release_seq *pending = pending_rel_seqs->back();
2635         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2636                 const ModelAction *write = pending->writes[i];
2637                 curr->get_node()->add_relseq_break(write);
2638         }
2639
2640         /* NULL means don't break the sequence; just synchronize */
2641         curr->get_node()->add_relseq_break(NULL);
2642 }
2643
2644 /**
2645  * Build up an initial set of all past writes that this 'read' action may read
2646  * from, as well as any previously-observed future values that must still be valid.
2647  *
2648  * @param curr is the current ModelAction that we are exploring; it must be a
2649  * 'read' operation.
2650  */
2651 void ModelChecker::build_may_read_from(ModelAction *curr)
2652 {
2653         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2654         unsigned int i;
2655         ASSERT(curr->is_read());
2656
2657         ModelAction *last_sc_write = NULL;
2658
2659         if (curr->is_seqcst())
2660                 last_sc_write = get_last_seq_cst_write(curr);
2661
2662         /* Iterate over all threads */
2663         for (i = 0; i < thrd_lists->size(); i++) {
2664                 /* Iterate over actions in thread, starting from most recent */
2665                 action_list_t *list = &(*thrd_lists)[i];
2666                 action_list_t::reverse_iterator rit;
2667                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2668                         ModelAction *act = *rit;
2669
2670                         /* Only consider 'write' actions */
2671                         if (!act->is_write() || act == curr)
2672                                 continue;
2673
2674                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2675                         bool allow_read = true;
2676
2677                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2678                                 allow_read = false;
2679                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2680                                 allow_read = false;
2681
2682                         if (allow_read) {
2683                                 /* Only add feasible reads */
2684                                 mo_graph->startChanges();
2685                                 r_modification_order(curr, act);
2686                                 if (!is_infeasible())
2687                                         curr->get_node()->add_read_from_past(act);
2688                                 mo_graph->rollbackChanges();
2689                         }
2690
2691                         /* Include at most one act per thread that "happens before" curr */
2692                         if (act->happens_before(curr))
2693                                 break;
2694                 }
2695         }
2696
2697         /* Inherit existing, promised future values */
2698         for (i = 0; i < promises->size(); i++) {
2699                 const Promise *promise = (*promises)[i];
2700                 const ModelAction *promise_read = promise->get_reader(0);
2701                 if (promise_read->same_var(curr)) {
2702                         /* Only add feasible future-values */
2703                         mo_graph->startChanges();
2704                         r_modification_order(curr, promise);
2705                         if (!is_infeasible())
2706                                 curr->get_node()->add_read_from_promise(promise_read);
2707                         mo_graph->rollbackChanges();
2708                 }
2709         }
2710
2711         /* We may find no valid may-read-from set only if the execution is doomed */
2712         if (!curr->get_node()->read_from_size()) {
2713                 priv->no_valid_reads = true;
2714                 set_assert();
2715         }
2716
2717         if (DBG_ENABLED()) {
2718                 model_print("Reached read action:\n");
2719                 curr->print();
2720                 model_print("Printing read_from_past\n");
2721                 curr->get_node()->print_read_from_past();
2722                 model_print("End printing read_from_past\n");
2723         }
2724 }
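
/*
 * Illustrative sketch of how the set built above is consumed (names are
 * assumptions except read_from_size(), which appears in the doomed-execution
 * check): backtracking can walk the candidates recorded in the Node, e.g.
 *
 *     for (int i = 0; i < curr->get_node()->read_from_size(); i++)
 *             consider_candidate(curr->get_node()->get_read_from_past(i));
 *
 * where consider_candidate() is a placeholder and get_read_from_past() is
 * assumed to be the Node accessor paired with add_read_from_past().
 */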
2725
2726 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2727 {
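        /* Walk back through the chain of RMWs feeding 'write'; a sleeping
         * reader may read from it only if some release along that chain was
         * performed while the reader's thread was on the sleep set */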
2728         for ( ; write != NULL; write = write->get_reads_from()) {
2729                 /* UNINIT actions don't have a Node, and they never sleep */
2730                 if (write->is_uninitialized())
2731                         return true;
2732                 Node *prevnode = write->get_node()->get_parent();
2733
2734                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2735                 if (write->is_release() && thread_sleep)
2736                         return true;
2737                 if (!write->is_rmw())
2738                         return false;
2739         }
2740         return true;
2741 }
2742
2743 /**
2744  * @brief Create a new action representing an uninitialized atomic
2745  * @param location The memory location of the atomic object
2746  * @return A pointer to a new ModelAction
2747  */
2748 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2749 {
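        /* Allocate from the snapshotting heap and construct in place, so the
         * action is restored/discarded along with the rest of the snapshot */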
2750         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2751         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2752         act->create_cv(NULL);
2753         return act;
2754 }
2755
2756 static void print_list(action_list_t *list)
2757 {
2758         action_list_t::iterator it;
2759
2760         model_print("---------------------------------------------------------------------\n");
2761
2762         unsigned int hash = 0;
2763
2764         for (it = list->begin(); it != list->end(); it++) {
2765                 (*it)->print();
2766                 hash = hash^(hash<<3)^((*it)->hash());
2767         }
2768         model_print("HASH %u\n", hash);
2769         model_print("---------------------------------------------------------------------\n");
2770 }
2771
2772 #if SUPPORT_MOD_ORDER_DUMP
2773 void ModelChecker::dumpGraph(char *filename) const
2774 {
2775         char buffer[200];
2776         sprintf(buffer, "%s.dot", filename);
2777         FILE *file = fopen(buffer, "w");
2778         fprintf(file, "digraph %s {\n", filename);
2779         mo_graph->dumpNodes(file);
2780         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2781
2782         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2783                 ModelAction *act = *it;
2784                 if (act->is_read()) {
2785                         mo_graph->dot_print_node(file, act);
2786                         if (act->get_reads_from())
2787                                 mo_graph->dot_print_edge(file,
2788                                                 act->get_reads_from(),
2789                                                 act,
2790                                                 "label=\"rf\", color=red, weight=2");
2791                         else
2792                                 mo_graph->dot_print_edge(file,
2793                                                 act->get_reads_from_promise(),
2794                                                 act,
2795                                                 "label=\"rf\", color=red");
2796                 }
2797                 if (thread_array[id_to_int(act->get_tid())]) {
2798                         mo_graph->dot_print_edge(file,
2799                                         thread_array[id_to_int(act->get_tid())],
2800                                         act,
2801                                         "label=\"sb\", color=blue, weight=400");
2802                 }
2803
2804                 thread_array[id_to_int(act->get_tid())] = act;
2805         }
2806         fprintf(file, "}\n");
2807         model_free(thread_array);
2808         fclose(file);
2809 }
2810 #endif
2811
2812 /** @brief Prints an execution trace summary. */
2813 void ModelChecker::print_summary() const
2814 {
2815 #if SUPPORT_MOD_ORDER_DUMP
2816         char buffername[100];
2817         sprintf(buffername, "exec%04u", stats.num_total);
2818         mo_graph->dumpGraphToFile(buffername);
2819         sprintf(buffername, "graph%04u", stats.num_total);
2820         dumpGraph(buffername);
2821 #endif
2822
2823         model_print("Execution %d:", stats.num_total);
2824         if (isfeasibleprefix()) {
2825                 if (scheduler->all_threads_sleeping())
2826                         model_print(" SLEEP-SET REDUNDANT");
2827                 model_print("\n");
2828         } else
2829                 print_infeasibility(" INFEASIBLE");
2830         print_list(action_trace);
2831         model_print("\n");
2832 }
2833
2834 /**
2835  * Add a Thread to the system for the first time. Should only be called once
2836  * per thread.
2837  * @param t The Thread to add
2838  */
2839 void ModelChecker::add_thread(Thread *t)
2840 {
2841         thread_map->put(id_to_int(t->get_id()), t);
2842         scheduler->add_thread(t);
2843 }
2844
2845 /**
2846  * Removes a thread from the scheduler.
2847  * @param t The Thread to remove
2848  */
2849 void ModelChecker::remove_thread(Thread *t)
2850 {
2851         scheduler->remove_thread(t);
2852 }
2853
2854 /**
2855  * @brief Get a Thread reference by its ID
2856  * @param tid The Thread's ID
2857  * @return A Thread reference
2858  */
2859 Thread * ModelChecker::get_thread(thread_id_t tid) const
2860 {
2861         return thread_map->get(id_to_int(tid));
2862 }
2863
2864 /**
2865  * @brief Get a reference to the Thread in which a ModelAction was executed
2866  * @param act The ModelAction
2867  * @return A Thread reference
2868  */
2869 Thread * ModelChecker::get_thread(const ModelAction *act) const
2870 {
2871         return get_thread(act->get_tid());
2872 }
2873
2874 /**
2875  * @brief Get a Promise's "promise number"
2876  *
2877  * A "promise number" is an index number that is unique to a promise, valid
2878  * only for a specific snapshot of an execution trace. Promises may come and go
2879  * as they are generated and resolved, so an index only retains meaning for the
2880  * current snapshot.
2881  *
2882  * @param promise The Promise to check
2883  * @return The promise index, if the promise still is valid; otherwise -1
2884  */
2885 int ModelChecker::get_promise_number(const Promise *promise) const
2886 {
2887         for (unsigned int i = 0; i < promises->size(); i++)
2888                 if ((*promises)[i] == promise)
2889                         return i;
2890         /* Not found */
2891         return -1;
2892 }
2893
2894 /**
2895  * @brief Check if a Thread is currently enabled
2896  * @param t The Thread to check
2897  * @return True if the Thread is currently enabled
2898  */
2899 bool ModelChecker::is_enabled(Thread *t) const
2900 {
2901         return scheduler->is_enabled(t);
2902 }
2903
2904 /**
2905  * @brief Check if a Thread is currently enabled
2906  * @param tid The ID of the Thread to check
2907  * @return True if the Thread is currently enabled
2908  */
2909 bool ModelChecker::is_enabled(thread_id_t tid) const
2910 {
2911         return scheduler->is_enabled(tid);
2912 }
2913
2914 /**
2915  * Switch from a model-checker context to a user-thread context. This is the
2916  * complement of ModelChecker::switch_to_master and must be called from the
2917  * model-checker context
2918  *
2919  * @param thread The user-thread to switch to
2920  */
2921 void ModelChecker::switch_from_master(Thread *thread)
2922 {
2923         scheduler->set_current_thread(thread);
2924         Thread::swap(&system_context, thread);
2925 }
2926
2927 /**
2928  * Switch from a user-context to the "master thread" context (a.k.a. system
2929  * context). This switch is made with the intention of exploring a particular
2930  * model-checking action (described by a ModelAction object). Must be called
2931  * from a user-thread context.
2932  *
2933  * @param act The current action that will be explored. May be NULL only if
2934  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2935  * ModelChecker::has_asserted).
2936  * @return The value returned by the current action
2937  */
2938 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2939 {
2940         DBG();
2941         Thread *old = thread_current();
2942         ASSERT(!old->get_pending());
2943         old->set_pending(act);
2944         if (Thread::swap(old, &system_context) < 0) {
2945                 perror("swap threads");
2946                 exit(EXIT_FAILURE);
2947         }
2948         return old->get_return_value();
2949 }
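
/*
 * Illustrative call-site sketch (an assumption about the user-facing atomic
 * wrappers, which live outside this file; the exact ModelAction arguments
 * are illustrative): a user-thread operation packages itself as a
 * ModelAction and yields to the checker, e.g.
 *
 *     uint64_t val = model->switch_to_master(
 *                     new ModelAction(ATOMIC_READ, order, location, VALUE_NONE));
 *
 * The value returned is whatever the checker stored for this thread; it is
 * retrieved via get_return_value() once the scheduler resumes the thread.
 */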
2950
2951 /**
2952  * Takes the next step in the execution, if possible.
2953  * @param curr The current step to take
2954  * @return The next Thread to run, if any; NULL if this execution
2955  * should terminate
2956  */
2957 Thread * ModelChecker::take_step(ModelAction *curr)
2958 {
2959         Thread *curr_thrd = get_thread(curr);
2960         ASSERT(curr_thrd->get_state() == THREAD_READY);
2961
2962         curr = check_current_action(curr);
2963
2964         /* Infeasible -> don't take any more steps */
2965         if (is_infeasible())
2966                 return NULL;
2967         else if (isfeasibleprefix() && have_bug_reports()) {
2968                 set_assert();
2969                 return NULL;
2970         }
2971
2972         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2973                 return NULL;
2974
2975         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2976                 scheduler->remove_thread(curr_thrd);
2977
2978         Thread *next_thrd = get_next_thread(curr);
2979
2980         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2981                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2982
2983         return next_thrd;
2984 }
2985
2986 /** Wrapper to run the user's main function, with appropriate arguments */
2987 void user_main_wrapper(void *)
2988 {
2989         user_main(model->params.argc, model->params.argv);
2990 }
2991
2992 /** @brief Run ModelChecker for the user program */
2993 void ModelChecker::run()
2994 {
2995         do {
2996                 thrd_t user_thread;
2997                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2998                 add_thread(t);
2999
3000                 do {
3001                         /*
3002                          * Stash next pending action(s) for thread(s). There
3003                          * should only need to stash one thread's action--the
3004                          * thread which just took a step--plus the first step
3005                          * for any newly-created thread
3006                          */
3007                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3008                                 thread_id_t tid = int_to_id(i);
3009                                 Thread *thr = get_thread(tid);
3010                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3011                                         switch_from_master(thr);
3012                                 }
3013                         }
3014
3015                         /* Catch assertions from prior take_step or from
3016                          * between-ModelAction bugs (e.g., data races) */
3017                         if (has_asserted())
3018                                 break;
3019
3020                         /* Consume the next action for a Thread */
3021                         ModelAction *curr = t->get_pending();
3022                         t->set_pending(NULL);
3023                         t = take_step(curr);
3024                 } while (t && !t->is_model_thread());
3025
3026                 /*
3027                  * Launch end-of-execution release sequence fixups only when
3028                  * the execution is otherwise feasible AND there are:
3029                  *
3030                  * (1) pending release sequences
3031                  * (2) pending assertions that could be invalidated by a change
3032                  * in clock vectors (i.e., data races)
3033                  * (3) no pending promises
3034                  */
3035                 while (!pending_rel_seqs->empty() &&
3036                                 is_feasible_prefix_ignore_relseq() &&
3037                                 !unrealizedraces.empty()) {
3038                         model_print("*** WARNING: release sequence fixup action "
3039                                         "(%zu pending release sequence(s)) ***\n",
3040                                         pending_rel_seqs->size());
3041                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3042                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3043                                         model_thread);
3044                         take_step(fixup);
3045                 }
3046         } while (next_execution());
3047
3048         model_print("******* Model-checking complete: *******\n");
3049         print_stats();
3050 }
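
/*
 * Illustrative driver sketch (an assumption about the harness, not code from
 * this file): the global 'model' is constructed once and run() then iterates
 * executions until next_execution() returns false, e.g.
 *
 *     model = new ModelChecker(params);
 *     model->run();
 */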