1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
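/** @brief Global reference to the ModelChecker instance in use */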
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
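/**
 * @brief Look up the action list for a memory location, creating it if absent
 *
 * Convenience helper: returns the action_list_t stored under ptr in the given
 * hash table, allocating and inserting an empty list first if no entry exists.
 */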
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
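/**
 * @brief Same as get_safe_ptr_action(), but for hash tables that map a
 * location to a vector of per-thread action lists
 */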
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
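/** @return The Node at the head of the NodeStack (the current position in the exploration tree) */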
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise NULL, if no
221  * threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
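                /* Try the next unexplored alternative at this node, in a fixed
                 * priority order: misc index, promise set, read-from choice,
                 * future value, then release-sequence break. If every alternative
                 * is exhausted, back off to a different thread chosen at the
                 * parent node instead. */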
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* Start the round robin scheduler from this thread id */
286                 scheduler->set_scheduler_thread(tid);
287                 /* The correct sleep set is in the parent node. */
288                 execute_sleep_set();
289
290                 DEBUG("*** Divergence point ***\n");
291
292                 diverge = NULL;
293         } else {
294                 tid = next->get_tid();
295         }
296         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
297         ASSERT(tid != THREAD_ID_T_NONE);
298         return thread_map->get(id_to_int(tid));
299 }
300
301 /**
302  * For all threads in the sleep set, mark their pending actions with the
303  * sleep flag, so that these actions are recognized as having been generated
304  * while their threads were asleep.
305  */
306
307 void ModelChecker::execute_sleep_set()
308 {
309         for (unsigned int i = 0; i < get_num_threads(); i++) {
310                 thread_id_t tid = int_to_id(i);
311                 Thread *thr = get_thread(tid);
312                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
313                         thr->get_pending()->set_sleep_flag();
314                 }
315         }
316 }
317
318 /**
319  * @brief Should the current action wake up a given thread?
320  *
321  * @param curr The current action
322  * @param thread The thread that we might wake up
323  * @return True, if we should wake up the sleeping thread; false otherwise
324  */
325 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
326 {
327         const ModelAction *asleep = thread->get_pending();
328         /* Don't allow partial RMW to wake anyone up */
329         if (curr->is_rmwr())
330                 return false;
331         /* Synchronizing actions may have been backtracked */
332         if (asleep->could_synchronize_with(curr))
333                 return true;
334         /* A pending fence-acquire may synchronize with any release operation */
335         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
336                 return true;
337         /* Fence-release + store can awaken a load-acquire on the same location */
338         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
339                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
340                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
341                         return true;
342         }
343         return false;
344 }
345
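/**
 * @brief Remove from the sleep set any threads that the current action may wake
 *
 * Checks each sleeping thread against curr via should_wake_up() and asks the
 * scheduler to remove those that should be woken.
 * @param curr The current action
 */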
346 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
347 {
348         for (unsigned int i = 0; i < get_num_threads(); i++) {
349                 Thread *thr = get_thread(int_to_id(i));
350                 if (scheduler->is_sleep_set(thr)) {
351                         if (should_wake_up(curr, thr))
352                                 /* Remove this thread from sleep set */
353                                 scheduler->remove_sleep(thr);
354                 }
355         }
356 }
357
358 /** @brief Alert the model-checker that an incorrectly-ordered
359  * synchronization was made */
360 void ModelChecker::set_bad_synchronization()
361 {
362         priv->bad_synchronization = true;
363 }
364
365 /**
366  * Check whether the current trace has triggered an assertion which should halt
367  * its execution.
368  *
369  * @return True, if the execution should be aborted; false otherwise
370  */
371 bool ModelChecker::has_asserted() const
372 {
373         return priv->asserted;
374 }
375
376 /**
377  * Trigger a trace assertion which should cause this execution to be halted.
378  * This can be due to a detected bug or due to an infeasibility that should
379  * halt ASAP.
380  */
381 void ModelChecker::set_assert()
382 {
383         priv->asserted = true;
384 }
385
386 /**
387  * Check if we are in a deadlock. Should only be called at the end of an
388  * execution, although it should not give false positives in the middle of an
389  * execution (there should be some ENABLED thread).
390  *
391  * @return True if program is in a deadlock; false otherwise
392  */
393 bool ModelChecker::is_deadlocked() const
394 {
395         bool blocking_threads = false;
396         for (unsigned int i = 0; i < get_num_threads(); i++) {
397                 thread_id_t tid = int_to_id(i);
398                 if (is_enabled(tid))
399                         return false;
400                 Thread *t = get_thread(tid);
401                 if (!t->is_model_thread() && t->get_pending())
402                         blocking_threads = true;
403         }
404         return blocking_threads;
405 }
406
407 /**
408  * Check if this is a complete execution. That is, have all threads completed
409  * execution (rather than exiting because sleep sets have forced a redundant
410  * execution).
411  *
412  * @return True if the execution is complete.
413  */
414 bool ModelChecker::is_complete_execution() const
415 {
416         for (unsigned int i = 0; i < get_num_threads(); i++)
417                 if (is_enabled(int_to_id(i)))
418                         return false;
419         return true;
420 }
421
422 /**
423  * @brief Assert a bug in the executing program.
424  *
425  * Use this function to assert any sort of bug in the user program. If the
426  * current trace is feasible (actually, a prefix of some feasible execution),
427  * then this execution will be aborted, printing the appropriate message. If
428  * the current trace is not yet feasible, the error message will be stashed and
429  * printed if the execution ever becomes feasible.
430  *
431  * @param msg Descriptive message for the bug (do not include newline char)
432  * @return True if the bug is immediately feasible
433  */
434 bool ModelChecker::assert_bug(const char *msg)
435 {
436         priv->bugs.push_back(new bug_message(msg));
437
438         if (isfeasibleprefix()) {
439                 set_assert();
440                 return true;
441         }
442         return false;
443 }
444
445 /**
446  * @brief Assert a bug in the executing program, asserted by a user thread
447  * @see ModelChecker::assert_bug
448  * @param msg Descriptive message for the bug (do not include newline char)
449  */
450 void ModelChecker::assert_user_bug(const char *msg)
451 {
452         /* If feasible bug, bail out now */
453         if (assert_bug(msg))
454                 switch_to_master(NULL);
455 }
456
457 /** @return True, if any bugs have been reported for this execution */
458 bool ModelChecker::have_bug_reports() const
459 {
460         return priv->bugs.size() != 0;
461 }
462
463 /** @brief Print bug report listing for this execution (if any bugs exist) */
464 void ModelChecker::print_bugs() const
465 {
466         if (have_bug_reports()) {
467                 model_print("Bug report: %zu bug%s detected\n",
468                                 priv->bugs.size(),
469                                 priv->bugs.size() > 1 ? "s" : "");
470                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
471                         priv->bugs[i]->print();
472         }
473 }
474
475 /**
476  * @brief Record end-of-execution stats
477  *
478  * Must be run when exiting an execution. Records various stats.
479  * @see struct execution_stats
480  */
481 void ModelChecker::record_stats()
482 {
483         stats.num_total++;
484         if (!isfeasibleprefix())
485                 stats.num_infeasible++;
486         else if (have_bug_reports())
487                 stats.num_buggy_executions++;
488         else if (is_complete_execution())
489                 stats.num_complete++;
490         else
491                 stats.num_redundant++;
492 }
493
494 /** @brief Print execution stats */
495 void ModelChecker::print_stats() const
496 {
497         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
498         model_print("Number of redundant executions: %d\n", stats.num_redundant);
499         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
500         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
501         model_print("Total executions: %d\n", stats.num_total);
502         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
503 }
504
505 /**
506  * @brief End-of-execution print
507  * @param printbugs Should any existing bugs be printed?
508  */
509 void ModelChecker::print_execution(bool printbugs) const
510 {
511         print_program_output();
512
513         if (DBG_ENABLED() || params.verbose) {
514                 model_print("Earliest divergence point since last feasible execution:\n");
515                 if (earliest_diverge)
516                         earliest_diverge->print();
517                 else
518                         model_print("(Not set)\n");
519
520                 model_print("\n");
521                 print_stats();
522         }
523
524         /* Don't print invalid bugs */
525         if (printbugs)
526                 print_bugs();
527
528         model_print("\n");
529         print_summary();
530 }
531
532 /**
533  * Queries the model-checker for more executions to explore and, if one
534  * exists, resets the model-checker state to execute a new execution.
535  *
536  * @return If there are more executions to explore, return true. Otherwise,
537  * return false.
538  */
539 bool ModelChecker::next_execution()
540 {
541         DBG();
542         /* Is this execution a feasible execution that's worth bug-checking? */
543         bool complete = isfeasibleprefix() && (is_complete_execution() ||
544                         have_bug_reports());
545
546         /* End-of-execution bug checks */
547         if (complete) {
548                 if (is_deadlocked())
549                         assert_bug("Deadlock detected");
550
551                 checkDataRaces();
552         }
553
554         record_stats();
555
556         /* Output */
557         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
558                 print_execution(complete);
559         else
560                 clear_program_output();
561
562         if (complete)
563                 earliest_diverge = NULL;
564
565         if ((diverge = get_next_backtrack()) == NULL)
566                 return false;
567
568         if (DBG_ENABLED()) {
569                 model_print("Next execution will diverge at:\n");
570                 diverge->print();
571         }
572
573         reset_to_initial_state();
574         return true;
575 }
576
577 /**
578  * @brief Find the last fence-related backtracking conflict for a ModelAction
579  *
580  * This function performs the search for the most recent conflicting action
581  * against which we should perform backtracking, as affected by fence
582  * operations. This includes pairs of potentially-synchronizing actions which
583  * occur due to fence-acquire or fence-release, and hence should be explored in
584  * the opposite execution order.
585  *
586  * @param act The current action
587  * @return The most recent action which conflicts with act due to fences
588  */
589 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
590 {
591         /* Only perform release/acquire fence backtracking for stores */
592         if (!act->is_write())
593                 return NULL;
594
595         /* Find a fence-release (or, act is a release) */
596         ModelAction *last_release;
597         if (act->is_release())
598                 last_release = act;
599         else
600                 last_release = get_last_fence_release(act->get_tid());
601         if (!last_release)
602                 return NULL;
603
604         /* Skip past the release */
605         action_list_t *list = action_trace;
606         action_list_t::reverse_iterator rit;
607         for (rit = list->rbegin(); rit != list->rend(); rit++)
608                 if (*rit == last_release)
609                         break;
610         ASSERT(rit != list->rend());
611
612         /* Find a prior:
613          *   load-acquire
614          * or
615          *   load --sb-> fence-acquire */
616         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
617         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
618         bool found_acquire_fences = false;
619         for ( ; rit != list->rend(); rit++) {
620                 ModelAction *prev = *rit;
621                 if (act->same_thread(prev))
622                         continue;
623
624                 int tid = id_to_int(prev->get_tid());
625
626                 if (prev->is_read() && act->same_var(prev)) {
627                         if (prev->is_acquire()) {
628                                 /* Found most recent load-acquire, don't need
629                                  * to search for more fences */
630                                 if (!found_acquire_fences)
631                                         return NULL;
632                         } else {
633                                 prior_loads[tid] = prev;
634                         }
635                 }
636                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
637                         found_acquire_fences = true;
638                         acquire_fences[tid] = prev;
639                 }
640         }
641
642         ModelAction *latest_backtrack = NULL;
643         for (unsigned int i = 0; i < acquire_fences.size(); i++)
644                 if (acquire_fences[i] && prior_loads[i])
645                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
646                                 latest_backtrack = acquire_fences[i];
647         return latest_backtrack;
648 }
649
650 /**
651  * @brief Find the last backtracking conflict for a ModelAction
652  *
653  * This function performs the search for the most recent conflicting action
654  * against which we should perform backtracking. This primarily includes pairs of
655  * synchronizing actions which should be explored in the opposite execution
656  * order.
657  *
658  * @param act The current action
659  * @return The most recent action which conflicts with act
660  */
661 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
662 {
663         switch (act->get_type()) {
664         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
665         case ATOMIC_READ:
666         case ATOMIC_WRITE:
667         case ATOMIC_RMW: {
668                 ModelAction *ret = NULL;
669
670                 /* linear search: from most recent to oldest */
671                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
672                 action_list_t::reverse_iterator rit;
673                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
674                         ModelAction *prev = *rit;
675                         if (prev->could_synchronize_with(act)) {
676                                 ret = prev;
677                                 break;
678                         }
679                 }
680
681                 ModelAction *ret2 = get_last_fence_conflict(act);
682                 if (!ret2)
683                         return ret;
684                 if (!ret)
685                         return ret2;
686                 if (*ret < *ret2)
687                         return ret2;
688                 return ret;
689         }
690         case ATOMIC_LOCK:
691         case ATOMIC_TRYLOCK: {
692                 /* linear search: from most recent to oldest */
693                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
694                 action_list_t::reverse_iterator rit;
695                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
696                         ModelAction *prev = *rit;
697                         if (act->is_conflicting_lock(prev))
698                                 return prev;
699                 }
700                 break;
701         }
702         case ATOMIC_UNLOCK: {
703                 /* linear search: from most recent to oldest */
704                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
705                 action_list_t::reverse_iterator rit;
706                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
707                         ModelAction *prev = *rit;
708                         if (!act->same_thread(prev) && prev->is_failed_trylock())
709                                 return prev;
710                 }
711                 break;
712         }
713         case ATOMIC_WAIT: {
714                 /* linear search: from most recent to oldest */
715                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
716                 action_list_t::reverse_iterator rit;
717                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
718                         ModelAction *prev = *rit;
719                         if (!act->same_thread(prev) && prev->is_failed_trylock())
720                                 return prev;
721                         if (!act->same_thread(prev) && prev->is_notify())
722                                 return prev;
723                 }
724                 break;
725         }
726
727         case ATOMIC_NOTIFY_ALL:
728         case ATOMIC_NOTIFY_ONE: {
729                 /* linear search: from most recent to oldest */
730                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
731                 action_list_t::reverse_iterator rit;
732                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
733                         ModelAction *prev = *rit;
734                         if (!act->same_thread(prev) && prev->is_wait())
735                                 return prev;
736                 }
737                 break;
738         }
739         default:
740                 break;
741         }
742         return NULL;
743 }
744
745 /** This method finds backtracking points at which we should try to
746  * reorder other actions with respect to the parameter ModelAction.
747  *
748  * @param act The ModelAction to find backtracking points for.
749  */
750 void ModelChecker::set_backtracking(ModelAction *act)
751 {
752         Thread *t = get_thread(act);
753         ModelAction *prev = get_last_conflict(act);
754         if (prev == NULL)
755                 return;
756
757         Node *node = prev->get_node()->get_parent();
758
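        /* If the current action's thread was enabled at the conflict point, we
         * only need to consider backtracking to that one thread; otherwise,
         * consider every thread. */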
759         int low_tid, high_tid;
760         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
761                 low_tid = id_to_int(act->get_tid());
762                 high_tid = low_tid + 1;
763         } else {
764                 low_tid = 0;
765                 high_tid = get_num_threads();
766         }
767
768         for (int i = low_tid; i < high_tid; i++) {
769                 thread_id_t tid = int_to_id(i);
770
771                 /* Make sure this thread can be enabled here. */
772                 if (i >= node->get_num_threads())
773                         break;
774
775                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
776                 if (node->enabled_status(tid) != THREAD_ENABLED)
777                         continue;
778
779                 /* Check if this has been explored already */
780                 if (node->has_been_explored(tid))
781                         continue;
782
783                 /* See if fairness allows */
784                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
785                         bool unfair = false;
786                         for (int t = 0; t < node->get_num_threads(); t++) {
787                                 thread_id_t tother = int_to_id(t);
788                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
789                                         unfair = true;
790                                         break;
791                                 }
792                         }
793                         if (unfair)
794                                 continue;
795                 }
796                 /* Cache the latest backtracking point */
797                 set_latest_backtrack(prev);
798
799                 /* If this is a new backtracking point, mark the tree */
800                 if (!node->set_backtrack(tid))
801                         continue;
802                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
803                                         id_to_int(prev->get_tid()),
804                                         id_to_int(t->get_id()));
805                 if (DBG_ENABLED()) {
806                         prev->print();
807                         act->print();
808                 }
809         }
810 }
811
812 /**
813  * @brief Cache a backtracking point as the "most recent", if eligible
814  *
815  * Note that this does not prepare the NodeStack for this backtracking
816  * operation; it only caches the action on a per-execution basis
817  *
818  * @param act The operation at which we should explore a different next action
819  * (i.e., backtracking point)
820  * @return True, if this action is now the most recent backtracking point;
821  * false otherwise
822  */
823 bool ModelChecker::set_latest_backtrack(ModelAction *act)
824 {
825         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
826                 priv->next_backtrack = act;
827                 return true;
828         }
829         return false;
830 }
831
832 /**
833  * Returns the last backtracking point. The model checker will explore a different
834  * path for this point in the next execution.
835  * @return The ModelAction at which the next execution should diverge.
836  */
837 ModelAction * ModelChecker::get_next_backtrack()
838 {
839         ModelAction *next = priv->next_backtrack;
840         priv->next_backtrack = NULL;
841         return next;
842 }
843
844 /**
845  * Processes a read or RMW model action.
846  * @param curr The read model action to process.
847  * @param second_part_of_rmw A boolean that is true if this is the second action of an RMW.
848  * @return True if processing this read updates the mo_graph.
849  */
850 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
851 {
852         uint64_t value = VALUE_NONE;
853         bool updated = false;
854         while (true) {
855                 const ModelAction *reads_from = curr->get_node()->get_read_from();
856                 if (reads_from != NULL) {
857                         mo_graph->startChanges();
858
859                         value = reads_from->get_value();
860                         bool r_status = false;
861
862                         if (!second_part_of_rmw) {
863                                 check_recency(curr, reads_from);
864                                 r_status = r_modification_order(curr, reads_from);
865                         }
866
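                        /* If this read-from choice has made the execution infeasible,
                         * roll back the mo_graph changes and retry with the next
                         * read-from or future value, if one remains. */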
867                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
868                                 mo_graph->rollbackChanges();
869                                 priv->too_many_reads = false;
870                                 continue;
871                         }
872
873                         read_from(curr, reads_from);
874                         mo_graph->commitChanges();
875                         mo_check_promises(curr, true);
876
877                         updated |= r_status;
878                 } else if (!second_part_of_rmw) {
879                         /* Read from future value */
880                         struct future_value fv = curr->get_node()->get_future_value();
881                         Promise *promise = new Promise(curr, fv);
882                         value = fv.value;
883                         curr->set_read_from_promise(promise);
884                         promises->push_back(promise);
885                         mo_graph->startChanges();
886                         updated = r_modification_order(curr, promise);
887                         mo_graph->commitChanges();
888                 }
889                 get_thread(curr)->set_return_value(value);
890                 return updated;
891         }
892 }
893
894 /**
895  * Processes a lock, trylock, unlock, wait, or notify model action.
896  * @param curr The mutex/condition-variable operation to process.
897  *
898  * The trylock operation checks whether the lock is taken. If not, it
899  * falls through to the normal lock operation case. If so, it returns
900  * failure.
901  *
902  * The lock operation has already been checked to be enabled, so it
903  * just grabs the lock and synchronizes with the previous unlock.
904  *
905  * The unlock operation has to re-enable all of the threads that are
906  * waiting on the lock.
907  *
908  * @return True if synchronization was updated; false otherwise
909  */
910 bool ModelChecker::process_mutex(ModelAction *curr)
911 {
912         std::mutex *mutex = NULL;
913         struct std::mutex_state *state = NULL;
914
915         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
916                 mutex = (std::mutex *)curr->get_location();
917                 state = mutex->get_state();
918         } else if (curr->is_wait()) {
919                 mutex = (std::mutex *)curr->get_value();
920                 state = mutex->get_state();
921         }
922
923         switch (curr->get_type()) {
924         case ATOMIC_TRYLOCK: {
925                 bool success = !state->islocked;
926                 curr->set_try_lock(success);
927                 if (!success) {
928                         get_thread(curr)->set_return_value(0);
929                         break;
930                 }
931                 get_thread(curr)->set_return_value(1);
932         }
933                 //otherwise fall through to the lock case
934         case ATOMIC_LOCK: {
935                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
936                         assert_bug("Lock access before initialization");
937                 state->islocked = true;
938                 ModelAction *unlock = get_last_unlock(curr);
939                 //synchronize with the previous unlock statement
940                 if (unlock != NULL) {
941                         curr->synchronize_with(unlock);
942                         return true;
943                 }
944                 break;
945         }
946         case ATOMIC_UNLOCK: {
947                 //unlock the lock
948                 state->islocked = false;
949                 //wake up the other threads
950                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
951                 //activate all the waiting threads
952                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
953                         scheduler->wake(get_thread(*rit));
954                 }
955                 waiters->clear();
956                 break;
957         }
958         case ATOMIC_WAIT: {
959                 //unlock the lock
960                 state->islocked = false;
961                 //wake up the other threads
962                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
963                 //activate all the waiting threads
964                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
965                         scheduler->wake(get_thread(*rit));
966                 }
967                 waiters->clear();
968                 //check whether we should go to sleep or not...simulate spurious failures
969                 if (curr->get_node()->get_misc() == 0) {
970                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
971                         //disable us
972                         scheduler->sleep(get_thread(curr));
973                 }
974                 break;
975         }
976         case ATOMIC_NOTIFY_ALL: {
977                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
978                 //activate all the waiting threads
979                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
980                         scheduler->wake(get_thread(*rit));
981                 }
982                 waiters->clear();
983                 break;
984         }
985         case ATOMIC_NOTIFY_ONE: {
986                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
987                 int wakeupthread = curr->get_node()->get_misc();
988                 action_list_t::iterator it = waiters->begin();
989                 advance(it, wakeupthread);
990                 scheduler->wake(get_thread(*it));
991                 waiters->erase(it);
992                 break;
993         }
994
995         default:
996                 ASSERT(0);
997         }
998         return false;
999 }
1000
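/**
 * @brief Offer a writer's value as a potential "future value" for a reader
 *
 * If the modification order may allow the reader to observe the writer, record
 * the value (with an expiration based on maxfuturedelay) at the reader's Node
 * and, if the value is new there, mark the reader as a backtracking point.
 * @param writer The write whose value may be sent back to the reader
 * @param reader The read that may receive the future value
 */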
1001 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1002 {
1003         /* Do more ambitious checks now that mo is more complete */
1004         if (mo_may_allow(writer, reader)) {
1005                 Node *node = reader->get_node();
1006
1007                 /* Find an ancestor thread which exists at the time of the reader */
1008                 Thread *write_thread = get_thread(writer);
1009                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1010                         write_thread = write_thread->get_parent();
1011
1012                 struct future_value fv = {
1013                         writer->get_value(),
1014                         writer->get_seq_number() + params.maxfuturedelay,
1015                         write_thread->get_id(),
1016                 };
1017                 if (node->add_future_value(fv))
1018                         set_latest_backtrack(reader);
1019         }
1020 }
1021
1022 /**
1023  * Process a write ModelAction
1024  * @param curr The ModelAction to process
1025  * @return True if the mo_graph was updated or promises were resolved
1026  */
1027 bool ModelChecker::process_write(ModelAction *curr)
1028 {
1029         bool updated_mod_order = w_modification_order(curr);
1030         bool updated_promises = resolve_promises(curr);
1031
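        /* With no promises outstanding, hand out any pending future values
         * collected earlier in this execution. */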
1032         if (promises->size() == 0) {
1033                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1034                         struct PendingFutureValue pfv = (*futurevalues)[i];
1035                         add_future_value(pfv.writer, pfv.act);
1036                 }
1037                 futurevalues->clear();
1038         }
1039
1040         mo_graph->commitChanges();
1041         mo_check_promises(curr, false);
1042
1043         get_thread(curr)->set_return_value(VALUE_NONE);
1044         return updated_mod_order || updated_promises;
1045 }
1046
1047 /**
1048  * Process a fence ModelAction
1049  * @param curr The ModelAction to process
1050  * @return True if synchronization was updated
1051  */
1052 bool ModelChecker::process_fence(ModelAction *curr)
1053 {
1054         /*
1055          * fence-relaxed: no-op
1056          * fence-release: only log the occurrence (not in this function), for
1057          *   use in later synchronization
1058          * fence-acquire (this function): search for hypothetical release
1059          *   sequences
1060          */
1061         bool updated = false;
1062         if (curr->is_acquire()) {
1063                 action_list_t *list = action_trace;
1064                 action_list_t::reverse_iterator rit;
1065                 /* Find X : is_read(X) && X --sb-> curr */
1066                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1067                         ModelAction *act = *rit;
1068                         if (act == curr)
1069                                 continue;
1070                         if (act->get_tid() != curr->get_tid())
1071                                 continue;
1072                         /* Stop at the beginning of the thread */
1073                         if (act->is_thread_start())
1074                                 break;
1075                         /* Stop once we reach a prior fence-acquire */
1076                         if (act->is_fence() && act->is_acquire())
1077                                 break;
1078                         if (!act->is_read())
1079                                 continue;
1080                         /* read-acquire will find its own release sequences */
1081                         if (act->is_acquire())
1082                                 continue;
1083
1084                         /* Establish hypothetical release sequences */
1085                         rel_heads_list_t release_heads;
1086                         get_release_seq_heads(curr, act, &release_heads);
1087                         for (unsigned int i = 0; i < release_heads.size(); i++)
1088                                 if (!curr->synchronize_with(release_heads[i]))
1089                                         set_bad_synchronization();
1090                         if (release_heads.size() != 0)
1091                                 updated = true;
1092                 }
1093         }
1094         return updated;
1095 }
1096
1097 /**
1098  * @brief Process the current action for thread-related activity
1099  *
1100  * Performs current-action processing for a THREAD_* ModelAction. Processing
1101  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1102  * synchronization, etc.  This function is a no-op for non-THREAD actions
1103  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1104  *
1105  * @param curr The current action
1106  * @return True if synchronization was updated or a thread completed
1107  */
1108 bool ModelChecker::process_thread_action(ModelAction *curr)
1109 {
1110         bool updated = false;
1111
1112         switch (curr->get_type()) {
1113         case THREAD_CREATE: {
1114                 thrd_t *thrd = (thrd_t *)curr->get_location();
1115                 struct thread_params *params = (struct thread_params *)curr->get_value();
1116                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1117                 add_thread(th);
1118                 th->set_creation(curr);
1119                 /* Promises can be satisfied by children */
1120                 for (unsigned int i = 0; i < promises->size(); i++) {
1121                         Promise *promise = (*promises)[i];
1122                         if (promise->thread_is_available(curr->get_tid()))
1123                                 promise->add_thread(th->get_id());
1124                 }
1125                 break;
1126         }
1127         case THREAD_JOIN: {
1128                 Thread *blocking = curr->get_thread_operand();
1129                 ModelAction *act = get_last_action(blocking->get_id());
1130                 curr->synchronize_with(act);
1131                 updated = true; /* trigger rel-seq checks */
1132                 break;
1133         }
1134         case THREAD_FINISH: {
1135                 Thread *th = get_thread(curr);
1136                 while (!th->wait_list_empty()) {
1137                         ModelAction *act = th->pop_wait_list();
1138                         scheduler->wake(get_thread(act));
1139                 }
1140                 th->complete();
1141                 /* Completed thread can't satisfy promises */
1142                 for (unsigned int i = 0; i < promises->size(); i++) {
1143                         Promise *promise = (*promises)[i];
1144                         if (promise->thread_is_available(th->get_id()))
1145                                 if (promise->eliminate_thread(th->get_id()))
1146                                         priv->failed_promise = true;
1147                 }
1148                 updated = true; /* trigger rel-seq checks */
1149                 break;
1150         }
1151         case THREAD_START: {
1152                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1153                 break;
1154         }
1155         default:
1156                 break;
1157         }
1158
1159         return updated;
1160 }
1161
1162 /**
1163  * @brief Process the current action for release sequence fixup activity
1164  *
1165  * Performs model-checker release sequence fixups for the current action,
1166  * forcing a single pending release sequence to break (with a given, potential
1167  * "loose" write) or to complete (i.e., synchronize). If a pending release
1168  * sequence forms a complete release sequence, then we must perform the fixup
1169  * synchronization, mo_graph additions, etc.
1170  *
1171  * @param curr The current action; must be a release sequence fixup action
1172  * @param work_queue The work queue to which to add work items as they are
1173  * generated
1174  */
1175 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1176 {
1177         const ModelAction *write = curr->get_node()->get_relseq_break();
1178         struct release_seq *sequence = pending_rel_seqs->back();
1179         pending_rel_seqs->pop_back();
1180         ASSERT(sequence);
1181         ModelAction *acquire = sequence->acquire;
1182         const ModelAction *rf = sequence->rf;
1183         const ModelAction *release = sequence->release;
1184         ASSERT(acquire);
1185         ASSERT(release);
1186         ASSERT(rf);
1187         ASSERT(release->same_thread(rf));
1188
1189         if (write == NULL) {
1190                 /**
1191                  * @todo Forcing a synchronization requires that we set
1192                  * modification order constraints. For instance, we can't allow
1193                  * a fixup sequence in which two separate read-acquire
1194                  * operations read from the same sequence, where the first one
1195                  * synchronizes and the other doesn't. Essentially, we can't
1196                  * allow any writes to insert themselves between 'release' and
1197                  * 'rf'
1198                  */
1199
1200                 /* Must synchronize */
1201                 if (!acquire->synchronize_with(release)) {
1202                         set_bad_synchronization();
1203                         return;
1204                 }
1205                 /* Re-check all pending release sequences */
1206                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1207                 /* Re-check act for mo_graph edges */
1208                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1209
1210                 /* propagate synchronization to later actions */
1211                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1212                 for (; (*rit) != acquire; rit++) {
1213                         ModelAction *propagate = *rit;
1214                         if (acquire->happens_before(propagate)) {
1215                                 propagate->synchronize_with(acquire);
1216                                 /* Re-check 'propagate' for mo_graph edges */
1217                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1218                         }
1219                 }
1220         } else {
1221                 /* Break release sequence with new edges:
1222                  *   release --mo--> write --mo--> rf */
1223                 mo_graph->addEdge(release, write);
1224                 mo_graph->addEdge(write, rf);
1225         }
1226
1227         /* See if we have realized a data race */
1228         checkDataRaces();
1229 }
1230
1231 /**
1232  * Initialize the current action by performing one or more of the following
1233  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1234  * in the NodeStack, manipulating backtracking sets, allocating and
1235  * initializing clock vectors, and computing the promises to fulfill.
1236  *
1237  * @param curr The current action, as passed from the user context; may be
1238  * freed/invalidated after the execution of this function, with a different
1239  * action "returned" in its place (pass-by-reference)
1240  * @return True if curr is a newly-explored action; false otherwise
1241  */
1242 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1243 {
1244         ModelAction *newcurr;
1245
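        /* The second part of an RMW is merged into the earlier RMWR action
         * rather than being pushed onto the NodeStack as a separate node. */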
1246         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1247                 newcurr = process_rmw(*curr);
1248                 delete *curr;
1249
1250                 if (newcurr->is_rmw())
1251                         compute_promises(newcurr);
1252
1253                 *curr = newcurr;
1254                 return false;
1255         }
1256
1257         (*curr)->set_seq_number(get_next_seq_num());
1258
1259         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1260         if (newcurr) {
1261                 /* First restore type and order in case of RMW operation */
1262                 if ((*curr)->is_rmwr())
1263                         newcurr->copy_typeandorder(*curr);
1264
1265                 ASSERT((*curr)->get_location() == newcurr->get_location());
1266                 newcurr->copy_from_new(*curr);
1267
1268                 /* Discard duplicate ModelAction; use action from NodeStack */
1269                 delete *curr;
1270
1271                 /* Always compute new clock vector */
1272                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1273
1274                 *curr = newcurr;
1275                 return false; /* Action was explored previously */
1276         } else {
1277                 newcurr = *curr;
1278
1279                 /* Always compute new clock vector */
1280                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1281
1282                 /* Assign most recent release fence */
1283                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1284
1285                 /*
1286                  * Perform one-time actions when pushing new ModelAction onto
1287                  * NodeStack
1288                  */
1289                 if (newcurr->is_write())
1290                         compute_promises(newcurr);
1291                 else if (newcurr->is_relseq_fixup())
1292                         compute_relseq_breakwrites(newcurr);
1293                 else if (newcurr->is_wait())
1294                         newcurr->get_node()->set_misc_max(2);
1295                 else if (newcurr->is_notify_one()) {
1296                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1297                 }
1298                 return true; /* This was a new ModelAction */
1299         }
1300 }
1301
1302 /**
1303  * @brief Establish reads-from relation between two actions
1304  *
1305  * Perform basic operations involved with establishing a concrete rf relation,
1306  * including setting the ModelAction data and checking for release sequences.
1307  *
1308  * @param act The action that is reading (must be a read)
1309  * @param rf The action from which we are reading (must be a write)
1310  *
1311  * @return True if this read established synchronization
1312  */
1313 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1314 {
1315         act->set_read_from(rf);
1316         if (rf != NULL && act->is_acquire()) {
1317                 rel_heads_list_t release_heads;
1318                 get_release_seq_heads(act, act, &release_heads);
1319                 int num_heads = release_heads.size();
1320                 for (unsigned int i = 0; i < release_heads.size(); i++)
1321                         if (!act->synchronize_with(release_heads[i])) {
1322                                 set_bad_synchronization();
1323                                 num_heads--;
1324                         }
1325                 return num_heads > 0;
1326         }
1327         return false;
1328 }
1329
1330 /**
1331  * @brief Check whether a model action is enabled.
1332  *
1333  * Checks whether a lock or join operation would be successful (i.e., is the
1334  * lock already locked, or is the joined thread already complete). If the
1335  * operation would block, the action is placed on the appropriate waiter list.
1336  *
1337  * @param curr The ModelAction to check.
1338  * @return True if the action is enabled; false otherwise.
1339  */
1340 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1341         if (curr->is_lock()) {
1342                 std::mutex *lock = (std::mutex *)curr->get_location();
1343                 struct std::mutex_state *state = lock->get_state();
1344                 if (state->islocked) {
1345                         //Stick the action in the appropriate waiting queue
1346                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1347                         return false;
1348                 }
1349         } else if (curr->get_type() == THREAD_JOIN) {
1350                 Thread *blocking = (Thread *)curr->get_location();
1351                 if (!blocking->is_complete()) {
1352                         blocking->push_wait_list(curr);
1353                         return false;
1354                 }
1355         }
1356
1357         return true;
1358 }
1359
1360 /**
1361  * This is the heart of the model checker routine. It performs model-checking
1362  * actions corresponding to a given "current action." Among other processes, it
1363  * calculates reads-from relationships, updates synchronization clock vectors,
1364  * forms a memory_order constraints graph, and handles replay/backtrack
1365  * execution when running permutations of previously-observed executions.
1366  *
1367  * @param curr The current action to process
1368  * @return The ModelAction that is actually executed; may be different than
1369  * curr; may be NULL, if the current action is not enabled to run
1370  */
1371 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1372 {
1373         ASSERT(curr);
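             /* The second part of an RMW is folded into its earlier read part
              * (see process_rmw), so it is not added to the action lists again
              * below. */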
1374         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1375
1376         if (!check_action_enabled(curr)) {
1377                 /* Make the execution look like we chose to run this action
1378                  * much later, when a lock/join can succeed */
1379                 get_thread(curr)->set_pending(curr);
1380                 scheduler->sleep(get_thread(curr));
1381                 return NULL;
1382         }
1383
1384         bool newly_explored = initialize_curr_action(&curr);
1385
1386         DBG();
1387         if (DBG_ENABLED())
1388                 curr->print();
1389
1390         wake_up_sleeping_actions(curr);
1391
1392         /* Add the action to lists before any other model-checking tasks */
1393         if (!second_part_of_rmw)
1394                 add_action_to_lists(curr);
1395
1396         /* Build may_read_from set for newly-created actions */
1397         if (newly_explored && curr->is_read())
1398                 build_may_read_from(curr);
1399
1400         /* Initialize work_queue with the "current action" work */
1401         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
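             /* Drain the work queue: each entry may enqueue further
              * release-sequence or mo-edge checks, so iterate until the queue
              * is empty or a bug has been asserted. */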
1402         while (!work_queue.empty() && !has_asserted()) {
1403                 WorkQueueEntry work = work_queue.front();
1404                 work_queue.pop_front();
1405
1406                 switch (work.type) {
1407                 case WORK_CHECK_CURR_ACTION: {
1408                         ModelAction *act = work.action;
1409                         bool update = false; /* update this location's release seq's */
1410                         bool update_all = false; /* update all release seq's */
1411
1412                         if (process_thread_action(curr))
1413                                 update_all = true;
1414
1415                         if (act->is_read() && process_read(act, second_part_of_rmw))
1416                                 update = true;
1417
1418                         if (act->is_write() && process_write(act))
1419                                 update = true;
1420
1421                         if (act->is_fence() && process_fence(act))
1422                                 update_all = true;
1423
1424                         if (act->is_mutex_op() && process_mutex(act))
1425                                 update_all = true;
1426
1427                         if (act->is_relseq_fixup())
1428                                 process_relseq_fixup(curr, &work_queue);
1429
1430                         if (update_all)
1431                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1432                         else if (update)
1433                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1434                         break;
1435                 }
1436                 case WORK_CHECK_RELEASE_SEQ:
1437                         resolve_release_sequences(work.location, &work_queue);
1438                         break;
1439                 case WORK_CHECK_MO_EDGES: {
1440                         /** @todo Complete verification of work_queue */
1441                         ModelAction *act = work.action;
1442                         bool updated = false;
1443
1444                         if (act->is_read()) {
1445                                 const ModelAction *rf = act->get_reads_from();
1446                                 const Promise *promise = act->get_reads_from_promise();
1447                                 if (rf) {
1448                                         if (r_modification_order(act, rf))
1449                                                 updated = true;
1450                                 } else if (promise) {
1451                                         if (r_modification_order(act, promise))
1452                                                 updated = true;
1453                                 }
1454                         }
1455                         if (act->is_write()) {
1456                                 if (w_modification_order(act))
1457                                         updated = true;
1458                         }
1459                         mo_graph->commitChanges();
1460
1461                         if (updated)
1462                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1463                         break;
1464                 }
1465                 default:
1466                         ASSERT(false);
1467                         break;
1468                 }
1469         }
1470
1471         check_curr_backtracking(curr);
1472         set_backtracking(curr);
1473         return curr;
1474 }
1475
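     /**
      * @brief Check whether the current action leaves unexplored alternatives
      *
      * If the parent node still has backtracking points, or the current node has
      * unexplored reads-from candidates, future values, promises, misc choices, or
      * release-sequence breaks, record this action as the latest backtracking
      * point.
      */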
1476 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1477 {
1478         Node *currnode = curr->get_node();
1479         Node *parnode = currnode->get_parent();
1480
1481         if ((parnode && !parnode->backtrack_empty()) ||
1482                          !currnode->misc_empty() ||
1483                          !currnode->read_from_empty() ||
1484                          !currnode->future_value_empty() ||
1485                          !currnode->promise_empty() ||
1486                          !currnode->relseq_break_empty()) {
1487                 set_latest_backtrack(curr);
1488         }
1489 }
1490
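     /**
      * @brief Check whether any outstanding promise has expired
      * @return True if some promise's expiration precedes the number of sequence
      * numbers already used; false otherwise
      */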
1491 bool ModelChecker::promises_expired() const
1492 {
1493         for (unsigned int i = 0; i < promises->size(); i++) {
1494                 Promise *promise = (*promises)[i];
1495                 if (promise->get_expiration() < priv->used_sequence_numbers)
1496                         return true;
1497         }
1498         return false;
1499 }
1500
1501 /**
1502  * This is the strongest feasibility check available.
1503  * @return whether the current trace (partial or complete) must be a prefix of
1504  * a feasible trace.
1505  */
1506 bool ModelChecker::isfeasibleprefix() const
1507 {
1508         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1509 }
1510
1511 /**
1512  * Print diagnostic information about an infeasible execution
1513  * @param prefix A string to prefix the output with; if NULL, then a default
1514  * message prefix will be provided
1515  */
1516 void ModelChecker::print_infeasibility(const char *prefix) const
1517 {
1518         char buf[100];
1519         char *ptr = buf;
1520         if (mo_graph->checkForCycles())
1521                 ptr += sprintf(ptr, "[mo cycle]");
1522         if (priv->failed_promise)
1523                 ptr += sprintf(ptr, "[failed promise]");
1524         if (priv->too_many_reads)
1525                 ptr += sprintf(ptr, "[too many reads]");
1526         if (priv->no_valid_reads)
1527                 ptr += sprintf(ptr, "[no valid reads-from]");
1528         if (priv->bad_synchronization)
1529                 ptr += sprintf(ptr, "[bad sw ordering]");
1530         if (promises_expired())
1531                 ptr += sprintf(ptr, "[promise expired]");
1532         if (promises->size() != 0)
1533                 ptr += sprintf(ptr, "[unresolved promise]");
1534         if (ptr != buf)
1535                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1536 }
1537
1538 /**
1539  * Returns whether the current completed trace is feasible, except for pending
1540  * release sequences.
1541  */
1542 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1543 {
1544         return !is_infeasible() && promises->size() == 0;
1545 }
1546
1547 /**
1548  * Check if the current partial trace is infeasible. Does not check any
1549  * end-of-execution flags, which might additionally rule out the execution;
1550  * thus, this is useful only for ruling an execution infeasible.
1551  * @return whether the current partial trace is infeasible.
1552  */
1553 bool ModelChecker::is_infeasible() const
1554 {
1555         return mo_graph->checkForCycles() ||
1556                 priv->no_valid_reads ||
1557                 priv->failed_promise ||
1558                 priv->too_many_reads ||
1559                 priv->bad_synchronization ||
1560                 promises_expired();
1561 }
1562
1563 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1564 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1565         ModelAction *lastread = get_last_action(act->get_tid());
1566         lastread->process_rmw(act);
1567         if (act->is_rmw()) {
1568                 if (lastread->get_reads_from())
1569                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1570                 else
1571                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1572                 mo_graph->commitChanges();
1573         }
1574         return lastread;
1575 }
1576
1577 /**
1578  * Checks whether a thread has read from the same write too many times
1579  * without seeing the effects of a later write.
1580  *
1581  * Basic idea:
1582  * 1) there must be a different write that we could read from that would satisfy the modification order,
1583  * 2) we must have read from the same value in excess of maxreads times, and
1584  * 3) that other write must have been in the reads_from set of each of those reads.
1585  *
1586  * If so, we decide that the execution is no longer feasible.
1587  */
1588 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1589 {
1590         if (params.maxreads != 0) {
1591                 if (curr->get_node()->get_read_from_size() <= 1)
1592                         return;
1593                 //Must make sure that the execution is currently feasible... we could
1594                 //accidentally clear the infeasibility by rolling back
1595                 if (is_infeasible())
1596                         return;
1597                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1598                 int tid = id_to_int(curr->get_tid());
1599
1600                 /* Skip checks if this thread has no actions on this location yet */
1601                 if ((int)thrd_lists->size() <= tid)
1602                         return;
1603                 action_list_t *list = &(*thrd_lists)[tid];
1604
1605                 action_list_t::reverse_iterator rit = list->rbegin();
1606                 /* Skip past curr */
1607                 for (; (*rit) != curr; rit++)
1608                         ;
1609                 /* go past curr now */
1610                 rit++;
1611
1612                 action_list_t::reverse_iterator ritcopy = rit;
1613                 //See if we have enough reads from the same value
1614                 int count = 0;
1615                 for (; count < params.maxreads; rit++, count++) {
1616                         if (rit == list->rend())
1617                                 return;
1618                         ModelAction *act = *rit;
1619                         if (!act->is_read())
1620                                 return;
1621
1622                         if (act->get_reads_from() != rf)
1623                                 return;
1624                         if (act->get_node()->get_read_from_size() <= 1)
1625                                 return;
1626                 }
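                     /* For each alternative write in curr's read-from set, check
                      * whether every one of those last 'maxreads' reads could also
                      * have read from it; if some write works for all of them, this
                      * execution has read the same value too many times. */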
1627                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1628                         /* Get write */
1629                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1630
1631                         /* Need a different write */
1632                         if (write == rf)
1633                                 continue;
1634
1635                         /* Test to see whether this is a feasible write to read from */
1636                         /** NOTE: all members of read-from set should be
1637                          *  feasible, so we no longer check it here **/
1638
1639                         rit = ritcopy;
1640
1641                         bool feasiblewrite = true;
1642                         //now we need to see if this write works for everyone
1643
1644                         for (int loop = count; loop > 0; loop--, rit++) {
1645                                 ModelAction *act = *rit;
1646                                 bool foundvalue = false;
1647                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1648                                         if (act->get_node()->get_read_from_at(j) == write) {
1649                                                 foundvalue = true;
1650                                                 break;
1651                                         }
1652                                 }
1653                                 if (!foundvalue) {
1654                                         feasiblewrite = false;
1655                                         break;
1656                                 }
1657                         }
1658                         if (feasiblewrite) {
1659                                 priv->too_many_reads = true;
1660                                 return;
1661                         }
1662                 }
1663         }
1664 }
1665
1666 /**
1667  * Updates the mo_graph with the constraints imposed from the current
1668  * read.
1669  *
1670  * Basic idea is the following: Go through each other thread and find
1671  * the last action that happened before our read.  Two cases:
1672  *
1673  * (1) The action is a write => that write must either occur before
1674  * the write we read from or be the write we read from.
1675  *
1676  * (2) The action is a read => the write that that action read from
1677  * must occur before the write we read from or be the same write.
1678  *
1679  * @param curr The current action. Must be a read.
1680  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1681  * @return True if modification order edges were added; false otherwise
1682  */
1683 template <typename rf_type>
1684 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1685 {
1686         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1687         unsigned int i;
1688         bool added = false;
1689         ASSERT(curr->is_read());
1690
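             /* For each thread, walk backwards to the most recent action that is
              * ordered before curr by the SC fence rules or by happens-before, and
              * add at most one mo edge into rf: from that write, or from the store
              * that such a read observed. */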
1691         /* Last SC fence in the current thread */
1692         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1693
1694         /* Iterate over all threads */
1695         for (i = 0; i < thrd_lists->size(); i++) {
1696                 /* Last SC fence in thread i */
1697                 ModelAction *last_sc_fence_thread_local = NULL;
1698                 if (int_to_id((int)i) != curr->get_tid())
1699                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1700
1701                 /* Last SC fence in thread i, before last SC fence in current thread */
1702                 ModelAction *last_sc_fence_thread_before = NULL;
1703                 if (last_sc_fence_local)
1704                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1705
1706                 /* Iterate over actions in thread, starting from most recent */
1707                 action_list_t *list = &(*thrd_lists)[i];
1708                 action_list_t::reverse_iterator rit;
1709                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1710                         ModelAction *act = *rit;
1711
1712                         if (act->is_write() && !act->equals(rf) && act != curr) {
1713                                 /* C++, Section 29.3 statement 5 */
1714                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1715                                                 *act < *last_sc_fence_thread_local) {
1716                                         added = mo_graph->addEdge(act, rf) || added;
1717                                         break;
1718                                 }
1719                                 /* C++, Section 29.3 statement 4 */
1720                                 else if (act->is_seqcst() && last_sc_fence_local &&
1721                                                 *act < *last_sc_fence_local) {
1722                                         added = mo_graph->addEdge(act, rf) || added;
1723                                         break;
1724                                 }
1725                                 /* C++, Section 29.3 statement 6 */
1726                                 else if (last_sc_fence_thread_before &&
1727                                                 *act < *last_sc_fence_thread_before) {
1728                                         added = mo_graph->addEdge(act, rf) || added;
1729                                         break;
1730                                 }
1731                         }
1732
1733                         /*
1734                          * Include at most one act per-thread that "happens
1735                          * before" curr. Don't consider reflexively.
1736                          */
1737                         if (act->happens_before(curr) && act != curr) {
1738                                 if (act->is_write()) {
1739                                         if (!act->equals(rf)) {
1740                                                 added = mo_graph->addEdge(act, rf) || added;
1741                                         }
1742                                 } else {
1743                                         const ModelAction *prevreadfrom = act->get_reads_from();
1744                                         //if the previous read is unresolved, keep going...
1745                                         if (prevreadfrom == NULL)
1746                                                 continue;
1747
1748                                         if (!prevreadfrom->equals(rf)) {
1749                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1750                                         }
1751                                 }
1752                                 break;
1753                         }
1754                 }
1755         }
1756
1757         /*
1758          * All compatible, thread-exclusive promises must be ordered after any
1759          * concrete loads from the same thread
1760          */
1761         for (unsigned int i = 0; i < promises->size(); i++)
1762                 if ((*promises)[i]->is_compatible_exclusive(curr))
1763                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1764
1765         return added;
1766 }
1767
1768 /**
1769  * Updates the mo_graph with the constraints imposed from the current write.
1770  *
1771  * Basic idea is the following: Go through each other thread and find
1772  * the latest action that happened before our write.  Two cases:
1773  *
1774  * (1) The action is a write => that write must occur before
1775  * the current write
1776  *
1777  * (2) The action is a read => the write that that action read from
1778  * must occur before the current write.
1779  *
1780  * This method also handles two other issues:
1781  *
1782  * (I) Sequential Consistency: Making sure that if the current write is
1783  * seq_cst, that it occurs after the previous seq_cst write.
1784  *
1785  * (II) Sending the write back to non-synchronizing reads.
1786  *
1787  * @param curr The current action. Must be a write.
1788  * @return True if modification order edges were added; false otherwise
1789  */
1790 bool ModelChecker::w_modification_order(ModelAction *curr)
1791 {
1792         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1793         unsigned int i;
1794         bool added = false;
1795         ASSERT(curr->is_write());
1796
1797         if (curr->is_seqcst()) {
1798                 /* We have to at least see the last sequentially consistent write,
1799                  * so we are initialized. */
1800                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1801                 if (last_seq_cst != NULL) {
1802                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1803                 }
1804         }
1805
1806         /* Last SC fence in the current thread */
1807         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1808
1809         /* Iterate over all threads */
1810         for (i = 0; i < thrd_lists->size(); i++) {
1811                 /* Last SC fence in thread i, before last SC fence in current thread */
1812                 ModelAction *last_sc_fence_thread_before = NULL;
1813                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1814                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1815
1816                 /* Iterate over actions in thread, starting from most recent */
1817                 action_list_t *list = &(*thrd_lists)[i];
1818                 action_list_t::reverse_iterator rit;
1819                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1820                         ModelAction *act = *rit;
1821                         if (act == curr) {
1822                                 /*
1823                                  * 1) If RMW and it actually read from something, then we
1824                                  * already have all relevant edges, so just skip to next
1825                                  * thread.
1826                                  *
1827                  * 2) If RMW and it didn't read from anything, we should add
1828                                  * whatever edge we can get to speed up convergence.
1829                                  *
1830                                  * 3) If normal write, we need to look at earlier actions, so
1831                                  * continue processing list.
1832                                  */
1833                                 if (curr->is_rmw()) {
1834                                         if (curr->get_reads_from() != NULL)
1835                                                 break;
1836                                         else
1837                                                 continue;
1838                                 } else
1839                                         continue;
1840                         }
1841
1842                         /* C++, Section 29.3 statement 7 */
1843                         if (last_sc_fence_thread_before && act->is_write() &&
1844                                         *act < *last_sc_fence_thread_before) {
1845                                 added = mo_graph->addEdge(act, curr) || added;
1846                                 break;
1847                         }
1848
1849                         /*
1850                          * Include at most one act per-thread that "happens
1851                          * before" curr
1852                          */
1853                         if (act->happens_before(curr)) {
1854                                 /*
1855                                  * Note: if act is RMW, just add edge:
1856                                  *   act --mo--> curr
1857                                  * The following edge should be handled elsewhere:
1858                                  *   readfrom(act) --mo--> act
1859                                  */
1860                                 if (act->is_write())
1861                                         added = mo_graph->addEdge(act, curr) || added;
1862                                 else if (act->is_read()) {
1863                                         //if the previous read is unresolved, just keep going
1864                                         if (act->get_reads_from() == NULL)
1865                                                 continue;
1866                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1867                                 }
1868                                 break;
1869                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1870                                                      !act->same_thread(curr)) {
1871                                 /* We have an action that:
1872                                    (1) did not happen before us
1873                                    (2) is a read and we are a write
1874                                    (3) cannot synchronize with us
1875                                    (4) is in a different thread
1876                                    =>
1877                                    that read could potentially read from our write.  Note that
1878                                    these checks are overly conservative at this point, we'll
1879                                    do more checks before actually removing the
1880                                    pendingfuturevalue.
1881
1882                                  */
1883                                 if (thin_air_constraint_may_allow(curr, act)) {
1884                                         if (!is_infeasible())
1885                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1886                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1887                                                 add_future_value(curr, act);
1888                                 }
1889                         }
1890                 }
1891         }
1892
1893         /*
1894          * All compatible, thread-exclusive promises must be ordered after any
1895          * concrete stores from the same thread, or else they can be merged with
1896          * this store later
1897          */
1898         for (unsigned int i = 0; i < promises->size(); i++)
1899                 if ((*promises)[i]->is_compatible_exclusive(curr))
1900                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1901
1902         return added;
1903 }
1904
1905 /** Arbitrary reads from the future are not allowed.  Section 29.3
1906  * part 9 places some constraints.  This method checks one consequence of those
1907  * constraints.  Others require compiler support. */
1908 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1909 {
1910         if (!writer->is_rmw())
1911                 return true;
1912
1913         if (!reader->is_rmw())
1914                 return true;
1915
1916         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1917                 if (search == reader)
1918                         return false;
1919                 if (search->get_tid() == reader->get_tid() &&
1920                                 search->happens_before(reader))
1921                         break;
1922         }
1923
1924         return true;
1925 }
1926
1927 /**
1928  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1929  * some constraints. This method checks the following constraint (others
1930  * require compiler support):
1931  *
1932  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1933  */
1934 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1935 {
1936         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1937         unsigned int i;
1938         /* Iterate over all threads */
1939         for (i = 0; i < thrd_lists->size(); i++) {
1940                 const ModelAction *write_after_read = NULL;
1941
1942                 /* Iterate over actions in thread, starting from most recent */
1943                 action_list_t *list = &(*thrd_lists)[i];
1944                 action_list_t::reverse_iterator rit;
1945                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1946                         ModelAction *act = *rit;
1947
1948                         /* Don't disallow due to act == reader */
1949                         if (!reader->happens_before(act) || reader == act)
1950                                 break;
1951                         else if (act->is_write())
1952                                 write_after_read = act;
1953                         else if (act->is_read() && act->get_reads_from() != NULL)
1954                                 write_after_read = act->get_reads_from();
1955                 }
1956
1957                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1958                         return false;
1959         }
1960         return true;
1961 }
1962
1963 /**
1964  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1965  * The ModelAction under consideration is expected to be taking part in
1966  * release/acquire synchronization as an object of the "reads from" relation.
1967  * Note that this can only provide release sequence support for RMW chains
1968  * which do not read from the future, as those actions cannot be traced until
1969  * their "promise" is fulfilled. Similarly, we may not even establish the
1970  * presence of a release sequence with certainty, as some modification order
1971  * constraints may be decided further in the future. Thus, this function
1972  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1973  * and a boolean representing certainty.
1974  *
1975  * @param rf The action that might be part of a release sequence. Must be a
1976  * write.
1977  * @param release_heads A pass-by-reference style return parameter. After
1978  * execution of this function, release_heads will contain the heads of all the
1979  * relevant release sequences, if any exist with certainty
1980  * @param pending A pass-by-reference style return parameter which is only used
1981  * when returning false (i.e., uncertain). Holds the information regarding
1982  * an uncertain release sequence, including any write operations that might
1983  * break the sequence.
1984  * @return true, if the ModelChecker is certain that release_heads is complete;
1985  * false otherwise
1986  */
1987 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1988                 rel_heads_list_t *release_heads,
1989                 struct release_seq *pending) const
1990 {
1991         /* Only check for release sequences if there are no cycles */
1992         if (mo_graph->checkForCycles())
1993                 return false;
1994
1995         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1996                 ASSERT(rf->is_write());
1997
1998                 if (rf->is_release())
1999                         release_heads->push_back(rf);
2000                 else if (rf->get_last_fence_release())
2001                         release_heads->push_back(rf->get_last_fence_release());
2002                 if (!rf->is_rmw())
2003                         break; /* End of RMW chain */
2004
2005                 /** @todo Need to be smarter here...  In the linux lock
2006                  * example, this will run to the beginning of the program for
2007                  * every acquire. */
2008                 /** @todo The way to be smarter here is to keep going until 1
2009                  * thread has a release preceded by an acquire and you've seen
2010                  * both. */
2011
2012                 /* acq_rel RMW is a sufficient stopping condition */
2013                 if (rf->is_acquire() && rf->is_release())
2014                         return true; /* complete */
2015         }
2016         if (!rf) {
2017                 /* read from future: need to settle this later */
2018                 pending->rf = NULL;
2019                 return false; /* incomplete */
2020         }
2021
2022         if (rf->is_release())
2023                 return true; /* complete */
2024
2025         /* else relaxed write
2026          * - check for fence-release in the same thread (29.8, stmt. 3)
2027          * - check modification order for contiguous subsequence
2028          *   -> rf must be same thread as release */
2029
2030         const ModelAction *fence_release = rf->get_last_fence_release();
2031         /* Synchronize with a fence-release unconditionally; we don't need to
2032          * find any more "contiguous subsequence..." for it */
2033         if (fence_release)
2034                 release_heads->push_back(fence_release);
2035
2036         int tid = id_to_int(rf->get_tid());
2037         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2038         action_list_t *list = &(*thrd_lists)[tid];
2039         action_list_t::const_reverse_iterator rit;
2040
2041         /* Find rf in the thread list */
2042         rit = std::find(list->rbegin(), list->rend(), rf);
2043         ASSERT(rit != list->rend());
2044
2045         /* Find the last {write,fence}-release */
2046         for (; rit != list->rend(); rit++) {
2047                 if (fence_release && *(*rit) < *fence_release)
2048                         break;
2049                 if ((*rit)->is_release())
2050                         break;
2051         }
2052         if (rit == list->rend()) {
2053                 /* No write-release in this thread */
2054                 return true; /* complete */
2055         } else if (fence_release && *(*rit) < *fence_release) {
2056                 /* The fence-release is more recent (and so, "stronger") than
2057                  * the most recent write-release */
2058                 return true; /* complete */
2059         } /* else, need to establish contiguous release sequence */
2060         ModelAction *release = *rit;
2061
2062         ASSERT(rf->same_thread(release));
2063
2064         pending->writes.clear();
2065
2066         bool certain = true;
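             /* Check every other thread for a non-RMW write that might fall
              * between 'release' and 'rf' in modification order; such a write
              * could break the contiguous release sequence, so it is recorded
              * in 'pending' and the result remains uncertain. */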
2067         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2068                 if (id_to_int(rf->get_tid()) == (int)i)
2069                         continue;
2070                 list = &(*thrd_lists)[i];
2071
2072                 /* Can we ensure no future writes from this thread may break
2073                  * the release seq? */
2074                 bool future_ordered = false;
2075
2076                 ModelAction *last = get_last_action(int_to_id(i));
2077                 Thread *th = get_thread(int_to_id(i));
2078                 if ((last && rf->happens_before(last)) ||
2079                                 !is_enabled(th) ||
2080                                 th->is_complete())
2081                         future_ordered = true;
2082
2083                 ASSERT(!th->is_model_thread() || future_ordered);
2084
2085                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2086                         const ModelAction *act = *rit;
2087                         /* Reach synchronization -> this thread is complete */
2088                         if (act->happens_before(release))
2089                                 break;
2090                         if (rf->happens_before(act)) {
2091                                 future_ordered = true;
2092                                 continue;
2093                         }
2094
2095                         /* Only non-RMW writes can break release sequences */
2096                         if (!act->is_write() || act->is_rmw())
2097                                 continue;
2098
2099                         /* Check modification order */
2100                         if (mo_graph->checkReachable(rf, act)) {
2101                                 /* rf --mo--> act */
2102                                 future_ordered = true;
2103                                 continue;
2104                         }
2105                         if (mo_graph->checkReachable(act, release))
2106                                 /* act --mo--> release */
2107                                 break;
2108                         if (mo_graph->checkReachable(release, act) &&
2109                                       mo_graph->checkReachable(act, rf)) {
2110                                 /* release --mo-> act --mo--> rf */
2111                                 return true; /* complete */
2112                         }
2113                         /* act may break release sequence */
2114                         pending->writes.push_back(act);
2115                         certain = false;
2116                 }
2117                 if (!future_ordered)
2118                         certain = false; /* This thread is uncertain */
2119         }
2120
2121         if (certain) {
2122                 release_heads->push_back(release);
2123                 pending->writes.clear();
2124         } else {
2125                 pending->release = release;
2126                 pending->rf = rf;
2127         }
2128         return certain;
2129 }
2130
2131 /**
2132  * An interface for getting the release sequence head(s) with which a
2133  * given ModelAction must synchronize. This function only returns a non-empty
2134  * result when it can locate a release sequence head with certainty. Otherwise,
2135  * it may mark the internal state of the ModelChecker so that it will handle
2136  * the release sequence at a later time, causing @a acquire to update its
2137  * synchronization at some later point in execution.
2138  *
2139  * @param acquire The 'acquire' action that may synchronize with a release
2140  * sequence
2141  * @param read The read action that may read from a release sequence; this may
2142  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2143  * when 'acquire' is a fence-acquire)
2144  * @param release_heads A pass-by-reference return parameter. Will be filled
2145  * with the head(s) of the release sequence(s), if they exist with certainty.
2146  * @see ModelChecker::release_seq_heads
2147  */
2148 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2149                 ModelAction *read, rel_heads_list_t *release_heads)
2150 {
2151         const ModelAction *rf = read->get_reads_from();
2152         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2153         sequence->acquire = acquire;
2154         sequence->read = read;
2155
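             /* If the release sequence cannot yet be established with
              * certainty, stash the partially-computed state so it can be
              * re-checked lazily as more modification-order information
              * becomes available. */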
2156         if (!release_seq_heads(rf, release_heads, sequence)) {
2157                 /* add act to 'lazy checking' list */
2158                 pending_rel_seqs->push_back(sequence);
2159         } else {
2160                 snapshot_free(sequence);
2161         }
2162 }
2163
2164 /**
2165  * Attempt to resolve all stashed operations that might synchronize with a
2166  * release sequence for a given location. This implements the "lazy" portion of
2167  * determining whether or not a release sequence was contiguous, since not all
2168  * modification order information is present at the time an action occurs.
2169  *
2170  * @param location The location/object that should be checked for release
2171  * sequence resolutions. A NULL value means to check all locations.
2172  * @param work_queue The work queue to which to add work items as they are
2173  * generated
2174  * @return True if any updates occurred (new synchronization, new mo_graph
2175  * edges)
2176  */
2177 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2178 {
2179         bool updated = false;
2180         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2181         while (it != pending_rel_seqs->end()) {
2182                 struct release_seq *pending = *it;
2183                 ModelAction *acquire = pending->acquire;
2184                 const ModelAction *read = pending->read;
2185
2186                 /* Only resolve sequences on the given location, if provided */
2187                 if (location && read->get_location() != location) {
2188                         it++;
2189                         continue;
2190                 }
2191
2192                 const ModelAction *rf = read->get_reads_from();
2193                 rel_heads_list_t release_heads;
2194                 bool complete;
2195                 complete = release_seq_heads(rf, &release_heads, pending);
2196                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2197                         if (!acquire->has_synchronized_with(release_heads[i])) {
2198                                 if (acquire->synchronize_with(release_heads[i]))
2199                                         updated = true;
2200                                 else
2201                                         set_bad_synchronization();
2202                         }
2203                 }
2204
2205                 if (updated) {
2206                         /* Re-check all pending release sequences */
2207                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2208                         /* Re-check read-acquire for mo_graph edges */
2209                         if (acquire->is_read())
2210                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2211
2212                         /* propagate synchronization to later actions */
2213                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2214                         for (; (*rit) != acquire; rit++) {
2215                                 ModelAction *propagate = *rit;
2216                                 if (acquire->happens_before(propagate)) {
2217                                         propagate->synchronize_with(acquire);
2218                                         /* Re-check 'propagate' for mo_graph edges */
2219                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2220                                 }
2221                         }
2222                 }
2223                 if (complete) {
2224                         it = pending_rel_seqs->erase(it);
2225                         snapshot_free(pending);
2226                 } else {
2227                         it++;
2228                 }
2229         }
2230
2231         // If we resolved promises or added synchronization, check whether we have realized a data race.
2232         checkDataRaces();
2233
2234         return updated;
2235 }
2236
2237 /**
2238  * Performs various bookkeeping operations for the current ModelAction. For
2239  * instance, adds action to the per-object, per-thread action vector and to the
2240  * action trace list of all thread actions.
2241  *
2242  * @param act is the ModelAction to add.
2243  */
2244 void ModelChecker::add_action_to_lists(ModelAction *act)
2245 {
2246         int tid = id_to_int(act->get_tid());
2247         ModelAction *uninit = NULL;
2248         int uninit_id = -1;
2249         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
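             /* The first access to an atomic object is preceded by a synthetic
              * "uninitialized" write, which is inserted into the same
              * per-object, per-thread, and trace lists below. */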
2250         if (list->empty() && act->is_atomic_var()) {
2251                 uninit = new_uninitialized_action(act->get_location());
2252                 uninit_id = id_to_int(uninit->get_tid());
2253                 list->push_back(uninit);
2254         }
2255         list->push_back(act);
2256
2257         action_trace->push_back(act);
2258         if (uninit)
2259                 action_trace->push_front(uninit);
2260
2261         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2262         if (tid >= (int)vec->size())
2263                 vec->resize(priv->next_thread_id);
2264         (*vec)[tid].push_back(act);
2265         if (uninit)
2266                 (*vec)[uninit_id].push_front(uninit);
2267
2268         if ((int)thrd_last_action->size() <= tid)
2269                 thrd_last_action->resize(get_num_threads());
2270         (*thrd_last_action)[tid] = act;
2271         if (uninit)
2272                 (*thrd_last_action)[uninit_id] = uninit;
2273
2274         if (act->is_fence() && act->is_release()) {
2275                 if ((int)thrd_last_fence_release->size() <= tid)
2276                         thrd_last_fence_release->resize(get_num_threads());
2277                 (*thrd_last_fence_release)[tid] = act;
2278         }
2279
2280         if (act->is_wait()) {
2281                 void *mutex_loc = (void *) act->get_value();
2282                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2283
2284                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2285                 if (tid >= (int)vec->size())
2286                         vec->resize(priv->next_thread_id);
2287                 (*vec)[tid].push_back(act);
2288         }
2289 }
2290
2291 /**
2292  * @brief Get the last action performed by a particular Thread
2293  * @param tid The thread ID of the Thread in question
2294  * @return The last action in the thread
2295  */
2296 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2297 {
2298         int threadid = id_to_int(tid);
2299         if (threadid < (int)thrd_last_action->size())
2300                 return (*thrd_last_action)[id_to_int(tid)];
2301         else
2302                 return NULL;
2303 }
2304
2305 /**
2306  * @brief Get the last fence release performed by a particular Thread
2307  * @param tid The thread ID of the Thread in question
2308  * @return The last fence release in the thread, if one exists; NULL otherwise
2309  */
2310 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2311 {
2312         int threadid = id_to_int(tid);
2313         if (threadid < (int)thrd_last_fence_release->size())
2314                 return (*thrd_last_fence_release)[id_to_int(tid)];
2315         else
2316                 return NULL;
2317 }
2318
2319 /**
2320  * Gets the last memory_order_seq_cst write (in the total global sequence)
2321  * performed on a particular object (i.e., memory location), not including the
2322  * current action.
2323  * @param curr The current ModelAction; also denotes the object location to
2324  * check
2325  * @return The last seq_cst write
2326  */
2327 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2328 {
2329         void *location = curr->get_location();
2330         action_list_t *list = get_safe_ptr_action(obj_map, location);
2331         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2332         action_list_t::reverse_iterator rit;
2333         for (rit = list->rbegin(); rit != list->rend(); rit++)
2334                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2335                         return *rit;
2336         return NULL;
2337 }
2338
2339 /**
2340  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2341  * performed in a particular thread, prior to a particular fence.
2342  * @param tid The ID of the thread to check
2343  * @param before_fence The fence from which to begin the search; if NULL, then
2344  * search for the most recent fence in the thread.
2345  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2346  */
2347 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2348 {
2349         /* All fences should have NULL location */
2350         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2351         action_list_t::reverse_iterator rit = list->rbegin();
2352
2353         if (before_fence) {
2354                 for (; rit != list->rend(); rit++)
2355                         if (*rit == before_fence)
2356                                 break;
2357
2358                 ASSERT(*rit == before_fence);
2359                 rit++;
2360         }
2361
2362         for (; rit != list->rend(); rit++)
2363                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2364                         return *rit;
2365         return NULL;
2366 }
2367
2368 /**
2369  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2370  * location). This function identifies the mutex according to the current
2371  * action, which is presumed to operate on the same mutex.
2372  * @param curr The current ModelAction; also denotes the object location to
2373  * check
2374  * @return The last unlock operation
2375  */
2376 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2377 {
2378         void *location = curr->get_location();
2379         action_list_t *list = get_safe_ptr_action(obj_map, location);
2380         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2381         action_list_t::reverse_iterator rit;
2382         for (rit = list->rbegin(); rit != list->rend(); rit++)
2383                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2384                         return *rit;
2385         return NULL;
2386 }
2387
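     /**
      * @brief Get the "parent" action of a thread
      * @param tid The thread in question
      * @return The thread's last action, if it has performed one; otherwise, the
      * action that created the thread
      */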
2388 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2389 {
2390         ModelAction *parent = get_last_action(tid);
2391         if (!parent)
2392                 parent = get_thread(tid)->get_creation();
2393         return parent;
2394 }
2395
2396 /**
2397  * Returns the clock vector for a given thread.
2398  * @param tid The thread whose clock vector we want
2399  * @return Desired clock vector
2400  */
2401 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2402 {
2403         return get_parent_action(tid)->get_cv();
2404 }
2405
2406 /**
2407  * Resolve a set of Promises with a current write. The set is provided in the
2408  * Node corresponding to @a write.
2409  * @param write The ModelAction that is fulfilling Promises
2410  * @return True if promises were resolved; false otherwise
2411  */
2412 bool ModelChecker::resolve_promises(ModelAction *write)
2413 {
2414         bool haveResolved = false;
2415         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2416         promise_list_t mustResolve, resolved;
2417
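             /* 'i' indexes the promise flags recorded in the write's Node, while
              * 'promise_index' walks the promises vector, which shrinks as
              * resolved promises are erased; it only advances past promises that
              * this write does not resolve. */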
2418         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2419                 Promise *promise = (*promises)[promise_index];
2420                 if (write->get_node()->get_promise(i)) {
2421                         ModelAction *read = promise->get_action();
2422                         read_from(read, write);
2423                         //Make sure the promise's value matches the write's value
2424                         ASSERT(promise->is_compatible(write));
2425                         mo_graph->resolvePromise(read, write, &mustResolve);
2426
2427                         resolved.push_back(promise);
2428                         promises->erase(promises->begin() + promise_index);
2429                         actions_to_check.push_back(read);
2430
2431                         haveResolved = true;
2432                 } else
2433                         promise_index++;
2434         }
2435
2436         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2437                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2438                                 == resolved.end())
2439                         priv->failed_promise = true;
2440         }
2441         for (unsigned int i = 0; i < resolved.size(); i++)
2442                 delete resolved[i];
2443         //Check whether reading these writes has made threads unable to
2444         //resolve promises
2445
2446         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2447                 ModelAction *read = actions_to_check[i];
2448                 mo_check_promises(read, true);
2449         }
2450
2451         return haveResolved;
2452 }
2453
2454 /**
2455  * Compute the set of promises that could potentially be satisfied by this
2456  * action. Note that the set computation actually appears in the Node, not in
2457  * ModelChecker.
2458  * @param curr The ModelAction that may satisfy promises
2459  */
2460 void ModelChecker::compute_promises(ModelAction *curr)
2461 {
2462         for (unsigned int i = 0; i < promises->size(); i++) {
2463                 Promise *promise = (*promises)[i];
2464                 const ModelAction *act = promise->get_action();
2465                 ASSERT(act->is_read());
2466                 if (!act->happens_before(curr) &&
2467                                 !act->could_synchronize_with(curr) &&
2468                                 promise->is_compatible(curr) &&
2469                                 promise->get_value() == curr->get_value()) {
2470                         curr->get_node()->set_promise(i, act->is_rmw());
2471                 }
2472         }
2473 }
2474
2475 /** Checks promises in response to a change in a thread's ClockVector. */
2476 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2477 {
2478         for (unsigned int i = 0; i < promises->size(); i++) {
2479                 Promise *promise = (*promises)[i];
2480                 const ModelAction *act = promise->get_action();
2481                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2482                                 merge_cv->synchronized_since(act)) {
2483                         if (promise->eliminate_thread(tid)) {
2484                                 //Promise has failed
2485                                 priv->failed_promise = true;
2486                                 return;
2487                         }
2488                 }
2489         }
2490 }
2491
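     /**
      * @brief Re-check promises after a thread has been disabled
      *
      * If any promise can no longer be satisfied by any remaining thread, the
      * execution becomes infeasible.
      */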
2492 void ModelChecker::check_promises_thread_disabled()
2493 {
2494         for (unsigned int i = 0; i < promises->size(); i++) {
2495                 Promise *promise = (*promises)[i];
2496                 if (promise->has_failed()) {
2497                         priv->failed_promise = true;
2498                         return;
2499                 }
2500         }
2501 }
2502
2503 /**
2504  * @brief Checks promises in response to addition to modification order for
2505  * threads.
2506  *
2507  * We test whether threads are still available for satisfying promises after an
2508  * addition to our modification order constraints. Those that are unavailable
2509  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2510  * that promise has failed.
2511  *
2512  * @param act The ModelAction which updated the modification order
2513  * @param is_read_check Should be true if act is a read and we must check for
2514  * updates to the store from which it read (there is a distinction here for
2515  * RMW's, which are both a load and a store)
2516  */
2517 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2518 {
2519         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2520
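             /* Check each promise whose read targets the same location as this
              * write; if the updated mo constraints make the promise
              * unsatisfiable, the execution becomes infeasible. */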
2521         for (unsigned int i = 0; i < promises->size(); i++) {
2522                 Promise *promise = (*promises)[i];
2523                 const ModelAction *pread = promise->get_action();
2524
2525                 // Is this promise on the same location?
2526                 if (!pread->same_var(write))
2527                         continue;
2528
2529                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2530                         priv->failed_promise = true;
2531                         return;
2532                 }
2533
2534                 // Don't do any lookups twice for the same thread
2535                 if (!promise->thread_is_available(act->get_tid()))
2536                         continue;
2537
2538                 if (mo_graph->checkReachable(promise, write)) {
2539                         if (mo_graph->checkPromise(write, promise)) {
2540                                 priv->failed_promise = true;
2541                                 return;
2542                         }
2543                 }
2544         }
2545 }
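/*
 * Illustrative sketch (not part of the checker): the two invocation forms
 * of mo_check_promises(), per the is_read_check parameter documented above.
 * Calls are shown as if made from within ModelChecker.
 *
 * @code
 *   // After a write 'w' updates the modification order:
 *   mo_check_promises(w, false);
 *
 *   // After a read 'r' adds constraints involving the store it read from:
 *   mo_check_promises(r, true);   // the write checked is r->get_reads_from()
 * @endcode
 */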
2546
2547 /**
2548  * Compute the set of writes that may break the current pending release
2549  * sequence. This information is extracted from previous release sequence
2550  * calculations.
2551  *
2552  * @param curr The current ModelAction. Must be a release sequence fixup
2553  * action.
2554  */
2555 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2556 {
2557         if (pending_rel_seqs->empty())
2558                 return;
2559
2560         struct release_seq *pending = pending_rel_seqs->back();
2561         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2562                 const ModelAction *write = pending->writes[i];
2563                 curr->get_node()->add_relseq_break(write);
2564         }
2565
2566         /* NULL means don't break the sequence; just synchronize */
2567         curr->get_node()->add_relseq_break(NULL);
2568 }
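/*
 * Illustrative sketch: the 'curr' passed in above is a release-sequence
 * fixup action of the kind created at the end of an execution (see
 * ModelChecker::run()):
 *
 * @code
 *   ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
 *                   std::memory_order_seq_cst, NULL, VALUE_NONE, model_thread);
 *   // compute_relseq_breakwrites(fixup) then records one "break" choice on
 *   // fixup's Node per write in the pending release sequence, plus a NULL
 *   // choice meaning "do not break the sequence; just synchronize".
 * @endcode
 */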
2569
2570 /**
2571  * Build up an initial set of all past writes that this 'read' action may read
2572  * from, as well as any previously-observed future values that must still be valid.
2573  *
2574  * @param curr is the current ModelAction that we are exploring; it must be a
2575  * 'read' operation.
2576  */
2577 void ModelChecker::build_may_read_from(ModelAction *curr)
2578 {
2579         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2580         unsigned int i;
2581         ASSERT(curr->is_read());
2582
2583         ModelAction *last_sc_write = NULL;
2584
2585         if (curr->is_seqcst())
2586                 last_sc_write = get_last_seq_cst_write(curr);
2587
2588         /* Iterate over all threads */
2589         for (i = 0; i < thrd_lists->size(); i++) {
2590                 /* Iterate over actions in thread, starting from most recent */
2591                 action_list_t *list = &(*thrd_lists)[i];
2592                 action_list_t::reverse_iterator rit;
2593                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2594                         ModelAction *act = *rit;
2595
2596                         /* Only consider 'write' actions */
2597                         if (!act->is_write() || act == curr)
2598                                 continue;
2599
2600                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2601                         bool allow_read = true;
2602
2603                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2604                                 allow_read = false;
2605                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2606                                 allow_read = false;
2607
2608                         if (allow_read) {
2609                                 /* Only add feasible reads */
2610                                 mo_graph->startChanges();
2611                                 r_modification_order(curr, act);
2612                                 if (!is_infeasible())
2613                                         curr->get_node()->add_read_from(act);
2614                                 mo_graph->rollbackChanges();
2615                         }
2616
2617                         /* Include at most one act per thread that "happens before" curr */
2618                         if (act->happens_before(curr))
2619                                 break;
2620                 }
2621         }
2622
2623         /* Inherit existing, promised future values */
2624         for (i = 0; i < promises->size(); i++) {
2625                 const Promise *promise = (*promises)[i];
2626                 const ModelAction *promise_read = promise->get_action();
2627                 if (promise_read->same_var(curr)) {
2628                         /* Only add feasible future-values */
2629                         mo_graph->startChanges();
2630                         r_modification_order(curr, promise);
2631                         if (!is_infeasible()) {
2632                                 const struct future_value fv = promise->get_fv();
2633                                 curr->get_node()->add_future_value(fv);
2634                         }
2635                         mo_graph->rollbackChanges();
2636                 }
2637         }
2638
2639         /* A read with no valid may-read-from set is possible only if the execution is doomed */
2640         if (!curr->get_node()->get_read_from_size() && curr->get_node()->future_value_empty()) {
2641                 priv->no_valid_reads = true;
2642                 set_assert();
2643         }
2644
2645         if (DBG_ENABLED()) {
2646                 model_print("Reached read action:\n");
2647                 curr->print();
2648                 model_print("Printing may_read_from\n");
2649                 curr->get_node()->print_may_read_from();
2650                 model_print("End printing may_read_from\n");
2651         }
2652 }
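/*
 * Example scenario (illustrative only; C++11 atomics in the user program):
 *
 * @code
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);
 *   // Thread 2:
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);
 * @endcode
 *
 * With no synchronization between the threads, the load's may-read-from set
 * includes the initializing write and both stores, subject to the
 * feasibility checks above; each candidate is then explored in some
 * execution.
 */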
2653
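/**
 * @brief Check whether a sleeping read may read from a given write
 *
 * Walks backward through the chain of RMWs ending at the given write,
 * looking either for the uninitialized action (which has no Node and never
 * sleeps) or for a release store performed while the reading thread was on
 * the sleep set. Used when the current read has its sleep flag set (see
 * ModelChecker::build_may_read_from).
 *
 * @param curr The read action whose sleep flag is set
 * @param write The candidate write
 * @return True if the read may read from the write
 */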
2654 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2655 {
2656         for ( ; write != NULL; write = write->get_reads_from()) {
2657                 /* UNINIT actions don't have a Node, and they never sleep */
2658                 if (write->is_uninitialized())
2659                         return true;
2660                 Node *prevnode = write->get_node()->get_parent();
2661
2662                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2663                 if (write->is_release() && thread_sleep)
2664                         return true;
2665                 if (!write->is_rmw())
2666                         return false;
2667         }
2668         return true;
2669 }
2670
2671 /**
2672  * @brief Create a new action representing an uninitialized atomic
2673  * @param location The memory location of the atomic object
2674  * @return A pointer to a new ModelAction
2675  */
2676 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2677 {
2678         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2679         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2680         act->create_cv(NULL);
2681         return act;
2682 }
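/*
 * Illustrative sketch of the allocation pattern used above: raw storage is
 * obtained from the snapshotting allocator, then the object is constructed
 * in place. The type T is a generic stand-in for ModelAction.
 *
 * @code
 *   void *mem = snapshot_malloc(sizeof(T));
 *   T *obj = new (mem) T(...);   // placement new into snapshotted memory
 *   // ... one matching teardown is an explicit destructor call
 *   // followed by freeing the raw storage:
 *   obj->~T();
 *   snapshot_free(mem);
 * @endcode
 */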
2683
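/** @brief Print a list of actions, followed by a simple hash of the list contents */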
2684 static void print_list(action_list_t *list)
2685 {
2686         action_list_t::iterator it;
2687
2688         model_print("---------------------------------------------------------------------\n");
2689
2690         unsigned int hash = 0;
2691
2692         for (it = list->begin(); it != list->end(); it++) {
2693                 (*it)->print();
2694                 hash = hash^(hash<<3)^((*it)->hash());
2695         }
2696         model_print("HASH %u\n", hash);
2697         model_print("---------------------------------------------------------------------\n");
2698 }
2699
2700 #if SUPPORT_MOD_ORDER_DUMP
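/**
 * @brief Dump the modification-order graph and trace to a Graphviz file
 *
 * Writes "<filename>.dot" containing the mo_graph nodes plus reads-from
 * (rf) and sequenced-before (sb) edges for the current action trace.
 *
 * @param filename The output filename, without the ".dot" extension
 */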
2701 void ModelChecker::dumpGraph(char *filename) const
2702 {
2703         char buffer[200];
2704         sprintf(buffer, "%s.dot", filename);
2705         FILE *file = fopen(buffer, "w");
2706         fprintf(file, "digraph %s {\n", filename);
2707         mo_graph->dumpNodes(file);
2708         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2709
2710         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2711                 ModelAction *action = *it;
2712                 if (action->is_read()) {
2713                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2714                         if (action->get_reads_from() != NULL)
2715                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2716                 }
2717                 if (thread_array[action->get_tid()] != NULL) {
2718                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2719                 }
2720
2721                 thread_array[action->get_tid()] = action;
2722         }
2723         fprintf(file, "}\n");
2724         model_free(thread_array);
2725         fclose(file);
2726 }
2727 #endif
2728
2729 /** @brief Prints an execution trace summary. */
2730 void ModelChecker::print_summary() const
2731 {
2732 #if SUPPORT_MOD_ORDER_DUMP
2733         char buffername[100];
2734         sprintf(buffername, "exec%04u", stats.num_total);
2735         mo_graph->dumpGraphToFile(buffername);
2736         sprintf(buffername, "graph%04u", stats.num_total);
2737         dumpGraph(buffername);
2738 #endif
2739
2740         model_print("Execution %d:", stats.num_total);
2741         if (isfeasibleprefix())
2742                 model_print("\n");
2743         else
2744                 print_infeasibility(" INFEASIBLE");
2745         print_list(action_trace);
2746         model_print("\n");
2747 }
2748
2749 /**
2750  * Add a Thread to the system for the first time. Should only be called once
2751  * per thread.
2752  * @param t The Thread to add
2753  */
2754 void ModelChecker::add_thread(Thread *t)
2755 {
2756         thread_map->put(id_to_int(t->get_id()), t);
2757         scheduler->add_thread(t);
2758 }
2759
2760 /**
2761  * Removes a thread from the scheduler.
2762  * @param t The Thread to remove.
2763  */
2764 void ModelChecker::remove_thread(Thread *t)
2765 {
2766         scheduler->remove_thread(t);
2767 }
2768
2769 /**
2770  * @brief Get a Thread reference by its ID
2771  * @param tid The Thread's ID
2772  * @return A Thread reference
2773  */
2774 Thread * ModelChecker::get_thread(thread_id_t tid) const
2775 {
2776         return thread_map->get(id_to_int(tid));
2777 }
2778
2779 /**
2780  * @brief Get a reference to the Thread in which a ModelAction was executed
2781  * @param act The ModelAction
2782  * @return A Thread reference
2783  */
2784 Thread * ModelChecker::get_thread(const ModelAction *act) const
2785 {
2786         return get_thread(act->get_tid());
2787 }
2788
2789 /**
2790  * @brief Check if a Thread is currently enabled
2791  * @param t The Thread to check
2792  * @return True if the Thread is currently enabled
2793  */
2794 bool ModelChecker::is_enabled(Thread *t) const
2795 {
2796         return scheduler->is_enabled(t);
2797 }
2798
2799 /**
2800  * @brief Check if a Thread is currently enabled
2801  * @param tid The ID of the Thread to check
2802  * @return True if the Thread is currently enabled
2803  */
2804 bool ModelChecker::is_enabled(thread_id_t tid) const
2805 {
2806         return scheduler->is_enabled(tid);
2807 }
2808
2809 /**
2810  * Switch from a model-checker context to a user-thread context. This is the
2811  * complement of ModelChecker::switch_to_master and must be called from the
2812  * model-checker context.
2813  *
2814  * @param thread The user-thread to switch to
2815  */
2816 void ModelChecker::switch_from_master(Thread *thread)
2817 {
2818         scheduler->set_current_thread(thread);
2819         Thread::swap(&system_context, thread);
2820 }
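/*
 * Illustrative control-flow sketch of the hand-off between the
 * model-checker ("master") context and a user thread; schematic only,
 * see ModelChecker::run() for the real driver loop.
 *
 * @code
 *   // master context:
 *   switch_from_master(thr);                 // run 'thr' until it yields
 *   ModelAction *next = thr->get_pending();  // action stashed by the yield
 *
 *   // user-thread context (inside some operation wrapper):
 *   uint64_t ret = model->switch_to_master(act);  // park thread, hand 'act' over
 * @endcode
 */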
2821
2822 /**
2823  * Switch from a user-context to the "master thread" context (a.k.a. system
2824  * context). This switch is made with the intention of exploring a particular
2825  * model-checking action (described by a ModelAction object). Must be called
2826  * from a user-thread context.
2827  *
2828  * @param act The current action that will be explored. May be NULL only if
2829  * trace is exiting via an assertion (see ModelChecker::set_assert and
2830  * ModelChecker::has_asserted).
2831  * @return Return the value returned by the current action
2832  */
2833 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2834 {
2835         DBG();
2836         Thread *old = thread_current();
2837         ASSERT(!old->get_pending());
2838         old->set_pending(act);
2839         if (Thread::swap(old, &system_context) < 0) {
2840                 perror("swap threads");
2841                 exit(EXIT_FAILURE);
2842         }
2843         return old->get_return_value();
2844 }
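/*
 * Illustrative sketch: a user-thread operation typically packages its next
 * step as a ModelAction and yields to the model-checker via
 * switch_to_master(). The action type and constructor arguments below are
 * schematic, not the exact interface of the atomic-operation front end.
 *
 * @code
 *   uint64_t example_atomic_load(void *obj)
 *   {
 *       ModelAction *act = new ModelAction(ATOMIC_READ,
 *                       std::memory_order_seq_cst, obj, VALUE_NONE);
 *       return model->switch_to_master(act);   // returns the value read
 *   }
 * @endcode
 */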
2845
2846 /**
2847  * Takes the next step in the execution, if possible.
2848  * @param curr The current step to take
2849  * @return The next Thread to run, if any; NULL if this execution
2850  * should terminate
2851  */
2852 Thread * ModelChecker::take_step(ModelAction *curr)
2853 {
2854         Thread *curr_thrd = get_thread(curr);
2855         ASSERT(curr_thrd->get_state() == THREAD_READY);
2856
2857         curr = check_current_action(curr);
2858
2859         /* Infeasible -> don't take any more steps */
2860         if (is_infeasible())
2861                 return NULL;
2862         else if (isfeasibleprefix() && have_bug_reports()) {
2863                 set_assert();
2864                 return NULL;
2865         }
2866
2867         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2868                 return NULL;
2869
2870         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2871                 scheduler->remove_thread(curr_thrd);
2872
2873         Thread *next_thrd = get_next_thread(curr);
2874
2875         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2876                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2877
2878         return next_thrd;
2879 }
2880
2881 /** Wrapper to run the user's main function, with appropriate arguments */
2882 void user_main_wrapper(void *)
2883 {
2884         user_main(model->params.argc, model->params.argv);
2885 }
2886
2887 /** @brief Run ModelChecker for the user program */
2888 void ModelChecker::run()
2889 {
2890         do {
2891                 thrd_t user_thread;
2892                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2893                 add_thread(t);
2894
2895                 do {
2896                         /*
2897                          * Stash next pending action(s) for thread(s). We
2898                          * should only need to stash one thread's action (the
2899                          * thread which just took a step) plus the first step
2900                          * for any newly-created thread.
2901                          */
2902                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2903                                 thread_id_t tid = int_to_id(i);
2904                                 Thread *thr = get_thread(tid);
2905                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2906                                         switch_from_master(thr);
2907                                 }
2908                         }
2909
2910                         /* Catch assertions from prior take_step or from
2911                          * between-ModelAction bugs (e.g., data races) */
2912                         if (has_asserted())
2913                                 break;
2914
2915                         /* Consume the next action for a Thread */
2916                         ModelAction *curr = t->get_pending();
2917                         t->set_pending(NULL);
2918                         t = take_step(curr);
2919                 } while (t && !t->is_model_thread());
2920
2921                 /*
2922                  * Launch end-of-execution release sequence fixups only when
2923                  * the execution is otherwise feasible AND:
2924                  *
2925                  * (1) there are pending release sequences
2926                  * (2) there are pending assertions that could be invalidated
2927                  *     by a change in clock vectors (i.e., data races)
2928                  * (3) there are no pending promises
2929                  */
2930                 while (!pending_rel_seqs->empty() &&
2931                                 is_feasible_prefix_ignore_relseq() &&
2932                                 !unrealizedraces.empty()) {
2933                         model_print("*** WARNING: release sequence fixup action "
2934                                         "(%zu pending release sequence(s)) ***\n",
2935                                         pending_rel_seqs->size());
2936                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2937                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2938                                         model_thread);
2939                         take_step(fixup);
2940                 }
2941         } while (next_execution());
2942
2943         model_print("******* Model-checking complete: *******\n");
2944         print_stats();
2945 }