cyclegraph/model: unify, clean up graph printing
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
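/** @brief The single global ModelChecker instance (referenced throughout, e.g. as model->params below) */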
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
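/**
 * @brief Look up the action list for a memory location, lazily creating an
 * empty list in @a hash on first access
 */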
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
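/** @brief Same as get_safe_ptr_action(), but for the per-thread action-list vectors */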
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
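        /* Roll the snapshotted program state back to the initial snapshot */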
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
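/** @return The Node at the head of the NodeStack (i.e., the node for the current action) */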
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; NULL if no threads
221  * remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be. Those next actions are already stored as each thread's
300  * pending action; this method marks them with the sleep flag.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* An asleep fence-acquire can be woken by any release operation */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can wake a load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
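/**
 * @brief Wake up any sleeping threads whose pending action should be woken by
 * the current action (see should_wake_up())
 * @param curr The current action
 */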
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if the bug is immediately feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else if (scheduler->all_threads_sleeping())
487                 stats.num_redundant++;
488         else
489                 ASSERT(false);
490 }
491
492 /** @brief Print execution stats */
493 void ModelChecker::print_stats() const
494 {
495         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
496         model_print("Number of redundant executions: %d\n", stats.num_redundant);
497         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
498         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
499         model_print("Total executions: %d\n", stats.num_total);
500         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
501 }
502
503 /**
504  * @brief End-of-execution print
505  * @param printbugs Should any existing bugs be printed?
506  */
507 void ModelChecker::print_execution(bool printbugs) const
508 {
509         print_program_output();
510
511         if (DBG_ENABLED() || params.verbose) {
512                 model_print("Earliest divergence point since last feasible execution:\n");
513                 if (earliest_diverge)
514                         earliest_diverge->print();
515                 else
516                         model_print("(Not set)\n");
517
518                 model_print("\n");
519                 print_stats();
520         }
521
522         /* Don't print invalid bugs */
523         if (printbugs)
524                 print_bugs();
525
526         model_print("\n");
527         print_summary();
528 }
529
530 /**
531  * Queries the model-checker for more executions to explore and, if one
532  * exists, resets the model-checker state to begin the next execution.
533  *
534  * @return If there are more executions to explore, return true. Otherwise,
535  * return false.
536  */
537 bool ModelChecker::next_execution()
538 {
539         DBG();
540         /* Is this execution a feasible execution that's worth bug-checking? */
541         bool complete = isfeasibleprefix() && (is_complete_execution() ||
542                         have_bug_reports());
543
544         /* End-of-execution bug checks */
545         if (complete) {
546                 if (is_deadlocked())
547                         assert_bug("Deadlock detected");
548
549                 checkDataRaces();
550         }
551
552         record_stats();
553
554         /* Output */
555         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
556                 print_execution(complete);
557         else
558                 clear_program_output();
559
560         if (complete)
561                 earliest_diverge = NULL;
562
563         if ((diverge = get_next_backtrack()) == NULL)
564                 return false;
565
566         if (DBG_ENABLED()) {
567                 model_print("Next execution will diverge at:\n");
568                 diverge->print();
569         }
570
571         reset_to_initial_state();
572         return true;
573 }
574
575 /**
576  * @brief Find the last fence-related backtracking conflict for a ModelAction
577  *
578  * This function performs the search for the most recent conflicting action
579  * against which we should perform backtracking, as affected by fence
580  * operations. This includes pairs of potentially-synchronizing actions which
581  * occur due to fence-acquire or fence-release, and hence should be explored in
582  * the opposite execution order.
583  *
584  * @param act The current action
585  * @return The most recent action which conflicts with act due to fences
586  */
587 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
588 {
589         /* Only perform release/acquire fence backtracking for stores */
590         if (!act->is_write())
591                 return NULL;
592
593         /* Find a fence-release (or, act is a release) */
594         ModelAction *last_release;
595         if (act->is_release())
596                 last_release = act;
597         else
598                 last_release = get_last_fence_release(act->get_tid());
599         if (!last_release)
600                 return NULL;
601
602         /* Skip past the release */
603         action_list_t *list = action_trace;
604         action_list_t::reverse_iterator rit;
605         for (rit = list->rbegin(); rit != list->rend(); rit++)
606                 if (*rit == last_release)
607                         break;
608         ASSERT(rit != list->rend());
609
610         /* Find a prior:
611          *   load-acquire
612          * or
613          *   load --sb-> fence-acquire */
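        /* Per-thread bookkeeping for the backward scan below: the most recent
         * acquire fence seen and any plain (non-acquire) load from act's location */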
614         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
615         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
616         bool found_acquire_fences = false;
617         for ( ; rit != list->rend(); rit++) {
618                 ModelAction *prev = *rit;
619                 if (act->same_thread(prev))
620                         continue;
621
622                 int tid = id_to_int(prev->get_tid());
623
624                 if (prev->is_read() && act->same_var(prev)) {
625                         if (prev->is_acquire()) {
626                                 /* Found most recent load-acquire, don't need
627                                  * to search for more fences */
628                                 if (!found_acquire_fences)
629                                         return NULL;
630                         } else {
631                                 prior_loads[tid] = prev;
632                         }
633                 }
634                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
635                         found_acquire_fences = true;
636                         acquire_fences[tid] = prev;
637                 }
638         }
639
640         ModelAction *latest_backtrack = NULL;
641         for (unsigned int i = 0; i < acquire_fences.size(); i++)
642                 if (acquire_fences[i] && prior_loads[i])
643                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
644                                 latest_backtrack = acquire_fences[i];
645         return latest_backtrack;
646 }
647
648 /**
649  * @brief Find the last backtracking conflict for a ModelAction
650  *
651  * This function performs the search for the most recent conflicting action
652  * against which we should perform backtracking. This primarily includes pairs of
653  * synchronizing actions which should be explored in the opposite execution
654  * order.
655  *
656  * @param act The current action
657  * @return The most recent action which conflicts with act
658  */
659 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
660 {
661         switch (act->get_type()) {
662         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
663         case ATOMIC_READ:
664         case ATOMIC_WRITE:
665         case ATOMIC_RMW: {
666                 ModelAction *ret = NULL;
667
668                 /* linear search: from most recent to oldest */
669                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
670                 action_list_t::reverse_iterator rit;
671                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
672                         ModelAction *prev = *rit;
673                         if (prev->could_synchronize_with(act)) {
674                                 ret = prev;
675                                 break;
676                         }
677                 }
678
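                /* Also consider fence-related conflicts; return whichever
                 * conflicting action is more recent */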
679                 ModelAction *ret2 = get_last_fence_conflict(act);
680                 if (!ret2)
681                         return ret;
682                 if (!ret)
683                         return ret2;
684                 if (*ret < *ret2)
685                         return ret2;
686                 return ret;
687         }
688         case ATOMIC_LOCK:
689         case ATOMIC_TRYLOCK: {
690                 /* linear search: from most recent to oldest */
691                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
692                 action_list_t::reverse_iterator rit;
693                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
694                         ModelAction *prev = *rit;
695                         if (act->is_conflicting_lock(prev))
696                                 return prev;
697                 }
698                 break;
699         }
700         case ATOMIC_UNLOCK: {
701                 /* linear search: from most recent to oldest */
702                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
703                 action_list_t::reverse_iterator rit;
704                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
705                         ModelAction *prev = *rit;
706                         if (!act->same_thread(prev) && prev->is_failed_trylock())
707                                 return prev;
708                 }
709                 break;
710         }
711         case ATOMIC_WAIT: {
712                 /* linear search: from most recent to oldest */
713                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
714                 action_list_t::reverse_iterator rit;
715                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
716                         ModelAction *prev = *rit;
717                         if (!act->same_thread(prev) && prev->is_failed_trylock())
718                                 return prev;
719                         if (!act->same_thread(prev) && prev->is_notify())
720                                 return prev;
721                 }
722                 break;
723         }
724
725         case ATOMIC_NOTIFY_ALL:
726         case ATOMIC_NOTIFY_ONE: {
727                 /* linear search: from most recent to oldest */
728                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
729                 action_list_t::reverse_iterator rit;
730                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
731                         ModelAction *prev = *rit;
732                         if (!act->same_thread(prev) && prev->is_wait())
733                                 return prev;
734                 }
735                 break;
736         }
737         default:
738                 break;
739         }
740         return NULL;
741 }
742
743 /** This method finds backtracking points, i.e., prior conflicting actions
744  * against which we should try to reorder the parameter ModelAction.
745  *
746  * @param act The ModelAction to find backtracking points for.
747  */
748 void ModelChecker::set_backtracking(ModelAction *act)
749 {
750         Thread *t = get_thread(act);
751         ModelAction *prev = get_last_conflict(act);
752         if (prev == NULL)
753                 return;
754
755         Node *node = prev->get_node()->get_parent();
756
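        /* If the conflicting thread is enabled at the conflict point, only
         * consider backtracking into that one thread; otherwise consider every
         * thread known at that node */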
757         int low_tid, high_tid;
758         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
759                 low_tid = id_to_int(act->get_tid());
760                 high_tid = low_tid + 1;
761         } else {
762                 low_tid = 0;
763                 high_tid = get_num_threads();
764         }
765
766         for (int i = low_tid; i < high_tid; i++) {
767                 thread_id_t tid = int_to_id(i);
768
769                 /* Make sure this thread can be enabled here. */
770                 if (i >= node->get_num_threads())
771                         break;
772
773                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774                 if (node->enabled_status(tid) != THREAD_ENABLED)
775                         continue;
776
777                 /* Check if this has been explored already */
778                 if (node->has_been_explored(tid))
779                         continue;
780
781                 /* See if fairness allows */
782                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
783                         bool unfair = false;
784                         for (int t = 0; t < node->get_num_threads(); t++) {
785                                 thread_id_t tother = int_to_id(t);
786                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
787                                         unfair = true;
788                                         break;
789                                 }
790                         }
791                         if (unfair)
792                                 continue;
793                 }
794                 /* Cache the latest backtracking point */
795                 set_latest_backtrack(prev);
796
797                 /* If this is a new backtracking point, mark the tree */
798                 if (!node->set_backtrack(tid))
799                         continue;
800                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
801                                         id_to_int(prev->get_tid()),
802                                         id_to_int(t->get_id()));
803                 if (DBG_ENABLED()) {
804                         prev->print();
805                         act->print();
806                 }
807         }
808 }
809
810 /**
811  * @brief Cache a backtracking point as the "most recent", if eligible
812  *
813  * Note that this does not prepare the NodeStack for this backtracking
814  * operation; it only caches the action on a per-execution basis
815  *
816  * @param act The operation at which we should explore a different next action
817  * (i.e., backtracking point)
818  * @return True, if this action is now the most recent backtracking point;
819  * false otherwise
820  */
821 bool ModelChecker::set_latest_backtrack(ModelAction *act)
822 {
823         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
824                 priv->next_backtrack = act;
825                 return true;
826         }
827         return false;
828 }
829
830 /**
831  * Returns last backtracking point. The model checker will explore a different
832  * path for this point in the next execution.
833  * @return The ModelAction at which the next execution should diverge.
834  */
835 ModelAction * ModelChecker::get_next_backtrack()
836 {
837         ModelAction *next = priv->next_backtrack;
838         priv->next_backtrack = NULL;
839         return next;
840 }
841
842 /**
843  * Processes a read model action.
844  * @param curr is the read model action to process.
845  * @return True if processing this read updates the mo_graph.
846  */
847 bool ModelChecker::process_read(ModelAction *curr)
848 {
849         Node *node = curr->get_node();
850         uint64_t value = VALUE_NONE;
851         bool updated = false;
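        /* Retry with a different read-from choice if the current one makes the
         * execution infeasible */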
852         while (true) {
853                 switch (node->get_read_from_status()) {
854                 case READ_FROM_PAST: {
855                         const ModelAction *rf = node->get_read_from_past();
856                         ASSERT(rf);
857
858                         mo_graph->startChanges();
859                         value = rf->get_value();
860                         check_recency(curr, rf);
861                         bool r_status = r_modification_order(curr, rf);
862
863                         if (is_infeasible() && node->increment_read_from()) {
864                                 mo_graph->rollbackChanges();
865                                 priv->too_many_reads = false;
866                                 continue;
867                         }
868
869                         read_from(curr, rf);
870                         mo_graph->commitChanges();
871                         mo_check_promises(curr, true);
872
873                         updated |= r_status;
874                         break;
875                 }
876                 case READ_FROM_PROMISE: {
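                        /* Reading from an existing promise: register this read
                         * as one of the promise's readers */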
877                         Promise *promise = curr->get_node()->get_read_from_promise();
878                         promise->add_reader(curr);
879                         value = promise->get_value();
880                         curr->set_read_from_promise(promise);
881                         mo_graph->startChanges();
882                         updated = r_modification_order(curr, promise);
883                         mo_graph->commitChanges();
884                         break;
885                 }
886                 case READ_FROM_FUTURE: {
887                         /* Read from future value */
888                         struct future_value fv = node->get_future_value();
889                         Promise *promise = new Promise(curr, fv);
890                         value = fv.value;
891                         curr->set_read_from_promise(promise);
892                         promises->push_back(promise);
893                         mo_graph->startChanges();
894                         updated = r_modification_order(curr, promise);
895                         mo_graph->commitChanges();
896                         break;
897                 }
898                 default:
899                         ASSERT(false);
900                 }
901                 get_thread(curr)->set_return_value(value);
902                 return updated;
903         }
904 }
905
906 /**
907  * Processes a lock, trylock, unlock, wait, or notify model action.
908  * @param curr The mutex-related model action to process.
909  *
910  * The trylock operation checks whether the lock is taken.  If not,
911  * it falls through to the normal lock operation case.  If so, it
912  * fails.
913  *
914  * The lock operation has already been checked that it is enabled, so
915  * it just grabs the lock and synchronizes with the previous unlock.
916  *
917  * The unlock operation has to re-enable all of the threads that are
918  * waiting on the lock.
919  *
920  * @return True if synchronization was updated; false otherwise
921  */
922 bool ModelChecker::process_mutex(ModelAction *curr)
923 {
924         std::mutex *mutex = NULL;
925         struct std::mutex_state *state = NULL;
926
927         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
928                 mutex = (std::mutex *)curr->get_location();
929                 state = mutex->get_state();
930         } else if (curr->is_wait()) {
931                 mutex = (std::mutex *)curr->get_value();
932                 state = mutex->get_state();
933         }
934
935         switch (curr->get_type()) {
936         case ATOMIC_TRYLOCK: {
937                 bool success = !state->islocked;
938                 curr->set_try_lock(success);
939                 if (!success) {
940                         get_thread(curr)->set_return_value(0);
941                         break;
942                 }
943                 get_thread(curr)->set_return_value(1);
944         }
945                 //otherwise fall through to the lock case
946         case ATOMIC_LOCK: {
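                /* Sanity check: the mutex must have been initialized
                 * (happens-before) prior to this access */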
947                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
948                         assert_bug("Lock access before initialization");
949                 state->islocked = true;
950                 ModelAction *unlock = get_last_unlock(curr);
951                 //synchronize with the previous unlock statement
952                 if (unlock != NULL) {
953                         curr->synchronize_with(unlock);
954                         return true;
955                 }
956                 break;
957         }
958         case ATOMIC_UNLOCK: {
959                 //unlock the lock
960                 state->islocked = false;
961                 //wake up the other threads
962                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
963                 //activate all the waiting threads
964                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
965                         scheduler->wake(get_thread(*rit));
966                 }
967                 waiters->clear();
968                 break;
969         }
970         case ATOMIC_WAIT: {
971                 //unlock the lock
972                 state->islocked = false;
973                 //wake up the other threads
974                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
975                 //activate all the waiting threads
976                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
977                         scheduler->wake(get_thread(*rit));
978                 }
979                 waiters->clear();
980                 //check whether we should go to sleep or not...simulate spurious failures
981                 if (curr->get_node()->get_misc() == 0) {
982                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
983                         //disable us
984                         scheduler->sleep(get_thread(curr));
985                 }
986                 break;
987         }
988         case ATOMIC_NOTIFY_ALL: {
989                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
990                 //activate all the waiting threads
991                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
992                         scheduler->wake(get_thread(*rit));
993                 }
994                 waiters->clear();
995                 break;
996         }
997         case ATOMIC_NOTIFY_ONE: {
998                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
999                 int wakeupthread = curr->get_node()->get_misc();
1000                 action_list_t::iterator it = waiters->begin();
1001                 advance(it, wakeupthread);
1002                 scheduler->wake(get_thread(*it));
1003                 waiters->erase(it);
1004                 break;
1005         }
1006
1007         default:
1008                 ASSERT(0);
1009         }
1010         return false;
1011 }
1012
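/**
 * @brief Offer a writer's value to a reader as a potential "future value"
 *
 * If the modification order may allow the reader to observe the writer, the
 * value is recorded at the reader's Node and, if it is a new future value, a
 * backtracking point is set at the reader.
 * @param writer The write whose value may be sent to the reader
 * @param reader The read that may receive the future value
 */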
1013 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1014 {
1015         /* Do more ambitious checks now that mo is more complete */
1016         if (mo_may_allow(writer, reader)) {
1017                 Node *node = reader->get_node();
1018
1019                 /* Find an ancestor thread which exists at the time of the reader */
1020                 Thread *write_thread = get_thread(writer);
1021                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1022                         write_thread = write_thread->get_parent();
1023
1024                 struct future_value fv = {
1025                         writer->get_value(),
1026                         writer->get_seq_number() + params.maxfuturedelay,
1027                         write_thread->get_id(),
1028                 };
1029                 if (node->add_future_value(fv))
1030                         set_latest_backtrack(reader);
1031         }
1032 }
1033
1034 /**
1035  * Process a write ModelAction
1036  * @param curr The ModelAction to process
1037  * @return True if the mo_graph was updated or promises were resolved
1038  */
1039 bool ModelChecker::process_write(ModelAction *curr)
1040 {
1041         bool updated_mod_order = w_modification_order(curr);
1042         bool updated_promises = resolve_promises(curr);
1043
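        /* With no outstanding promises, hand out any pending future values to
         * their readers */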
1044         if (promises->size() == 0) {
1045                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1046                         struct PendingFutureValue pfv = (*futurevalues)[i];
1047                         add_future_value(pfv.writer, pfv.act);
1048                 }
1049                 futurevalues->clear();
1050         }
1051
1052         mo_graph->commitChanges();
1053         mo_check_promises(curr, false);
1054
1055         get_thread(curr)->set_return_value(VALUE_NONE);
1056         return updated_mod_order || updated_promises;
1057 }
1058
1059 /**
1060  * Process a fence ModelAction
1061  * @param curr The ModelAction to process
1062  * @return True if synchronization was updated
1063  */
1064 bool ModelChecker::process_fence(ModelAction *curr)
1065 {
1066         /*
1067          * fence-relaxed: no-op
1068  * fence-release: only log the occurrence (not in this function), for
1069          *   use in later synchronization
1070          * fence-acquire (this function): search for hypothetical release
1071          *   sequences
1072          */
1073         bool updated = false;
1074         if (curr->is_acquire()) {
1075                 action_list_t *list = action_trace;
1076                 action_list_t::reverse_iterator rit;
1077                 /* Find X : is_read(X) && X --sb-> curr */
1078                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1079                         ModelAction *act = *rit;
1080                         if (act == curr)
1081                                 continue;
1082                         if (act->get_tid() != curr->get_tid())
1083                                 continue;
1084                         /* Stop at the beginning of the thread */
1085                         if (act->is_thread_start())
1086                                 break;
1087                         /* Stop once we reach a prior fence-acquire */
1088                         if (act->is_fence() && act->is_acquire())
1089                                 break;
1090                         if (!act->is_read())
1091                                 continue;
1092                         /* read-acquire will find its own release sequences */
1093                         if (act->is_acquire())
1094                                 continue;
1095
1096                         /* Establish hypothetical release sequences */
1097                         rel_heads_list_t release_heads;
1098                         get_release_seq_heads(curr, act, &release_heads);
1099                         for (unsigned int i = 0; i < release_heads.size(); i++)
1100                                 if (!curr->synchronize_with(release_heads[i]))
1101                                         set_bad_synchronization();
1102                         if (release_heads.size() != 0)
1103                                 updated = true;
1104                 }
1105         }
1106         return updated;
1107 }
1108
1109 /**
1110  * @brief Process the current action for thread-related activity
1111  *
1112  * Performs current-action processing for a THREAD_* ModelAction. Processing
1113  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1114  * synchronization, etc.  This function is a no-op for non-THREAD actions
1115  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1116  *
1117  * @param curr The current action
1118  * @return True if synchronization was updated or a thread completed
1119  */
1120 bool ModelChecker::process_thread_action(ModelAction *curr)
1121 {
1122         bool updated = false;
1123
1124         switch (curr->get_type()) {
1125         case THREAD_CREATE: {
1126                 thrd_t *thrd = (thrd_t *)curr->get_location();
1127                 struct thread_params *params = (struct thread_params *)curr->get_value();
1128                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1129                 add_thread(th);
1130                 th->set_creation(curr);
1131                 /* Promises can be satisfied by children */
1132                 for (unsigned int i = 0; i < promises->size(); i++) {
1133                         Promise *promise = (*promises)[i];
1134                         if (promise->thread_is_available(curr->get_tid()))
1135                                 promise->add_thread(th->get_id());
1136                 }
1137                 break;
1138         }
1139         case THREAD_JOIN: {
1140                 Thread *blocking = curr->get_thread_operand();
1141                 ModelAction *act = get_last_action(blocking->get_id());
1142                 curr->synchronize_with(act);
1143                 updated = true; /* trigger rel-seq checks */
1144                 break;
1145         }
1146         case THREAD_FINISH: {
1147                 Thread *th = get_thread(curr);
1148                 while (!th->wait_list_empty()) {
1149                         ModelAction *act = th->pop_wait_list();
1150                         scheduler->wake(get_thread(act));
1151                 }
1152                 th->complete();
1153                 /* Completed thread can't satisfy promises */
1154                 for (unsigned int i = 0; i < promises->size(); i++) {
1155                         Promise *promise = (*promises)[i];
1156                         if (promise->thread_is_available(th->get_id()))
1157                                 if (promise->eliminate_thread(th->get_id()))
1158                                         priv->failed_promise = true;
1159                 }
1160                 updated = true; /* trigger rel-seq checks */
1161                 break;
1162         }
1163         case THREAD_START: {
1164                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1165                 break;
1166         }
1167         default:
1168                 break;
1169         }
1170
1171         return updated;
1172 }
1173
1174 /**
1175  * @brief Process the current action for release sequence fixup activity
1176  *
1177  * Performs model-checker release sequence fixups for the current action,
1178  * forcing a single pending release sequence to break (with a given, potential
1179  * "loose" write) or to complete (i.e., synchronize). If a pending release
1180  * sequence forms a complete release sequence, then we must perform the fixup
1181  * synchronization, mo_graph additions, etc.
1182  *
1183  * @param curr The current action; must be a release sequence fixup action
1184  * @param work_queue The work queue to which to add work items as they are
1185  * generated
1186  */
1187 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1188 {
1189         const ModelAction *write = curr->get_node()->get_relseq_break();
1190         struct release_seq *sequence = pending_rel_seqs->back();
1191         pending_rel_seqs->pop_back();
1192         ASSERT(sequence);
1193         ModelAction *acquire = sequence->acquire;
1194         const ModelAction *rf = sequence->rf;
1195         const ModelAction *release = sequence->release;
1196         ASSERT(acquire);
1197         ASSERT(release);
1198         ASSERT(rf);
1199         ASSERT(release->same_thread(rf));
1200
1201         if (write == NULL) {
1202                 /**
1203                  * @todo Forcing a synchronization requires that we set
1204                  * modification order constraints. For instance, we can't allow
1205                  * a fixup sequence in which two separate read-acquire
1206                  * operations read from the same sequence, where the first one
1207                  * synchronizes and the other doesn't. Essentially, we can't
1208                  * allow any writes to insert themselves between 'release' and
1209                  * 'rf'
1210                  */
1211
1212                 /* Must synchronize */
1213                 if (!acquire->synchronize_with(release)) {
1214                         set_bad_synchronization();
1215                         return;
1216                 }
1217                 /* Re-check all pending release sequences */
1218                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1219                 /* Re-check act for mo_graph edges */
1220                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1221
1222                 /* propagate synchronization to later actions */
1223                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1224                 for (; (*rit) != acquire; rit++) {
1225                         ModelAction *propagate = *rit;
1226                         if (acquire->happens_before(propagate)) {
1227                                 propagate->synchronize_with(acquire);
1228                                 /* Re-check 'propagate' for mo_graph edges */
1229                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1230                         }
1231                 }
1232         } else {
1233                 /* Break release sequence with new edges:
1234                  *   release --mo--> write --mo--> rf */
1235                 mo_graph->addEdge(release, write);
1236                 mo_graph->addEdge(write, rf);
1237         }
1238
1239         /* See if we have realized a data race */
1240         checkDataRaces();
1241 }
1242
1243 /**
1244  * Initialize the current action by performing one or more of the following
1245  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1246  * in the NodeStack, manipulating backtracking sets, allocating and
1247  * initializing clock vectors, and computing the promises to fulfill.
1248  *
1249  * @param curr The current action, as passed from the user context; may be
1250  * freed/invalidated after the execution of this function, with a different
1251  * action "returned" in its place (pass-by-reference)
1252  * @return True if curr is a newly-explored action; false otherwise
1253  */
1254 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1255 {
1256         ModelAction *newcurr;
1257
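        /* An RMW/RMWC completes an earlier RMWR: merge it into that existing
         * action instead of exploring it as a new one */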
1258         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1259                 newcurr = process_rmw(*curr);
1260                 delete *curr;
1261
1262                 if (newcurr->is_rmw())
1263                         compute_promises(newcurr);
1264
1265                 *curr = newcurr;
1266                 return false;
1267         }
1268
1269         (*curr)->set_seq_number(get_next_seq_num());
1270
1271         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1272         if (newcurr) {
1273                 /* First restore type and order in case of RMW operation */
1274                 if ((*curr)->is_rmwr())
1275                         newcurr->copy_typeandorder(*curr);
1276
1277                 ASSERT((*curr)->get_location() == newcurr->get_location());
1278                 newcurr->copy_from_new(*curr);
1279
1280                 /* Discard duplicate ModelAction; use action from NodeStack */
1281                 delete *curr;
1282
1283                 /* Always compute new clock vector */
1284                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1285
1286                 *curr = newcurr;
1287                 return false; /* Action was explored previously */
1288         } else {
1289                 newcurr = *curr;
1290
1291                 /* Always compute new clock vector */
1292                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1293
1294                 /* Assign most recent release fence */
1295                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1296
1297                 /*
1298                  * Perform one-time actions when pushing new ModelAction onto
1299                  * NodeStack
1300                  */
1301                 if (newcurr->is_write())
1302                         compute_promises(newcurr);
1303                 else if (newcurr->is_relseq_fixup())
1304                         compute_relseq_breakwrites(newcurr);
1305                 else if (newcurr->is_wait())
1306                         newcurr->get_node()->set_misc_max(2);
1307                 else if (newcurr->is_notify_one()) {
1308                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1309                 }
1310                 return true; /* This was a new ModelAction */
1311         }
1312 }
1313
1314 /**
1315  * @brief Establish reads-from relation between two actions
1316  *
1317  * Perform basic operations involved with establishing a concrete rf relation,
1318  * including setting the ModelAction data and checking for release sequences.
1319  *
1320  * @param act The action that is reading (must be a read)
1321  * @param rf The action from which we are reading (must be a write)
1322  *
1323  * @return True if this read established synchronization
1324  */
1325 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1326 {
1327         act->set_read_from(rf);
1328         if (rf != NULL && act->is_acquire()) {
1329                 rel_heads_list_t release_heads;
1330                 get_release_seq_heads(act, act, &release_heads);
1331                 int num_heads = release_heads.size();
1332                 for (unsigned int i = 0; i < release_heads.size(); i++)
1333                         if (!act->synchronize_with(release_heads[i])) {
1334                                 set_bad_synchronization();
1335                                 num_heads--;
1336                         }
1337                 return num_heads > 0;
1338         }
1339         return false;
1340 }
1341
1342 /**
1343  * Check promises and eliminate potentially-satisfying threads when a thread is
1344  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1345  * no longer satisfy a promise generated from that thread.
1346  *
1347  * @param blocker The thread on which a thread is waiting
1348  * @param waiting The waiting thread
1349  */
1350 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1351 {
1352         for (unsigned int i = 0; i < promises->size(); i++) {
1353                 Promise *promise = (*promises)[i];
1354                 if (!promise->thread_is_available(waiting->get_id()))
1355                         continue;
1356                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1357                         ModelAction *reader = promise->get_reader(j);
1358                         if (reader->get_tid() != blocker->get_id())
1359                                 continue;
1360                         if (promise->eliminate_thread(waiting->get_id())) {
1361                                 /* Promise has failed */
1362                                 priv->failed_promise = true;
1363                         } else {
1364                                 /* Only eliminate the 'waiting' thread once */
1365                                 return;
1366                         }
1367                 }
1368         }
1369 }
1370
1371 /**
1372  * @brief Check whether a model action is enabled.
1373  *
1374  * Checks whether a lock or join operation would be successful (i.e., is the
1375  * lock already locked, or is the joined thread already complete). If not, put
1376  * the action in a waiter list.
1377  *
1378  * @param curr The ModelAction to check
1379  * @return True if the action is enabled; false if it was placed on a waiter list
1380  */
1381 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1382         if (curr->is_lock()) {
1383                 std::mutex *lock = (std::mutex *)curr->get_location();
1384                 struct std::mutex_state *state = lock->get_state();
1385                 if (state->islocked) {
1386                         //Stick the action in the appropriate waiting queue
1387                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1388                         return false;
1389                 }
1390         } else if (curr->get_type() == THREAD_JOIN) {
1391                 Thread *blocking = (Thread *)curr->get_location();
1392                 if (!blocking->is_complete()) {
1393                         blocking->push_wait_list(curr);
1394                         thread_blocking_check_promises(blocking, get_thread(curr));
1395                         return false;
1396                 }
1397         }
1398
1399         return true;
1400 }
1401
1402 /**
1403  * This is the heart of the model checker routine. It performs model-checking
1404  * actions corresponding to a given "current action." Among other tasks, it
1405  * calculates reads-from relationships, updates synchronization clock vectors,
1406  * forms a memory_order constraints graph, and handles replay/backtrack
1407  * execution when running permutations of previously-observed executions.
1408  *
1409  * @param curr The current action to process
1410  * @return The ModelAction that is actually executed; may be different from
1411  * curr; may be NULL, if the current action is not enabled to run
1412  */
1413 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1414 {
1415         ASSERT(curr);
1416         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1417
1418         if (!check_action_enabled(curr)) {
1419                 /* Make the execution look like we chose to run this action
1420                  * much later, when a lock/join can succeed */
1421                 get_thread(curr)->set_pending(curr);
1422                 scheduler->sleep(get_thread(curr));
1423                 return NULL;
1424         }
1425
1426         bool newly_explored = initialize_curr_action(&curr);
1427
1428         DBG();
1429         if (DBG_ENABLED())
1430                 curr->print();
1431
1432         wake_up_sleeping_actions(curr);
1433
1434         /* Add the action to lists before any other model-checking tasks */
1435         if (!second_part_of_rmw)
1436                 add_action_to_lists(curr);
1437
1438         /* Build may_read_from set for newly-created actions */
1439         if (newly_explored && curr->is_read())
1440                 build_may_read_from(curr);
1441
1442         /* Initialize work_queue with the "current action" work */
1443         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
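        /*
         * Drain the queue to a fixed point: each work item below may enqueue
         * further release-sequence (CheckRelSeqWorkEntry) or mo-edge
         * (MOEdgeWorkEntry) checks, so keep processing until the queue is
         * empty or the execution has asserted.
         */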
1444         while (!work_queue.empty() && !has_asserted()) {
1445                 WorkQueueEntry work = work_queue.front();
1446                 work_queue.pop_front();
1447
1448                 switch (work.type) {
1449                 case WORK_CHECK_CURR_ACTION: {
1450                         ModelAction *act = work.action;
1451                         bool update = false; /* update this location's release seq's */
1452                         bool update_all = false; /* update all release seq's */
1453
1454                         if (process_thread_action(curr))
1455                                 update_all = true;
1456
1457                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1458                                 update = true;
1459
1460                         if (act->is_write() && process_write(act))
1461                                 update = true;
1462
1463                         if (act->is_fence() && process_fence(act))
1464                                 update_all = true;
1465
1466                         if (act->is_mutex_op() && process_mutex(act))
1467                                 update_all = true;
1468
1469                         if (act->is_relseq_fixup())
1470                                 process_relseq_fixup(curr, &work_queue);
1471
1472                         if (update_all)
1473                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1474                         else if (update)
1475                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1476                         break;
1477                 }
1478                 case WORK_CHECK_RELEASE_SEQ:
1479                         resolve_release_sequences(work.location, &work_queue);
1480                         break;
1481                 case WORK_CHECK_MO_EDGES: {
1482                         /** @todo Complete verification of work_queue */
1483                         ModelAction *act = work.action;
1484                         bool updated = false;
1485
1486                         if (act->is_read()) {
1487                                 const ModelAction *rf = act->get_reads_from();
1488                                 const Promise *promise = act->get_reads_from_promise();
1489                                 if (rf) {
1490                                         if (r_modification_order(act, rf))
1491                                                 updated = true;
1492                                 } else if (promise) {
1493                                         if (r_modification_order(act, promise))
1494                                                 updated = true;
1495                                 }
1496                         }
1497                         if (act->is_write()) {
1498                                 if (w_modification_order(act))
1499                                         updated = true;
1500                         }
1501                         mo_graph->commitChanges();
1502
1503                         if (updated)
1504                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1505                         break;
1506                 }
1507                 default:
1508                         ASSERT(false);
1509                         break;
1510                 }
1511         }
1512
1513         check_curr_backtracking(curr);
1514         set_backtracking(curr);
1515         return curr;
1516 }
1517
1518 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1519 {
1520         Node *currnode = curr->get_node();
1521         Node *parnode = currnode->get_parent();
1522
1523         if ((parnode && !parnode->backtrack_empty()) ||
1524                          !currnode->misc_empty() ||
1525                          !currnode->read_from_empty() ||
1526                          !currnode->promise_empty() ||
1527                          !currnode->relseq_break_empty()) {
1528                 set_latest_backtrack(curr);
1529         }
1530 }
1531
1532 bool ModelChecker::promises_expired() const
1533 {
1534         for (unsigned int i = 0; i < promises->size(); i++) {
1535                 Promise *promise = (*promises)[i];
1536                 if (promise->get_expiration() < priv->used_sequence_numbers)
1537                         return true;
1538         }
1539         return false;
1540 }
1541
1542 /**
1543  * This is the strongest feasibility check available.
1544  * @return whether the current trace (partial or complete) must be a prefix of
1545  * a feasible trace.
1546  */
1547 bool ModelChecker::isfeasibleprefix() const
1548 {
1549         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1550 }
1551
1552 /**
1553  * Print diagnostic information about an infeasible execution
1554  * @param prefix A string to prefix the output with; if NULL, then a default
1555  * message prefix will be provided
1556  */
1557 void ModelChecker::print_infeasibility(const char *prefix) const
1558 {
1559         char buf[100];
1560         char *ptr = buf;
1561         if (mo_graph->checkForCycles())
1562                 ptr += sprintf(ptr, "[mo cycle]");
1563         if (priv->failed_promise)
1564                 ptr += sprintf(ptr, "[failed promise]");
1565         if (priv->too_many_reads)
1566                 ptr += sprintf(ptr, "[too many reads]");
1567         if (priv->no_valid_reads)
1568                 ptr += sprintf(ptr, "[no valid reads-from]");
1569         if (priv->bad_synchronization)
1570                 ptr += sprintf(ptr, "[bad sw ordering]");
1571         if (promises_expired())
1572                 ptr += sprintf(ptr, "[promise expired]");
1573         if (promises->size() != 0)
1574                 ptr += sprintf(ptr, "[unresolved promise]");
1575         if (ptr != buf)
1576                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1577 }
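
/*
 * Example output (illustrative) for an execution that has both a modification
 * order cycle and a failed promise:
 *
 *   Infeasible: [mo cycle][failed promise]
 */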
1578
1579 /**
1580  * Returns whether the current completed trace is feasible, except for pending
1581  * release sequences.
1582  */
1583 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1584 {
1585         return !is_infeasible() && promises->size() == 0;
1586 }
1587
1588 /**
1589  * Check if the current partial trace is infeasible. Does not check any
1590  * end-of-execution flags, which might still rule out the execution. Thus, this
1591  * is useful only for ruling an execution infeasible, not for declaring it feasible.
1592  * @return whether the current partial trace is infeasible.
1593  */
1594 bool ModelChecker::is_infeasible() const
1595 {
1596         return mo_graph->checkForCycles() ||
1597                 priv->no_valid_reads ||
1598                 priv->failed_promise ||
1599                 priv->too_many_reads ||
1600                 priv->bad_synchronization ||
1601                 promises_expired();
1602 }
1603
1604 /** Close out an RMWR by converting the previous RMWR into a RMW or READ. */
1605 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1606         ModelAction *lastread = get_last_action(act->get_tid());
1607         lastread->process_rmw(act);
1608         if (act->is_rmw()) {
1609                 if (lastread->get_reads_from())
1610                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1611                 else
1612                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1613                 mo_graph->commitChanges();
1614         }
1615         return lastread;
1616 }
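
/*
 * Informal note: the RMW edge added above reflects C++11 29.3p12 -- an atomic
 * read-modify-write reads the last value (in modification order) written
 * before its own store, so the write it read from must immediately precede
 * the RMW in modification order; addRMWEdge() records that constraint in the
 * mo_graph.
 */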
1617
1618 /**
1619  * Checks whether a thread has read from the same write too many times
1620  * without seeing the effects of a later write.
1621  *
1622  * Basic idea:
1623  * 1) there must be a different write that we could read from that would satisfy the modification order,
1624  * 2) we must have read from the same value in excess of maxreads times, and
1625  * 3) that other write must have been in the reads-from set on each of those maxreads reads.
1626  *
1627  * If so, we decide that the execution is no longer feasible.
1628  */
1629 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1630 {
1631         if (params.maxreads != 0) {
1632                 if (curr->get_node()->get_read_from_past_size() <= 1)
1633                         return;
1634                 //Must make sure that execution is currently feasible...  We could
1635                 //accidentally clear by rolling back
1636                 if (is_infeasible())
1637                         return;
1638                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1639                 int tid = id_to_int(curr->get_tid());
1640
1641                 /* Nothing to check if this thread has no action list yet */
1642                 if ((int)thrd_lists->size() <= tid)
1643                         return;
1644                 action_list_t *list = &(*thrd_lists)[tid];
1645
1646                 action_list_t::reverse_iterator rit = list->rbegin();
1647                 /* Find curr in this thread's list */
1648                 for (; (*rit) != curr; rit++)
1649                         ;
1650                 /* Step past curr itself */
1651                 rit++;
1652
1653                 action_list_t::reverse_iterator ritcopy = rit;
1654                 //See if we have enough reads from the same value
1655                 int count = 0;
1656                 for (; count < params.maxreads; rit++, count++) {
1657                         if (rit == list->rend())
1658                                 return;
1659                         ModelAction *act = *rit;
1660                         if (!act->is_read())
1661                                 return;
1662
1663                         if (act->get_reads_from() != rf)
1664                                 return;
1665                         if (act->get_node()->get_read_from_past_size() <= 1)
1666                                 return;
1667                 }
1668                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1669                         /* Get write */
1670                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1671
1672                         /* Need a different write */
1673                         if (write == rf)
1674                                 continue;
1675
1676                         /* Test to see whether this is a feasible write to read from */
1677                         /** NOTE: all members of read-from set should be
1678                          *  feasible, so we no longer check it here **/
1679
1680                         rit = ritcopy;
1681
1682                         bool feasiblewrite = true;
1683                         //now we need to see if this write works for everyone
1684
1685                         for (int loop = count; loop > 0; loop--, rit++) {
1686                                 ModelAction *act = *rit;
1687                                 bool foundvalue = false;
1688                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1689                                         if (act->get_node()->get_read_from_past(j) == write) {
1690                                                 foundvalue = true;
1691                                                 break;
1692                                         }
1693                                 }
1694                                 if (!foundvalue) {
1695                                         feasiblewrite = false;
1696                                         break;
1697                                 }
1698                         }
1699                         if (feasiblewrite) {
1700                                 priv->too_many_reads = true;
1701                                 return;
1702                         }
1703                 }
1704         }
1705 }
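
/*
 * Illustrative pattern (hypothetical user code) that this check targets when
 * params.maxreads is non-zero: a relaxed spin loop that keeps reading the
 * same stale store even though a newer store is always available:
 *
 *   while (!flag.load(memory_order_relaxed))
 *         ;
 *
 * Once the loop has read the same write maxreads times, and some other write
 * was a legal reads-from choice on every one of those reads, too_many_reads
 * is set and the execution is pruned as infeasible.
 */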
1706
1707 /**
1708  * Updates the mo_graph with the constraints imposed from the current
1709  * read.
1710  *
1711  * Basic idea is the following: Go through each other thread and find
1712  * the last action that happened before our read.  Two cases:
1713  *
1714  * (1) The action is a write => that write must either occur before
1715  * the write we read from or be the write we read from.
1716  *
1717  * (2) The action is a read => the write that that action read from
1718  * must occur before the write we read from or be the same write.
1719  *
1720  * @param curr The current action. Must be a read.
1721  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1722  * @return True if modification order edges were added; false otherwise
1723  */
1724 template <typename rf_type>
1725 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1726 {
1727         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1728         unsigned int i;
1729         bool added = false;
1730         ASSERT(curr->is_read());
1731
1732         /* Last SC fence in the current thread */
1733         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1734
1735         /* Iterate over all threads */
1736         for (i = 0; i < thrd_lists->size(); i++) {
1737                 /* Last SC fence in thread i */
1738                 ModelAction *last_sc_fence_thread_local = NULL;
1739                 if (int_to_id((int)i) != curr->get_tid())
1740                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1741
1742                 /* Last SC fence in thread i, before last SC fence in current thread */
1743                 ModelAction *last_sc_fence_thread_before = NULL;
1744                 if (last_sc_fence_local)
1745                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1746
1747                 /* Iterate over actions in thread, starting from most recent */
1748                 action_list_t *list = &(*thrd_lists)[i];
1749                 action_list_t::reverse_iterator rit;
1750                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1751                         ModelAction *act = *rit;
1752
1753                         if (act->is_write() && !act->equals(rf) && act != curr) {
1754                                 /* C++, Section 29.3 statement 5 */
1755                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1756                                                 *act < *last_sc_fence_thread_local) {
1757                                         added = mo_graph->addEdge(act, rf) || added;
1758                                         break;
1759                                 }
1760                                 /* C++, Section 29.3 statement 4 */
1761                                 else if (act->is_seqcst() && last_sc_fence_local &&
1762                                                 *act < *last_sc_fence_local) {
1763                                         added = mo_graph->addEdge(act, rf) || added;
1764                                         break;
1765                                 }
1766                                 /* C++, Section 29.3 statement 6 */
1767                                 else if (last_sc_fence_thread_before &&
1768                                                 *act < *last_sc_fence_thread_before) {
1769                                         added = mo_graph->addEdge(act, rf) || added;
1770                                         break;
1771                                 }
1772                         }
1773
1774                         /*
1775                          * Include at most one act per-thread that "happens
1776                          * before" curr. Don't consider curr itself.
1777                          */
1778                         if (act->happens_before(curr) && act != curr) {
1779                                 if (act->is_write()) {
1780                                         if (!act->equals(rf)) {
1781                                                 added = mo_graph->addEdge(act, rf) || added;
1782                                         }
1783                                 } else {
1784                                         const ModelAction *prevrf = act->get_reads_from();
1785                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1786                                         if (prevrf) {
1787                                                 if (!prevrf->equals(rf))
1788                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1789                                         } else if (!prevrf_promise->equals(rf)) {
1790                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1791                                         }
1792                                 }
1793                                 break;
1794                         }
1795                 }
1796         }
1797
1798         /*
1799          * All compatible, thread-exclusive promises must be ordered after any
1800          * concrete loads from the same thread
1801          */
1802         for (unsigned int i = 0; i < promises->size(); i++)
1803                 if ((*promises)[i]->is_compatible_exclusive(curr))
1804                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1805
1806         return added;
1807 }
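
/*
 * Illustrative instance of case (1) above (hypothetical program). In one
 * thread:
 *
 *   x.store(1, memory_order_relaxed);   // A
 *   r = x.load(memory_order_relaxed);   // reads from another thread's store B
 *
 * A is sequenced before (hence happens before) the load and is a write other
 * than B, so the coherence edge A --mo--> B is added to the graph.
 */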
1808
1809 /**
1810  * Updates the mo_graph with the constraints imposed from the current write.
1811  *
1812  * Basic idea is the following: Go through each other thread and find
1813  * the latest action that happened before our write.  Two cases:
1814  *
1815  * (1) The action is a write => that write must occur before
1816  * the current write
1817  *
1818  * (2) The action is a read => the write that that action read from
1819  * must occur before the current write.
1820  *
1821  * This method also handles two other issues:
1822  *
1823  * (I) Sequential Consistency: Making sure that if the current write is
1824  * seq_cst, that it occurs after the previous seq_cst write.
1825  *
1826  * (II) Sending the write back to non-synchronizing reads.
1827  *
1828  * @param curr The current action. Must be a write.
1829  * @return True if modification order edges were added; false otherwise
1830  */
1831 bool ModelChecker::w_modification_order(ModelAction *curr)
1832 {
1833         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1834         unsigned int i;
1835         bool added = false;
1836         ASSERT(curr->is_write());
1837
1838         if (curr->is_seqcst()) {
1839                 /* We have to at least see the last sequentially consistent write,
1840                          so we are initialized. */
1841                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1842                 if (last_seq_cst != NULL) {
1843                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1844                 }
1845         }
1846
1847         /* Last SC fence in the current thread */
1848         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1849
1850         /* Iterate over all threads */
1851         for (i = 0; i < thrd_lists->size(); i++) {
1852                 /* Last SC fence in thread i, before last SC fence in current thread */
1853                 ModelAction *last_sc_fence_thread_before = NULL;
1854                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1855                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1856
1857                 /* Iterate over actions in thread, starting from most recent */
1858                 action_list_t *list = &(*thrd_lists)[i];
1859                 action_list_t::reverse_iterator rit;
1860                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1861                         ModelAction *act = *rit;
1862                         if (act == curr) {
1863                                 /*
1864                                  * 1) If RMW and it actually read from something, then we
1865                                  * already have all relevant edges, so just skip to next
1866                                  * thread.
1867                                  *
1868                                  * 2) If RMW and it didn't read from anything, we should add
1869                                  * whatever edge we can get to speed up convergence.
1870                                  *
1871                                  * 3) If normal write, we need to look at earlier actions, so
1872                                  * continue processing list.
1873                                  */
1874                                 if (curr->is_rmw()) {
1875                                         if (curr->get_reads_from() != NULL)
1876                                                 break;
1877                                         else
1878                                                 continue;
1879                                 } else
1880                                         continue;
1881                         }
1882
1883                         /* C++, Section 29.3 statement 7 */
1884                         if (last_sc_fence_thread_before && act->is_write() &&
1885                                         *act < *last_sc_fence_thread_before) {
1886                                 added = mo_graph->addEdge(act, curr) || added;
1887                                 break;
1888                         }
1889
1890                         /*
1891                          * Include at most one act per-thread that "happens
1892                          * before" curr
1893                          */
1894                         if (act->happens_before(curr)) {
1895                                 /*
1896                                  * Note: if act is RMW, just add edge:
1897                                  *   act --mo--> curr
1898                                  * The following edge should be handled elsewhere:
1899                                  *   readfrom(act) --mo--> act
1900                                  */
1901                                 if (act->is_write())
1902                                         added = mo_graph->addEdge(act, curr) || added;
1903                                 else if (act->is_read()) {
1904                                         //if the previous read has not resolved its reads-from yet, just keep going
1905                                         if (act->get_reads_from() == NULL)
1906                                                 continue;
1907                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1908                                 }
1909                                 break;
1910                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1911                                                      !act->same_thread(curr)) {
1912                                 /* We have an action that:
1913                                    (1) did not happen before us
1914                                    (2) is a read and we are a write
1915                                    (3) cannot synchronize with us
1916                                    (4) is in a different thread
1917                                    =>
1918                                    that read could potentially read from our write.  Note that
1919                                    these checks are overly conservative at this point, we'll
1920                                    do more checks before actually removing the
1921                                    pendingfuturevalue.
1922
1923                                  */
1924                                 if (thin_air_constraint_may_allow(curr, act)) {
1925                                         if (!is_infeasible())
1926                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1927                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1928                                                 add_future_value(curr, act);
1929                                 }
1930                         }
1931                 }
1932         }
1933
1934         /*
1935          * All compatible, thread-exclusive promises must be ordered after any
1936          * concrete stores to the same thread, or else they can be merged with
1937          * this store later
1938          */
1939         for (unsigned int i = 0; i < promises->size(); i++)
1940                 if ((*promises)[i]->is_compatible_exclusive(curr))
1941                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1942
1943         return added;
1944 }
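
/*
 * Illustrative instance of issue (I) above (hypothetical program): given two
 * seq_cst stores to the same object,
 *
 *   x.store(1, memory_order_seq_cst);   // A
 *   x.store(2, memory_order_seq_cst);   // B (current action)
 *
 * if A is the most recent seq_cst store to x when B executes, the edge
 * A --mo--> B is added so that modification order stays consistent with the
 * single total order over seq_cst operations.
 */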
1945
1946 /** Arbitrary reads from the future are not allowed.  Section 29.3
1947  * part 9 places some constraints.  This method checks one consequence of that
1948  * constraint.  Others require compiler support. */
1949 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1950 {
1951         if (!writer->is_rmw())
1952                 return true;
1953
1954         if (!reader->is_rmw())
1955                 return true;
1956
1957         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1958                 if (search == reader)
1959                         return false;
1960                 if (search->get_tid() == reader->get_tid() &&
1961                                 search->happens_before(reader))
1962                         break;
1963         }
1964
1965         return true;
1966 }
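
/*
 * Illustrative case rejected above (hypothetical program): two RMWs on x in
 * different threads,
 *
 *   r1 = x.fetch_add(1);   // writer W, thread 1
 *   r2 = x.fetch_add(1);   // reader R, thread 2
 *
 * If W's reads-from chain already leads back to R, then letting R read from W
 * would make the two RMWs read from each other, which no modification order
 * can satisfy, so thin_air_constraint_may_allow(W, R) returns false.
 */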
1967
1968 /**
1969  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1970  * some constraints. This method checks the following constraint (others
1971  * require compiler support):
1972  *
1973  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1974  */
1975 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1976 {
1977         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1978         unsigned int i;
1979         /* Iterate over all threads */
1980         for (i = 0; i < thrd_lists->size(); i++) {
1981                 const ModelAction *write_after_read = NULL;
1982
1983                 /* Iterate over actions in thread, starting from most recent */
1984                 action_list_t *list = &(*thrd_lists)[i];
1985                 action_list_t::reverse_iterator rit;
1986                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1987                         ModelAction *act = *rit;
1988
1989                         /* Don't disallow due to act == reader */
1990                         if (!reader->happens_before(act) || reader == act)
1991                                 break;
1992                         else if (act->is_write())
1993                                 write_after_read = act;
1994                         else if (act->is_read() && act->get_reads_from() != NULL)
1995                                 write_after_read = act->get_reads_from();
1996                 }
1997
1998                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1999                         return false;
2000         }
2001         return true;
2002 }
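
/*
 * Concrete reading of the rule above (hypothetical program): if a load R of x
 * happens before a store Y to x, and Y --mo--> Z for some other store Z, then
 * R must not read from Z; mo_may_allow(Z, R) returns false in that case.
 */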
2003
2004 /**
2005  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2006  * The ModelAction under consideration is expected to be taking part in
2007  * release/acquire synchronization as an object of the "reads from" relation.
2008  * Note that this can only provide release sequence support for RMW chains
2009  * which do not read from the future, as those actions cannot be traced until
2010  * their "promise" is fulfilled. Similarly, we may not even establish the
2011  * presence of a release sequence with certainty, as some modification order
2012  * constraints may be decided further in the future. Thus, this function
2013  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2014  * and a boolean representing certainty.
2015  *
2016  * @param rf The action that might be part of a release sequence. Must be a
2017  * write.
2018  * @param release_heads A pass-by-reference style return parameter. After
2019  * execution of this function, release_heads will contain the heads of all the
2020  * relevant release sequences, if any exist with certainty
2021  * @param pending A pass-by-reference style return parameter which is only used
2022  * when returning false (i.e., uncertain). Records what is known about the
2023  * uncertain release sequence, including any write operations that might
2024  * break the sequence.
2025  * @return true, if the ModelChecker is certain that release_heads is complete;
2026  * false otherwise
2027  */
2028 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2029                 rel_heads_list_t *release_heads,
2030                 struct release_seq *pending) const
2031 {
2032         /* Only check for release sequences if there are no cycles */
2033         if (mo_graph->checkForCycles())
2034                 return false;
2035
2036         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2037                 ASSERT(rf->is_write());
2038
2039                 if (rf->is_release())
2040                         release_heads->push_back(rf);
2041                 else if (rf->get_last_fence_release())
2042                         release_heads->push_back(rf->get_last_fence_release());
2043                 if (!rf->is_rmw())
2044                         break; /* End of RMW chain */
2045
2046                 /** @todo Need to be smarter here...  In the linux lock
2047                  * example, this will run to the beginning of the program for
2048                  * every acquire. */
2049                 /** @todo The way to be smarter here is to keep going until 1
2050                  * thread has a release preceded by an acquire and you've seen
2051                  *       both. */
2052
2053                 /* acq_rel RMW is a sufficient stopping condition */
2054                 if (rf->is_acquire() && rf->is_release())
2055                         return true; /* complete */
2056         }
2057         if (!rf) {
2058                 /* read from future: need to settle this later */
2059                 pending->rf = NULL;
2060                 return false; /* incomplete */
2061         }
2062
2063         if (rf->is_release())
2064                 return true; /* complete */
2065
2066         /* else relaxed write
2067          * - check for fence-release in the same thread (29.8, stmt. 3)
2068          * - check modification order for contiguous subsequence
2069          *   -> rf must be same thread as release */
2070
2071         const ModelAction *fence_release = rf->get_last_fence_release();
2072         /* Synchronize with a fence-release unconditionally; we don't need to
2073          * find any more "contiguous subsequence..." for it */
2074         if (fence_release)
2075                 release_heads->push_back(fence_release);
2076
2077         int tid = id_to_int(rf->get_tid());
2078         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2079         action_list_t *list = &(*thrd_lists)[tid];
2080         action_list_t::const_reverse_iterator rit;
2081
2082         /* Find rf in the thread list */
2083         rit = std::find(list->rbegin(), list->rend(), rf);
2084         ASSERT(rit != list->rend());
2085
2086         /* Find the last {write,fence}-release */
2087         for (; rit != list->rend(); rit++) {
2088                 if (fence_release && *(*rit) < *fence_release)
2089                         break;
2090                 if ((*rit)->is_release())
2091                         break;
2092         }
2093         if (rit == list->rend()) {
2094                 /* No write-release in this thread */
2095                 return true; /* complete */
2096         } else if (fence_release && *(*rit) < *fence_release) {
2097                 /* The fence-release is more recent (and so, "stronger") than
2098                  * the most recent write-release */
2099                 return true; /* complete */
2100         } /* else, need to establish contiguous release sequence */
2101         ModelAction *release = *rit;
2102
2103         ASSERT(rf->same_thread(release));
2104
2105         pending->writes.clear();
2106
2107         bool certain = true;
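        /*
         * Scan every other thread for stores to this location that could be
         * modification-ordered between 'release' and 'rf'; any such store
         * would break the contiguity of the candidate release sequence. A
         * thread that might still produce such a store later leaves the
         * result uncertain.
         */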
2108         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2109                 if (id_to_int(rf->get_tid()) == (int)i)
2110                         continue;
2111                 list = &(*thrd_lists)[i];
2112
2113                 /* Can we ensure no future writes from this thread may break
2114                  * the release seq? */
2115                 bool future_ordered = false;
2116
2117                 ModelAction *last = get_last_action(int_to_id(i));
2118                 Thread *th = get_thread(int_to_id(i));
2119                 if ((last && rf->happens_before(last)) ||
2120                                 !is_enabled(th) ||
2121                                 th->is_complete())
2122                         future_ordered = true;
2123
2124                 ASSERT(!th->is_model_thread() || future_ordered);
2125
2126                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2127                         const ModelAction *act = *rit;
2128                         /* Reached synchronization -> this thread is complete */
2129                         if (act->happens_before(release))
2130                                 break;
2131                         if (rf->happens_before(act)) {
2132                                 future_ordered = true;
2133                                 continue;
2134                         }
2135
2136                         /* Only non-RMW writes can break release sequences */
2137                         if (!act->is_write() || act->is_rmw())
2138                                 continue;
2139
2140                         /* Check modification order */
2141                         if (mo_graph->checkReachable(rf, act)) {
2142                                 /* rf --mo--> act */
2143                                 future_ordered = true;
2144                                 continue;
2145                         }
2146                         if (mo_graph->checkReachable(act, release))
2147                                 /* act --mo--> release */
2148                                 break;
2149                         if (mo_graph->checkReachable(release, act) &&
2150                                       mo_graph->checkReachable(act, rf)) {
2151                                 /* release --mo-> act --mo--> rf */
2152                                 return true; /* complete */
2153                         }
2154                         /* act may break release sequence */
2155                         pending->writes.push_back(act);
2156                         certain = false;
2157                 }
2158                 if (!future_ordered)
2159                         certain = false; /* This thread is uncertain */
2160         }
2161
2162         if (certain) {
2163                 release_heads->push_back(release);
2164                 pending->writes.clear();
2165         } else {
2166                 pending->release = release;
2167                 pending->rf = rf;
2168         }
2169         return certain;
2170 }
2171
2172 /**
2173  * An interface for getting the release sequence head(s) with which a
2174  * given ModelAction must synchronize. This function only returns a non-empty
2175  * result when it can locate a release sequence head with certainty. Otherwise,
2176  * it may mark the internal state of the ModelChecker so that it will handle
2177  * the release sequence at a later time, causing @a acquire to update its
2178  * synchronization at some later point in execution.
2179  *
2180  * @param acquire The 'acquire' action that may synchronize with a release
2181  * sequence
2182  * @param read The read action that may read from a release sequence; this may
2183  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2184  * when 'acquire' is a fence-acquire)
2185  * @param release_heads A pass-by-reference return parameter. Will be filled
2186  * with the head(s) of the release sequence(s), if they exist with certainty.
2187  * @see ModelChecker::release_seq_heads
2188  */
2189 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2190                 ModelAction *read, rel_heads_list_t *release_heads)
2191 {
2192         const ModelAction *rf = read->get_reads_from();
2193         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2194         sequence->acquire = acquire;
2195         sequence->read = read;
2196
2197         if (!release_seq_heads(rf, release_heads, sequence)) {
2198                 /* add act to 'lazy checking' list */
2199                 pending_rel_seqs->push_back(sequence);
2200         } else {
2201                 snapshot_free(sequence);
2202         }
2203 }
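
/*
 * Usage note: one caller is read_from() above -- when an acquire load reads
 * from a write, it asks here for that write's release-sequence heads, and any
 * sequence that cannot be decided yet is parked in pending_rel_seqs and
 * revisited by resolve_release_sequences().
 */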
2204
2205 /**
2206  * Attempt to resolve all stashed operations that might synchronize with a
2207  * release sequence for a given location. This implements the "lazy" portion of
2208  * determining whether or not a release sequence was contiguous, since not all
2209  * modification order information is present at the time an action occurs.
2210  *
2211  * @param location The location/object that should be checked for release
2212  * sequence resolutions. A NULL value means to check all locations.
2213  * @param work_queue The work queue to which to add work items as they are
2214  * generated
2215  * @return True if any updates occurred (new synchronization, new mo_graph
2216  * edges)
2217  */
2218 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2219 {
2220         bool updated = false;
2221         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2222         while (it != pending_rel_seqs->end()) {
2223                 struct release_seq *pending = *it;
2224                 ModelAction *acquire = pending->acquire;
2225                 const ModelAction *read = pending->read;
2226
2227                 /* Only resolve sequences on the given location, if provided */
2228                 if (location && read->get_location() != location) {
2229                         it++;
2230                         continue;
2231                 }
2232
2233                 const ModelAction *rf = read->get_reads_from();
2234                 rel_heads_list_t release_heads;
2235                 bool complete;
2236                 complete = release_seq_heads(rf, &release_heads, pending);
2237                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2238                         if (!acquire->has_synchronized_with(release_heads[i])) {
2239                                 if (acquire->synchronize_with(release_heads[i]))
2240                                         updated = true;
2241                                 else
2242                                         set_bad_synchronization();
2243                         }
2244                 }
2245
2246                 if (updated) {
2247                         /* Re-check all pending release sequences */
2248                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2249                         /* Re-check read-acquire for mo_graph edges */
2250                         if (acquire->is_read())
2251                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2252
2253                         /* propagate synchronization to later actions */
2254                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2255                         for (; (*rit) != acquire; rit++) {
2256                                 ModelAction *propagate = *rit;
2257                                 if (acquire->happens_before(propagate)) {
2258                                         propagate->synchronize_with(acquire);
2259                                         /* Re-check 'propagate' for mo_graph edges */
2260                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2261                                 }
2262                         }
2263                 }
2264                 if (complete) {
2265                         it = pending_rel_seqs->erase(it);
2266                         snapshot_free(pending);
2267                 } else {
2268                         it++;
2269                 }
2270         }
2271
2272         // If we resolved promises or added synchronization, see if we have realized a data race.
2273         checkDataRaces();
2274
2275         return updated;
2276 }
2277
2278 /**
2279  * Performs various bookkeeping operations for the current ModelAction. For
2280  * instance, adds action to the per-object, per-thread action vector and to the
2281  * action trace list of all thread actions.
2282  *
2283  * @param act is the ModelAction to add.
2284  */
2285 void ModelChecker::add_action_to_lists(ModelAction *act)
2286 {
2287         int tid = id_to_int(act->get_tid());
2288         ModelAction *uninit = NULL;
2289         int uninit_id = -1;
2290         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2291         if (list->empty() && act->is_atomic_var()) {
2292                 uninit = new_uninitialized_action(act->get_location());
2293                 uninit_id = id_to_int(uninit->get_tid());
2294                 list->push_back(uninit);
2295         }
2296         list->push_back(act);
2297
2298         action_trace->push_back(act);
2299         if (uninit)
2300                 action_trace->push_front(uninit);
2301
2302         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2303         if (tid >= (int)vec->size())
2304                 vec->resize(priv->next_thread_id);
2305         (*vec)[tid].push_back(act);
2306         if (uninit)
2307                 (*vec)[uninit_id].push_front(uninit);
2308
2309         if ((int)thrd_last_action->size() <= tid)
2310                 thrd_last_action->resize(get_num_threads());
2311         (*thrd_last_action)[tid] = act;
2312         if (uninit)
2313                 (*thrd_last_action)[uninit_id] = uninit;
2314
2315         if (act->is_fence() && act->is_release()) {
2316                 if ((int)thrd_last_fence_release->size() <= tid)
2317                         thrd_last_fence_release->resize(get_num_threads());
2318                 (*thrd_last_fence_release)[tid] = act;
2319         }
2320
2321         if (act->is_wait()) {
2322                 void *mutex_loc = (void *) act->get_value();
2323                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2324
2325                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2326                 if (tid >= (int)vec->size())
2327                         vec->resize(priv->next_thread_id);
2328                 (*vec)[tid].push_back(act);
2329         }
2330 }
2331
2332 /**
2333  * @brief Get the last action performed by a particular Thread
2334  * @param tid The thread ID of the Thread in question
2335  * @return The last action in the thread, if any; NULL otherwise
2336  */
2337 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2338 {
2339         int threadid = id_to_int(tid);
2340         if (threadid < (int)thrd_last_action->size())
2341                 return (*thrd_last_action)[id_to_int(tid)];
2342         else
2343                 return NULL;
2344 }
2345
2346 /**
2347  * @brief Get the last fence release performed by a particular Thread
2348  * @param tid The thread ID of the Thread in question
2349  * @return The last fence release in the thread, if one exists; NULL otherwise
2350  */
2351 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2352 {
2353         int threadid = id_to_int(tid);
2354         if (threadid < (int)thrd_last_fence_release->size())
2355                 return (*thrd_last_fence_release)[id_to_int(tid)];
2356         else
2357                 return NULL;
2358 }
2359
2360 /**
2361  * Gets the last memory_order_seq_cst write (in the total global sequence)
2362  * performed on a particular object (i.e., memory location), not including the
2363  * current action.
2364  * @param curr The current ModelAction; also denotes the object location to
2365  * check
2366  * @return The last seq_cst write
2367  */
2368 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2369 {
2370         void *location = curr->get_location();
2371         action_list_t *list = get_safe_ptr_action(obj_map, location);
2372         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2373         action_list_t::reverse_iterator rit;
2374         for (rit = list->rbegin(); rit != list->rend(); rit++)
2375                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2376                         return *rit;
2377         return NULL;
2378 }
2379
2380 /**
2381  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2382  * performed in a particular thread, prior to a particular fence.
2383  * @param tid The ID of the thread to check
2384  * @param before_fence The fence from which to begin the search; if NULL, then
2385  * search for the most recent fence in the thread.
2386  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2387  */
2388 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2389 {
2390         /* All fences should have NULL location */
2391         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2392         action_list_t::reverse_iterator rit = list->rbegin();
2393
2394         if (before_fence) {
2395                 for (; rit != list->rend(); rit++)
2396                         if (*rit == before_fence)
2397                                 break;
2398
2399                 ASSERT(*rit == before_fence);
2400                 rit++;
2401         }
2402
2403         for (; rit != list->rend(); rit++)
2404                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2405                         return *rit;
2406         return NULL;
2407 }
2408
2409 /**
2410  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2411  * location). This function identifies the mutex according to the current
2412  * action, which is presumed to operate on the same mutex.
2413  * @param curr The current ModelAction; also denotes the object location to
2414  * check
2415  * @return The last unlock (or wait, which releases the mutex) operation, if any; NULL otherwise
2416  */
2417 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2418 {
2419         void *location = curr->get_location();
2420         action_list_t *list = get_safe_ptr_action(obj_map, location);
2421         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2422         action_list_t::reverse_iterator rit;
2423         for (rit = list->rbegin(); rit != list->rend(); rit++)
2424                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2425                         return *rit;
2426         return NULL;
2427 }
2428
2429 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2430 {
2431         ModelAction *parent = get_last_action(tid);
2432         if (!parent)
2433                 parent = get_thread(tid)->get_creation();
2434         return parent;
2435 }
2436
2437 /**
2438  * Returns the clock vector for a given thread.
2439  * @param tid The thread whose clock vector we want
2440  * @return Desired clock vector
2441  */
2442 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2443 {
2444         return get_parent_action(tid)->get_cv();
2445 }
2446
2447 /**
2448  * Resolve a set of Promises with a current write. The set is provided in the
2449  * Node corresponding to @a write.
2450  * @param write The ModelAction that is fulfilling Promises
2451  * @return True if promises were resolved; false otherwise
2452  */
2453 bool ModelChecker::resolve_promises(ModelAction *write)
2454 {
2455         bool haveResolved = false;
2456         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2457         promise_list_t mustResolve, resolved;
2458
2459         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2460                 Promise *promise = (*promises)[promise_index];
2461                 if (write->get_node()->get_promise(i)) {
2462                         for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2463                                 ModelAction *read = promise->get_reader(j);
2464                                 read_from(read, write);
2465                                 actions_to_check.push_back(read);
2466                         }
2467                         //Make sure the promise's value matches the write's value
2468                         ASSERT(promise->is_compatible(write));
2469                         mo_graph->resolvePromise(promise, write, &mustResolve);
2470
2471                         resolved.push_back(promise);
2472                         promises->erase(promises->begin() + promise_index);
2473
2474                         haveResolved = true;
2475                 } else
2476                         promise_index++;
2477         }
2478
2479         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2480                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2481                                 == resolved.end())
2482                         priv->failed_promise = true;
2483         }
2484         for (unsigned int i = 0; i < resolved.size(); i++)
2485                 delete resolved[i];
2486         //Check whether reading these writes has made threads unable to
2487         //resolve promises
2488
2489         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2490                 ModelAction *read = actions_to_check[i];
2491                 mo_check_promises(read, true);
2492         }
2493
2494         return haveResolved;
2495 }
2496
2497 /**
2498  * Compute the set of promises that could potentially be satisfied by this
2499  * action. Note that the set computation actually appears in the Node, not in
2500  * ModelChecker.
2501  * @param curr The ModelAction that may satisfy promises
2502  */
2503 void ModelChecker::compute_promises(ModelAction *curr)
2504 {
2505         for (unsigned int i = 0; i < promises->size(); i++) {
2506                 Promise *promise = (*promises)[i];
2507                 if (!promise->is_compatible(curr) || promise->get_value() != curr->get_value())
2508                         continue;
2509
2510                 bool satisfy = true;
2511                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2512                         const ModelAction *act = promise->get_reader(j);
2513                         if (act->happens_before(curr) ||
2514                                         act->could_synchronize_with(curr)) {
2515                                 satisfy = false;
2516                                 break;
2517                         }
2518                 }
2519                 if (satisfy)
2520                         curr->get_node()->set_promise(i);
2521         }
2522 }
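
/*
 * Background note (informal): a Promise here stands for a value that some
 * load has speculatively read from a store that has not been generated yet.
 * compute_promises() only records, in the Node, which pending promises this
 * write would be allowed to satisfy; resolve_promises() performs the actual
 * resolution once such a write is chosen.
 */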
2523
2524 /** Checks promises in response to a change in a thread's ClockVector. */
2525 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2526 {
2527         for (unsigned int i = 0; i < promises->size(); i++) {
2528                 Promise *promise = (*promises)[i];
2529                 if (!promise->thread_is_available(tid))
2530                         continue;
2531                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2532                         const ModelAction *act = promise->get_reader(j);
2533                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2534                                         merge_cv->synchronized_since(act)) {
2535                                 if (promise->eliminate_thread(tid)) {
2536                                         /* Promise has failed */
2537                                         priv->failed_promise = true;
2538                                         return;
2539                                 }
2540                         }
2541                 }
2542         }
2543 }
2544
2545 void ModelChecker::check_promises_thread_disabled()
2546 {
2547         for (unsigned int i = 0; i < promises->size(); i++) {
2548                 Promise *promise = (*promises)[i];
2549                 if (promise->has_failed()) {
2550                         priv->failed_promise = true;
2551                         return;
2552                 }
2553         }
2554 }
2555
2556 /**
2557  * @brief Checks promises in response to addition to modification order for
2558  * threads.
2559  *
2560  * We test whether threads are still available for satisfying promises after an
2561  * addition to our modification order constraints. Those that are unavailable
2562  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2563  * that promise has failed.
2564  *
2565  * @param act The ModelAction which updated the modification order
2566  * @param is_read_check Should be true if act is a read and we must check for
2567  * updates to the store from which it read (there is a distinction here for
2568  * RMWs, which are both a load and a store)
2569  */
2570 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2571 {
2572         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2573
2574         for (unsigned int i = 0; i < promises->size(); i++) {
2575                 Promise *promise = (*promises)[i];
2576
2577                 // Could this write's value satisfy this promise?
2578                 if (promise->get_value() != write->get_value())
2579                         continue;
2580
2581                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2582                         const ModelAction *pread = promise->get_reader(j);
2583                         if (!pread->happens_before(act))
2584                                 continue;
2585                         if (mo_graph->checkPromise(write, promise)) {
2586                                 priv->failed_promise = true;
2587                                 return;
2588                         }
2589                         break;
2590                 }
2591
2592                 // Skip promises for which this thread has already been eliminated
2593                 if (!promise->thread_is_available(act->get_tid()))
2594                         continue;
2595
2596                 if (mo_graph->checkReachable(promise, write)) {
2597                         if (mo_graph->checkPromise(write, promise)) {
2598                                 priv->failed_promise = true;
2599                                 return;
2600                         }
2601                 }
2602         }
2603 }
2604
2605 /**
2606  * Compute the set of writes that may break the current pending release
2607  * sequence. This information is extracted from previous release sequence
2608  * calculations.
2609  *
2610  * @param curr The current ModelAction. Must be a release sequence fixup
2611  * action.
2612  */
2613 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2614 {
2615         if (pending_rel_seqs->empty())
2616                 return;
2617
2618         struct release_seq *pending = pending_rel_seqs->back();
2619         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2620                 const ModelAction *write = pending->writes[i];
2621                 curr->get_node()->add_relseq_break(write);
2622         }
2623
2624         /* NULL means don't break the sequence; just synchronize */
2625         curr->get_node()->add_relseq_break(NULL);
2626 }
2627
2628 /**
2629  * Build up an initial set of all past writes that this 'read' action may read
2630  * from, as well as any previously-observed future values that must still be valid.
2631  *
2632  * @param curr is the current ModelAction that we are exploring; it must be a
2633  * 'read' operation.
2634  */
2635 void ModelChecker::build_may_read_from(ModelAction *curr)
2636 {
2637         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2638         unsigned int i;
2639         ASSERT(curr->is_read());
2640
2641         ModelAction *last_sc_write = NULL;
2642
2643         if (curr->is_seqcst())
2644                 last_sc_write = get_last_seq_cst_write(curr);
2645
2646         /* Iterate over all threads */
2647         for (i = 0; i < thrd_lists->size(); i++) {
2648                 /* Iterate over actions in thread, starting from most recent */
2649                 action_list_t *list = &(*thrd_lists)[i];
2650                 action_list_t::reverse_iterator rit;
2651                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2652                         ModelAction *act = *rit;
2653
2654                         /* Only consider 'write' actions */
2655                         if (!act->is_write() || act == curr)
2656                                 continue;
2657
2658                         /* A seq_cst read may not observe a seq_cst write other than the most recent one, nor any write that happens before that most recent seq_cst write */
2659                         bool allow_read = true;
2660
2661                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2662                                 allow_read = false;
2663                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2664                                 allow_read = false;
2665
2666                         if (allow_read) {
2667                                 /* Only add feasible reads */
2668                                 mo_graph->startChanges();
2669                                 r_modification_order(curr, act);
2670                                 if (!is_infeasible())
2671                                         curr->get_node()->add_read_from_past(act);
2672                                 mo_graph->rollbackChanges();
2673                         }
2674
2675                         /* Include at most one act per-thread that "happens before" curr */
2676                         if (act->happens_before(curr))
2677                                 break;
2678                 }
2679         }
2680
2681         /* Inherit existing, promised future values */
2682         for (i = 0; i < promises->size(); i++) {
2683                 const Promise *promise = (*promises)[i];
2684                 const ModelAction *promise_read = promise->get_reader(0);
2685                 if (promise_read->same_var(curr)) {
2686                         /* Only add feasible future-values */
2687                         mo_graph->startChanges();
2688                         r_modification_order(curr, promise);
2689                         if (!is_infeasible())
2690                                 curr->get_node()->add_read_from_promise(promise_read);
2691                         mo_graph->rollbackChanges();
2692                 }
2693         }
2694
2695         /* An empty may-read-from set can happen only if the execution is already doomed */
2696         if (!curr->get_node()->read_from_size()) {
2697                 priv->no_valid_reads = true;
2698                 set_assert();
2699         }
2700
2701         if (DBG_ENABLED()) {
2702                 model_print("Reached read action:\n");
2703                 curr->print();
2704                 model_print("Printing read_from_past\n");
2705                 curr->get_node()->print_read_from_past();
2706                 model_print("End printing read_from_past\n");
2707         }
2708 }
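
/*
 * Worked example (hypothetical program, for illustration only) of the
 * seq_cst filtering above. Suppose a single thread performs, in order:
 *
 *	x.store(1, std::memory_order_relaxed);	// W1
 *	x.store(2, std::memory_order_seq_cst);	// W2
 *	x.store(3, std::memory_order_seq_cst);	// W3  (last_sc_write)
 *	int r = x.load(std::memory_order_seq_cst);
 *
 * For the seq_cst load, W2 is dropped (a seq_cst write other than
 * last_sc_write) and W1 is dropped because it happens before W3; only W3
 * remains. A relaxed store from another, unsynchronized thread would still
 * be a candidate, since it is neither seq_cst nor ordered before W3.
 * Surviving candidates are kept only if the r_modification_order()
 * constraints they introduce leave the execution feasible.
 */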
2709
2710 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2711 {
2712         for ( ; write != NULL; write = write->get_reads_from()) {
2713                 /* UNINIT actions don't have a Node, and they never sleep */
2714                 if (write->is_uninitialized())
2715                         return true;
2716                 Node *prevnode = write->get_node()->get_parent();
2717
2718                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2719                 if (write->is_release() && thread_sleep)
2720                         return true;
2721                 if (!write->is_rmw())
2722                         return false;
2723         }
2724         return true;
2725 }
2726
2727 /**
2728  * @brief Create a new action representing an uninitialized atomic
2729  * @param location The memory location of the atomic object
2730  * @return A pointer to a new ModelAction
2731  */
2732 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2733 {
2734         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2735         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2736         act->create_cv(NULL);
2737         return act;
2738 }
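
/*
 * The allocation above is the standard placement-new pattern: grab raw
 * bytes from a specific allocator (here snapshot_malloc, so the action
 * lives in snapshotted memory), then construct the object in place. A
 * self-contained sketch of the same pattern using plain malloc:
 *
 *	#include <cstdlib>
 *	#include <new>
 *
 *	struct Widget {
 *		int x;
 *		Widget(int x) : x(x) { }
 *	};
 *
 *	int main()
 *	{
 *		void *raw = malloc(sizeof(Widget));
 *		Widget *w = new (raw) Widget(42);	// construct in place
 *		w->~Widget();				// destroy explicitly
 *		free(raw);				// release the raw bytes
 *		return 0;
 *	}
 */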
2739
2740 static void print_list(action_list_t *list)
2741 {
2742         action_list_t::iterator it;
2743
2744         model_print("---------------------------------------------------------------------\n");
2745
2746         unsigned int hash = 0;
2747
2748         for (it = list->begin(); it != list->end(); it++) {
2749                 (*it)->print();
2750                 hash = hash^(hash<<3)^((*it)->hash());
2751         }
2752         model_print("HASH %u\n", hash);
2753         model_print("---------------------------------------------------------------------\n");
2754 }
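
/*
 * The HASH printed above is a simple xor/shift fold of the per-action
 * hashes, handy for spotting whether two runs printed an identical trace.
 * A minimal self-contained sketch of the same combiner:
 *
 *	#include <stdio.h>
 *
 *	int main()
 *	{
 *		unsigned int action_hashes[] = { 0xdeadu, 0xbeefu, 0xcafeu };
 *		unsigned int hash = 0;
 *		for (unsigned int i = 0; i < 3; i++)
 *			hash = hash ^ (hash << 3) ^ action_hashes[i];
 *		printf("HASH %u\n", hash);
 *		return 0;
 *	}
 */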
2755
2756 #if SUPPORT_MOD_ORDER_DUMP
2757 void ModelChecker::dumpGraph(char *filename) const
2758 {
2759         char buffer[200];
2760         sprintf(buffer, "%s.dot", filename);
2761         FILE *file = fopen(buffer, "w");
2762         fprintf(file, "digraph %s {\n", filename);
2763         mo_graph->dumpNodes(file);
2764         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2765
2766         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2767                 ModelAction *act = *it;
2768                 if (act->is_read()) {
2769                         mo_graph->dot_print_node(file, act);
2770                         if (act->get_reads_from())
2771                                 mo_graph->dot_print_edge(file,
2772                                                 act->get_reads_from(),
2773                                                 act,
2774                                                 "label=\"rf\", color=red, weight=2");
2775                         else
2776                                 mo_graph->dot_print_edge(file,
2777                                                 act->get_reads_from_promise(),
2778                                                 act,
2779                                                 "label=\"rf\", color=red");
2780                 }
2781                 if (thread_array[id_to_int(act->get_tid())]) {
2782                         mo_graph->dot_print_edge(file,
2783                                         thread_array[id_to_int(act->get_tid())],
2784                                         act,
2785                                         "label=\"sb\", color=blue, weight=400");
2786                 }
2787
2788                 thread_array[id_to_int(act->get_tid())] = act;
2789         }
2790         fprintf(file, "}\n");
2791         model_free(thread_array);
2792         fclose(file);
2793 }
2794 #endif
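
/*
 * For reference, the generated <filename>.dot has roughly this shape (the
 * node labels below are made up; the real ones come from the CycleGraph
 * dot_print helpers):
 *
 *	digraph graph0001 {
 *		N1 [label="..."];
 *		N2 [label="..."];
 *		N1 -> N2 [label="rf", color=red, weight=2];
 *		N2 -> N3 [label="sb", color=blue, weight=400];
 *	}
 *
 * Reads-from edges are drawn in red and sequenced-before edges in blue; a
 * Graphviz invocation such as `dot -Tpdf graph0001.dot -o graph0001.pdf`
 * renders it.
 */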
2795
2796 /** @brief Prints an execution trace summary. */
2797 void ModelChecker::print_summary() const
2798 {
2799 #if SUPPORT_MOD_ORDER_DUMP
2800         char buffername[100];
2801         sprintf(buffername, "exec%04u", stats.num_total);
2802         mo_graph->dumpGraphToFile(buffername);
2803         sprintf(buffername, "graph%04u", stats.num_total);
2804         dumpGraph(buffername);
2805 #endif
2806
2807         model_print("Execution %u:", stats.num_total);
2808         if (isfeasibleprefix()) {
2809                 if (scheduler->all_threads_sleeping())
2810                         model_print(" SLEEP-SET REDUNDANT");
2811                 model_print("\n");
2812         } else
2813                 print_infeasibility(" INFEASIBLE");
2814         print_list(action_trace);
2815         model_print("\n");
2816 }
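
/*
 * For reference, the summary printed for each execution has roughly this
 * shape (the per-action lines come from ModelAction::print(), whose exact
 * format is defined with the ModelAction class):
 *
 *	Execution 12:
 *	---------------------------------------------------------------------
 *	<one line per ModelAction in the trace>
 *	HASH 1234567890
 *	---------------------------------------------------------------------
 *
 * Infeasible executions are tagged INFEASIBLE after the execution number,
 * and redundant sleep-set executions are tagged SLEEP-SET REDUNDANT.
 */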
2817
2818 /**
2819  * Add a Thread to the system for the first time. Should only be called once
2820  * per thread.
2821  * @param t The Thread to add
2822  */
2823 void ModelChecker::add_thread(Thread *t)
2824 {
2825         thread_map->put(id_to_int(t->get_id()), t);
2826         scheduler->add_thread(t);
2827 }
2828
2829 /**
2830  * Removes a thread from the scheduler.
2831  * @param t The Thread to remove.
2832  */
2833 void ModelChecker::remove_thread(Thread *t)
2834 {
2835         scheduler->remove_thread(t);
2836 }
2837
2838 /**
2839  * @brief Get a Thread reference by its ID
2840  * @param tid The Thread's ID
2841  * @return A Thread reference
2842  */
2843 Thread * ModelChecker::get_thread(thread_id_t tid) const
2844 {
2845         return thread_map->get(id_to_int(tid));
2846 }
2847
2848 /**
2849  * @brief Get a reference to the Thread in which a ModelAction was executed
2850  * @param act The ModelAction
2851  * @return A Thread reference
2852  */
2853 Thread * ModelChecker::get_thread(const ModelAction *act) const
2854 {
2855         return get_thread(act->get_tid());
2856 }
2857
2858 /**
2859  * @brief Get a Promise's "promise number"
2860  *
2861  * A "promise number" is an index number that is unique to a promise, valid
2862  * only for a specific snapshot of an execution trace. Promises may come and go
2863  * as they are generated and resolved, so an index only retains meaning for the
2864  * current snapshot.
2865  *
2866  * @param promise The Promise to check
2867  * @return The promise index, if the promise still is valid; otherwise -1
2868  */
2869 int ModelChecker::get_promise_number(const Promise *promise) const
2870 {
2871         for (unsigned int i = 0; i < promises->size(); i++)
2872                 if ((*promises)[i] == promise)
2873                         return i;
2874         /* Not found */
2875         return -1;
2876 }
2877
2878 /**
2879  * @brief Check if a Thread is currently enabled
2880  * @param t The Thread to check
2881  * @return True if the Thread is currently enabled
2882  */
2883 bool ModelChecker::is_enabled(Thread *t) const
2884 {
2885         return scheduler->is_enabled(t);
2886 }
2887
2888 /**
2889  * @brief Check if a Thread is currently enabled
2890  * @param tid The ID of the Thread to check
2891  * @return True if the Thread is currently enabled
2892  */
2893 bool ModelChecker::is_enabled(thread_id_t tid) const
2894 {
2895         return scheduler->is_enabled(tid);
2896 }
2897
2898 /**
2899  * Switch from a model-checker context to a user-thread context. This is the
2900  * complement of ModelChecker::switch_to_master and must be called from the
2901  * model-checker context.
2902  *
2903  * @param thread The user-thread to switch to
2904  */
2905 void ModelChecker::switch_from_master(Thread *thread)
2906 {
2907         scheduler->set_current_thread(thread);
2908         Thread::swap(&system_context, thread);
2909 }
2910
2911 /**
2912  * Switch from a user-context to the "master thread" context (a.k.a. system
2913  * context). This switch is made with the intention of exploring a particular
2914  * model-checking action (described by a ModelAction object). Must be called
2915  * from a user-thread context.
2916  *
2917  * @param act The current action that will be explored. May be NULL only if
2918  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2919  * ModelChecker::has_asserted).
2920  * @return The value returned by the action's execution, if any
2921  */
2922 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2923 {
2924         DBG();
2925         Thread *old = thread_current();
2926         ASSERT(!old->get_pending());
2927         old->set_pending(act);
2928         if (Thread::swap(old, &system_context) < 0) {
2929                 perror("swap threads");
2930                 exit(EXIT_FAILURE);
2931         }
2932         return old->get_return_value();
2933 }
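
/*
 * Illustrative caller's view (a hypothetical wrapper -- constructor
 * arguments abbreviated, see action.h for the real signature): an
 * instrumented atomic load packages itself as a ModelAction, yields to the
 * master via switch_to_master(), and the value the checker selects for the
 * read comes back as the return value:
 *
 *	uint64_t model_read(void *obj, std::memory_order order)
 *	{
 *		return model->switch_to_master(
 *				new ModelAction(ATOMIC_READ, order, obj, VALUE_NONE));
 *	}
 *
 * The stashed action is later consumed on the master side by run(), which
 * pulls it out of Thread::get_pending() and feeds it to take_step().
 */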
2934
2935 /**
2936  * Takes the next step in the execution, if possible.
2937  * @param curr The current step to take
2938  * @return The next Thread to run, if any; NULL if this execution
2939  * should terminate
2940  */
2941 Thread * ModelChecker::take_step(ModelAction *curr)
2942 {
2943         Thread *curr_thrd = get_thread(curr);
2944         ASSERT(curr_thrd->get_state() == THREAD_READY);
2945
2946         curr = check_current_action(curr);
2947
2948         /* Infeasible -> don't take any more steps */
2949         if (is_infeasible())
2950                 return NULL;
2951         else if (isfeasibleprefix() && have_bug_reports()) {
2952                 set_assert();
2953                 return NULL;
2954         }
2955
2956         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2957                 return NULL;
2958
2959         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2960                 scheduler->remove_thread(curr_thrd);
2961
2962         Thread *next_thrd = get_next_thread(curr);
2963
2964         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2965                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2966
2967         return next_thrd;
2968 }
2969
2970 /** Wrapper to run the user's main function, with appropriate arguments */
2971 void user_main_wrapper(void *)
2972 {
2973         user_main(model->params.argc, model->params.argv);
2974 }
2975
2976 /** @brief Run ModelChecker for the user program */
2977 void ModelChecker::run()
2978 {
2979         do {
2980                 thrd_t user_thread;
2981                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2982                 add_thread(t);
2983
2984                 do {
2985                         /*
2986                          * Stash next pending action(s) for thread(s). There
2987                          * should only need to stash one thread's action--the
2988                          * thread which just took a step--plus the first step
2989                          * for any newly-created thread
2990                          */
2991                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2992                                 thread_id_t tid = int_to_id(i);
2993                                 Thread *thr = get_thread(tid);
2994                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2995                                         switch_from_master(thr);
2996                                 }
2997                         }
2998
2999                         /* Catch assertions from prior take_step or from
3000                          * between-ModelAction bugs (e.g., data races) */
3001                         if (has_asserted())
3002                                 break;
3003
3004                         /* Consume the next action for a Thread */
3005                         ModelAction *curr = t->get_pending();
3006                         t->set_pending(NULL);
3007                         t = take_step(curr);
3008                 } while (t && !t->is_model_thread());
3009
3010                 /*
3011                  * Launch end-of-execution release sequence fixups only when
3012                  * the execution is otherwise feasible AND there are:
3013                  *
3014                  * (1) pending release sequences
3015                  * (2) pending assertions that could be invalidated by a change
3016                  * in clock vectors (i.e., data races)
3017                  * (3) no pending promises
3018                  */
3019                 while (!pending_rel_seqs->empty() &&
3020                                 is_feasible_prefix_ignore_relseq() &&
3021                                 !unrealizedraces.empty()) {
3022                         model_print("*** WARNING: release sequence fixup action "
3023                                         "(%zu pending release sequence(s)) ***\n",
3024                                         pending_rel_seqs->size());
3025                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3026                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3027                                         model_thread);
3028                         take_step(fixup);
3029                 }
3030         } while (next_execution());
3031
3032         model_print("******* Model-checking complete: *******\n");
3033         print_stats();
3034 }