1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
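
/*
 * A plausible bring-up sequence from the test harness (a sketch only; the
 * actual entry point and parameter parsing live outside this file):
 *
 *   struct model_params params;       // populated from command-line options
 *   model = new ModelChecker(params); // the global pointer declared above
 */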
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
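/**
 * @brief Look up the action list for a location, creating an empty list if
 * none exists yet
 * @param hash The per-location HashTable to search
 * @param ptr The memory location used as the key
 * @return The (possibly newly-allocated) action list for ptr
 */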
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
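/**
 * @brief Look up the per-thread action-list vector for a location, creating
 * an empty vector if none exists yet
 * @param hash The per-location HashTable to search
 * @param ptr The memory location used as the key
 * @return The (possibly newly-allocated) vector of action lists for ptr
 */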
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores the user program to its initial state and resets all
152  * model-checker data structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
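/** @return the current head Node of the NodeStack */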
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of the read/write portions of an RMW action, force a THREAD_CREATE to
212  * be followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it may
218  * guide the choice of the next thread (i.e., THREAD_CREATE should be followed by
219  * THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise NULL, if no
221  * threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different write. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* Start the round robin scheduler from this thread id */
286                 scheduler->set_scheduler_thread(tid);
287                 /* The correct sleep set is in the parent node. */
288                 execute_sleep_set();
289
290                 DEBUG("*** Divergence point ***\n");
291
292                 diverge = NULL;
293         } else {
294                 tid = next->get_tid();
295         }
296         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
297         ASSERT(tid != THREAD_ID_T_NONE);
298         return thread_map->get(id_to_int(tid));
299 }
300
301 /**
302  * We need to know what the next actions of all threads in the sleep
303  * set will be.  This method marks each such thread's pending action
304  * with the sleep flag so it can be recognized later.
305  */
306
307 void ModelChecker::execute_sleep_set()
308 {
309         for (unsigned int i = 0; i < get_num_threads(); i++) {
310                 thread_id_t tid = int_to_id(i);
311                 Thread *thr = get_thread(tid);
312                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
313                         thr->get_pending()->set_sleep_flag();
314                 }
315         }
316 }
317
318 /**
319  * @brief Should the current action wake up a given thread?
320  *
321  * @param curr The current action
322  * @param thread The thread that we might wake up
323  * @return True, if we should wake up the sleeping thread; false otherwise
324  */
325 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
326 {
327         const ModelAction *asleep = thread->get_pending();
328         /* Don't allow partial RMW to wake anyone up */
329         if (curr->is_rmwr())
330                 return false;
331         /* Synchronizing actions may have been backtracked */
332         if (asleep->could_synchronize_with(curr))
333                 return true;
334         /* All acquire/release fences and fence-acquire/store-release */
335         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
336                 return true;
337         /* Fence-release + store can awake load-acquire on the same location */
338         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
339                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
340                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
341                         return true;
342         }
343         return false;
344 }
345
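/**
 * @brief Remove sleeping threads that the current action might wake up
 *
 * Scans all threads in the scheduler's sleep set and removes any thread
 * whose pending action could be woken by the current action (see
 * should_wake_up()).
 *
 * @param curr The current action
 */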
346 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
347 {
348         for (unsigned int i = 0; i < get_num_threads(); i++) {
349                 Thread *thr = get_thread(int_to_id(i));
350                 if (scheduler->is_sleep_set(thr)) {
351                         if (should_wake_up(curr, thr))
352                                 /* Remove this thread from sleep set */
353                                 scheduler->remove_sleep(thr);
354                 }
355         }
356 }
357
358 /** @brief Alert the model-checker that an incorrectly-ordered
359  * synchronization was made */
360 void ModelChecker::set_bad_synchronization()
361 {
362         priv->bad_synchronization = true;
363 }
364
365 /**
366  * Check whether the current trace has triggered an assertion which should halt
367  * its execution.
368  *
369  * @return True, if the execution should be aborted; false otherwise
370  */
371 bool ModelChecker::has_asserted() const
372 {
373         return priv->asserted;
374 }
375
376 /**
377  * Trigger a trace assertion which should cause this execution to be halted.
378  * This can be due to a detected bug or due to an infeasibility that should
379  * halt ASAP.
380  */
381 void ModelChecker::set_assert()
382 {
383         priv->asserted = true;
384 }
385
386 /**
387  * Check if we are in a deadlock. Should only be called at the end of an
388  * execution, although it should not give false positives in the middle of an
389  * execution (there should be some ENABLED thread).
390  *
391  * @return True if program is in a deadlock; false otherwise
392  */
393 bool ModelChecker::is_deadlocked() const
394 {
395         bool blocking_threads = false;
396         for (unsigned int i = 0; i < get_num_threads(); i++) {
397                 thread_id_t tid = int_to_id(i);
398                 if (is_enabled(tid))
399                         return false;
400                 Thread *t = get_thread(tid);
401                 if (!t->is_model_thread() && t->get_pending())
402                         blocking_threads = true;
403         }
404         return blocking_threads;
405 }
406
407 /**
408  * Check if this is a complete execution. That is, have all threads completed
409  * execution (rather than exiting because sleep sets have forced a redundant
410  * execution)?
411  *
412  * @return True if the execution is complete.
413  */
414 bool ModelChecker::is_complete_execution() const
415 {
416         for (unsigned int i = 0; i < get_num_threads(); i++)
417                 if (is_enabled(int_to_id(i)))
418                         return false;
419         return true;
420 }
421
422 /**
423  * @brief Assert a bug in the executing program.
424  *
425  * Use this function to assert any sort of bug in the user program. If the
426  * current trace is feasible (actually, a prefix of some feasible execution),
427  * then this execution will be aborted, printing the appropriate message. If
428  * the current trace is not yet feasible, the error message will be stashed and
429  * printed if the execution ever becomes feasible.
430  *
431  * @param msg Descriptive message for the bug (do not include newline char)
432  * @return True if the bug is immediately feasible
433  */
434 bool ModelChecker::assert_bug(const char *msg)
435 {
436         priv->bugs.push_back(new bug_message(msg));
437
438         if (isfeasibleprefix()) {
439                 set_assert();
440                 return true;
441         }
442         return false;
443 }
444
445 /**
446  * @brief Assert a bug in the executing program, asserted by a user thread
447  * @see ModelChecker::assert_bug
448  * @param msg Descriptive message for the bug (do not include newline char)
449  */
450 void ModelChecker::assert_user_bug(const char *msg)
451 {
452         /* If feasible bug, bail out now */
453         if (assert_bug(msg))
454                 switch_to_master(NULL);
455 }
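
/*
 * A rough usage sketch from user-thread context (the surrounding code and
 * the invariant below are hypothetical, not part of this file):
 *
 *   if (shared_counter < 0)
 *       model->assert_user_bug("counter went negative");
 *
 * When the bug is immediately feasible, assert_user_bug() asserts the trace
 * and yields back to the model-checker via switch_to_master().
 */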
456
457 /** @return True, if any bugs have been reported for this execution */
458 bool ModelChecker::have_bug_reports() const
459 {
460         return priv->bugs.size() != 0;
461 }
462
463 /** @brief Print bug report listing for this execution (if any bugs exist) */
464 void ModelChecker::print_bugs() const
465 {
466         if (have_bug_reports()) {
467                 model_print("Bug report: %zu bug%s detected\n",
468                                 priv->bugs.size(),
469                                 priv->bugs.size() > 1 ? "s" : "");
470                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
471                         priv->bugs[i]->print();
472         }
473 }
474
475 /**
476  * @brief Record end-of-execution stats
477  *
478  * Must be run when exiting an execution. Records various stats.
479  * @see struct execution_stats
480  */
481 void ModelChecker::record_stats()
482 {
483         stats.num_total++;
484         if (!isfeasibleprefix())
485                 stats.num_infeasible++;
486         else if (have_bug_reports())
487                 stats.num_buggy_executions++;
488         else if (is_complete_execution())
489                 stats.num_complete++;
490         else
491                 stats.num_redundant++;
492 }
493
494 /** @brief Print execution stats */
495 void ModelChecker::print_stats() const
496 {
497         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
498         model_print("Number of redundant executions: %d\n", stats.num_redundant);
499         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
500         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
501         model_print("Total executions: %d\n", stats.num_total);
502         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
503 }
504
505 /**
506  * @brief End-of-execution print
507  * @param printbugs Should any existing bugs be printed?
508  */
509 void ModelChecker::print_execution(bool printbugs) const
510 {
511         print_program_output();
512
513         if (DBG_ENABLED() || params.verbose) {
514                 model_print("Earliest divergence point since last feasible execution:\n");
515                 if (earliest_diverge)
516                         earliest_diverge->print();
517                 else
518                         model_print("(Not set)\n");
519
520                 model_print("\n");
521                 print_stats();
522         }
523
524         /* Don't print invalid bugs */
525         if (printbugs)
526                 print_bugs();
527
528         model_print("\n");
529         print_summary();
530 }
531
532 /**
533  * Queries the model-checker for more executions to explore and, if one
534  * exists, resets the model-checker state to execute a new execution.
535  *
536  * @return If there are more executions to explore, return true. Otherwise,
537  * return false.
538  */
539 bool ModelChecker::next_execution()
540 {
541         DBG();
542         /* Is this execution a feasible execution that's worth bug-checking? */
543         bool complete = isfeasibleprefix() && (is_complete_execution() ||
544                         have_bug_reports());
545
546         /* End-of-execution bug checks */
547         if (complete) {
548                 if (is_deadlocked())
549                         assert_bug("Deadlock detected");
550
551                 checkDataRaces();
552         }
553
554         record_stats();
555
556         /* Output */
557         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
558                 print_execution(complete);
559         else
560                 clear_program_output();
561
562         if (complete)
563                 earliest_diverge = NULL;
564
565         if ((diverge = get_next_backtrack()) == NULL)
566                 return false;
567
568         if (DBG_ENABLED()) {
569                 model_print("Next execution will diverge at:\n");
570                 diverge->print();
571         }
572
573         reset_to_initial_state();
574         return true;
575 }
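
/*
 * A minimal sketch of the exploration loop a driver might build around this
 * method (run_user_program() is an illustrative placeholder; the real
 * harness lives outside this file):
 *
 *   do {
 *       run_user_program();             // execute one interleaving to completion
 *   } while (model->next_execution());  // roll back and pick the next divergence
 */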
576
577 /**
578  * @brief Find the last fence-related backtracking conflict for a ModelAction
579  *
580  * This function performs the search for the most recent conflicting action
581  * against which we should perform backtracking, as affected by fence
582  * operations. This includes pairs of potentially-synchronizing actions which
583  * occur due to fence-acquire or fence-release, and hence should be explored in
584  * the opposite execution order.
585  *
586  * @param act The current action
587  * @return The most recent action which conflicts with act due to fences
588  */
589 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
590 {
591         /* Only perform release/acquire fence backtracking for stores */
592         if (!act->is_write())
593                 return NULL;
594
595         /* Find a fence-release (or, act is a release) */
596         ModelAction *last_release;
597         if (act->is_release())
598                 last_release = act;
599         else
600                 last_release = get_last_fence_release(act->get_tid());
601         if (!last_release)
602                 return NULL;
603
604         /* Skip past the release */
605         action_list_t *list = action_trace;
606         action_list_t::reverse_iterator rit;
607         for (rit = list->rbegin(); rit != list->rend(); rit++)
608                 if (*rit == last_release)
609                         break;
610         ASSERT(rit != list->rend());
611
612         /* Find a prior:
613          *   load-acquire
614          * or
615          *   load --sb-> fence-acquire */
616         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
617         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
618         bool found_acquire_fences = false;
619         for ( ; rit != list->rend(); rit++) {
620                 ModelAction *prev = *rit;
621                 if (act->same_thread(prev))
622                         continue;
623
624                 int tid = id_to_int(prev->get_tid());
625
626                 if (prev->is_read() && act->same_var(prev)) {
627                         if (prev->is_acquire()) {
628                                 /* Found most recent load-acquire, don't need
629                                  * to search for more fences */
630                                 if (!found_acquire_fences)
631                                         return NULL;
632                         } else {
633                                 prior_loads[tid] = prev;
634                         }
635                 }
636                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
637                         found_acquire_fences = true;
638                         acquire_fences[tid] = prev;
639                 }
640         }
641
642         ModelAction *latest_backtrack = NULL;
643         for (unsigned int i = 0; i < acquire_fences.size(); i++)
644                 if (acquire_fences[i] && prior_loads[i])
645                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
646                                 latest_backtrack = acquire_fences[i];
647         return latest_backtrack;
648 }
649
650 /**
651  * @brief Find the last backtracking conflict for a ModelAction
652  *
653  * This function performs the search for the most recent conflicting action
654  * against which we should perform backtracking. This primarily includes pairs of
655  * synchronizing actions which should be explored in the opposite execution
656  * order.
657  *
658  * @param act The current action
659  * @return The most recent action which conflicts with act
660  */
661 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
662 {
663         switch (act->get_type()) {
664         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
665         case ATOMIC_READ:
666         case ATOMIC_WRITE:
667         case ATOMIC_RMW: {
668                 ModelAction *ret = NULL;
669
670                 /* linear search: from most recent to oldest */
671                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
672                 action_list_t::reverse_iterator rit;
673                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
674                         ModelAction *prev = *rit;
675                         if (prev->could_synchronize_with(act)) {
676                                 ret = prev;
677                                 break;
678                         }
679                 }
680
681                 ModelAction *ret2 = get_last_fence_conflict(act);
682                 if (!ret2)
683                         return ret;
684                 if (!ret)
685                         return ret2;
686                 if (*ret < *ret2)
687                         return ret2;
688                 return ret;
689         }
690         case ATOMIC_LOCK:
691         case ATOMIC_TRYLOCK: {
692                 /* linear search: from most recent to oldest */
693                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
694                 action_list_t::reverse_iterator rit;
695                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
696                         ModelAction *prev = *rit;
697                         if (act->is_conflicting_lock(prev))
698                                 return prev;
699                 }
700                 break;
701         }
702         case ATOMIC_UNLOCK: {
703                 /* linear search: from most recent to oldest */
704                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
705                 action_list_t::reverse_iterator rit;
706                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
707                         ModelAction *prev = *rit;
708                         if (!act->same_thread(prev) && prev->is_failed_trylock())
709                                 return prev;
710                 }
711                 break;
712         }
713         case ATOMIC_WAIT: {
714                 /* linear search: from most recent to oldest */
715                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
716                 action_list_t::reverse_iterator rit;
717                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
718                         ModelAction *prev = *rit;
719                         if (!act->same_thread(prev) && prev->is_failed_trylock())
720                                 return prev;
721                         if (!act->same_thread(prev) && prev->is_notify())
722                                 return prev;
723                 }
724                 break;
725         }
726
727         case ATOMIC_NOTIFY_ALL:
728         case ATOMIC_NOTIFY_ONE: {
729                 /* linear search: from most recent to oldest */
730                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
731                 action_list_t::reverse_iterator rit;
732                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
733                         ModelAction *prev = *rit;
734                         if (!act->same_thread(prev) && prev->is_wait())
735                                 return prev;
736                 }
737                 break;
738         }
739         default:
740                 break;
741         }
742         return NULL;
743 }
744
745 /** This method finds backtracking points at which we should try to
746  * reorder the parameter ModelAction against conflicting actions.
747  *
748  * @param act The ModelAction to find backtracking points for.
749  */
750 void ModelChecker::set_backtracking(ModelAction *act)
751 {
752         Thread *t = get_thread(act);
753         ModelAction *prev = get_last_conflict(act);
754         if (prev == NULL)
755                 return;
756
757         Node *node = prev->get_node()->get_parent();
758
759         int low_tid, high_tid;
760         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
761                 low_tid = id_to_int(act->get_tid());
762                 high_tid = low_tid + 1;
763         } else {
764                 low_tid = 0;
765                 high_tid = get_num_threads();
766         }
767
768         for (int i = low_tid; i < high_tid; i++) {
769                 thread_id_t tid = int_to_id(i);
770
771                 /* Make sure this thread can be enabled here. */
772                 if (i >= node->get_num_threads())
773                         break;
774
775                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
776                 if (node->enabled_status(tid) != THREAD_ENABLED)
777                         continue;
778
779                 /* Check if this has been explored already */
780                 if (node->has_been_explored(tid))
781                         continue;
782
783                 /* See if fairness allows */
784                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
785                         bool unfair = false;
786                         for (int t = 0; t < node->get_num_threads(); t++) {
787                                 thread_id_t tother = int_to_id(t);
788                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
789                                         unfair = true;
790                                         break;
791                                 }
792                         }
793                         if (unfair)
794                                 continue;
795                 }
796                 /* Cache the latest backtracking point */
797                 set_latest_backtrack(prev);
798
799                 /* If this is a new backtracking point, mark the tree */
800                 if (!node->set_backtrack(tid))
801                         continue;
802                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
803                                         id_to_int(prev->get_tid()),
804                                         id_to_int(t->get_id()));
805                 if (DBG_ENABLED()) {
806                         prev->print();
807                         act->print();
808                 }
809         }
810 }
811
812 /**
813  * @brief Cache a backtracking point as the "most recent", if eligible
814  *
815  * Note that this does not prepare the NodeStack for this backtracking
816  * operation; it only caches the action on a per-execution basis
817  *
818  * @param act The operation at which we should explore a different next action
819  * (i.e., backtracking point)
820  * @return True, if this action is now the most recent backtracking point;
821  * false otherwise
822  */
823 bool ModelChecker::set_latest_backtrack(ModelAction *act)
824 {
825         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
826                 priv->next_backtrack = act;
827                 return true;
828         }
829         return false;
830 }
831
832 /**
833  * Returns the last backtracking point. The model checker will explore a different
834  * path for this point in the next execution.
835  * @return The ModelAction at which the next execution should diverge.
836  */
837 ModelAction * ModelChecker::get_next_backtrack()
838 {
839         ModelAction *next = priv->next_backtrack;
840         priv->next_backtrack = NULL;
841         return next;
842 }
843
844 /**
845  * Processes a read or rmw model action.
846  * @param curr is the read model action to process.
847  * @param second_part_of_rmw is a boolean that is true if this is the second action of an RMW.
848  * @return True if processing this read updates the mo_graph.
849  */
850 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
851 {
852         uint64_t value = VALUE_NONE;
853         bool updated = false;
854         while (true) {
855                 const ModelAction *reads_from = curr->get_node()->get_read_from();
856                 if (reads_from != NULL) {
857                         mo_graph->startChanges();
858
859                         value = reads_from->get_value();
860                         bool r_status = false;
861
862                         if (!second_part_of_rmw) {
863                                 check_recency(curr, reads_from);
864                                 r_status = r_modification_order(curr, reads_from);
865                         }
866
867                         if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
868                                 mo_graph->rollbackChanges();
869                                 priv->too_many_reads = false;
870                                 continue;
871                         }
872
873                         read_from(curr, reads_from);
874                         mo_graph->commitChanges();
875                         mo_check_promises(curr, true);
876
877                         updated |= r_status;
878                 } else if (!second_part_of_rmw) {
879                         /* Read from future value */
880                         struct future_value fv = curr->get_node()->get_future_value();
881                         Promise *promise = new Promise(curr, fv);
882                         value = fv.value;
883                         curr->set_read_from_promise(promise);
884                         promises->push_back(promise);
885                         mo_graph->startChanges();
886                         updated = r_modification_order(curr, promise);
887                         mo_graph->commitChanges();
888                 }
889                 get_thread(curr)->set_return_value(value);
890                 return updated;
891         }
892 }
893
894 /**
895  * Processes a lock, trylock, unlock, wait, or notify model action.
896  * @param curr is the mutex operation to process.
897  *
898  * The trylock operation checks whether the lock is taken.  If not,
899  * it falls through to the normal lock operation case.  If so, it
900  * fails.
901  *
902  * The lock operation has already been checked that it is enabled, so
903  * it just grabs the lock and synchronizes with the previous unlock.
904  *
905  * The unlock operation has to re-enable all of the threads that are
906  * waiting on the lock.
907  *
908  * @return True if synchronization was updated; false otherwise
909  */
910 bool ModelChecker::process_mutex(ModelAction *curr)
911 {
912         std::mutex *mutex = NULL;
913         struct std::mutex_state *state = NULL;
914
915         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
916                 mutex = (std::mutex *)curr->get_location();
917                 state = mutex->get_state();
918         } else if (curr->is_wait()) {
919                 mutex = (std::mutex *)curr->get_value();
920                 state = mutex->get_state();
921         }
922
923         switch (curr->get_type()) {
924         case ATOMIC_TRYLOCK: {
925                 bool success = !state->islocked;
926                 curr->set_try_lock(success);
927                 if (!success) {
928                         get_thread(curr)->set_return_value(0);
929                         break;
930                 }
931                 get_thread(curr)->set_return_value(1);
932         }
933                 //otherwise fall through to the lock case
934         case ATOMIC_LOCK: {
935                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
936                         assert_bug("Lock access before initialization");
937                 state->islocked = true;
938                 ModelAction *unlock = get_last_unlock(curr);
939                 //synchronize with the previous unlock statement
940                 if (unlock != NULL) {
941                         curr->synchronize_with(unlock);
942                         return true;
943                 }
944                 break;
945         }
946         case ATOMIC_UNLOCK: {
947                 //unlock the lock
948                 state->islocked = false;
949                 //wake up the other threads
950                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
951                 //activate all the waiting threads
952                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
953                         scheduler->wake(get_thread(*rit));
954                 }
955                 waiters->clear();
956                 break;
957         }
958         case ATOMIC_WAIT: {
959                 //unlock the lock
960                 state->islocked = false;
961                 //wake up the other threads
962                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
963                 //activate all the waiting threads
964                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
965                         scheduler->wake(get_thread(*rit));
966                 }
967                 waiters->clear();
968                 //check whether we should go to sleep or not...simulate spurious failures
969                 if (curr->get_node()->get_misc() == 0) {
970                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
971                         //disable us
972                         scheduler->sleep(get_thread(curr));
973                 }
974                 break;
975         }
976         case ATOMIC_NOTIFY_ALL: {
977                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
978                 //activate all the waiting threads
979                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
980                         scheduler->wake(get_thread(*rit));
981                 }
982                 waiters->clear();
983                 break;
984         }
985         case ATOMIC_NOTIFY_ONE: {
986                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
987                 int wakeupthread = curr->get_node()->get_misc();
988                 action_list_t::iterator it = waiters->begin();
989                 advance(it, wakeupthread);
990                 scheduler->wake(get_thread(*it));
991                 waiters->erase(it);
992                 break;
993         }
994
995         default:
996                 ASSERT(0);
997         }
998         return false;
999 }
1000
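/**
 * @brief Offer a future value from a writer to a reader
 *
 * If the modification order may allow it, records a future value (with an
 * expiration based on params.maxfuturedelay) in the reader's Node so that a
 * later execution can explore reading from this write. Sets a backtracking
 * point at the reader if the value is new.
 *
 * @param writer The write action that supplies the value
 * @param reader The read action that may later observe the value
 */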
1001 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1002 {
1003         /* Do more ambitious checks now that mo is more complete */
1004         if (mo_may_allow(writer, reader)) {
1005                 Node *node = reader->get_node();
1006
1007                 /* Find an ancestor thread which exists at the time of the reader */
1008                 Thread *write_thread = get_thread(writer);
1009                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1010                         write_thread = write_thread->get_parent();
1011
1012                 struct future_value fv = {
1013                         writer->get_value(),
1014                         writer->get_seq_number() + params.maxfuturedelay,
1015                         write_thread->get_id(),
1016                 };
1017                 if (node->add_future_value(fv))
1018                         set_latest_backtrack(reader);
1019         }
1020 }
1021
1022 /**
1023  * Process a write ModelAction
1024  * @param curr The ModelAction to process
1025  * @return True if the mo_graph was updated or promises were resolved
1026  */
1027 bool ModelChecker::process_write(ModelAction *curr)
1028 {
1029         bool updated_mod_order = w_modification_order(curr);
1030         bool updated_promises = resolve_promises(curr);
1031
1032         if (promises->size() == 0) {
1033                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1034                         struct PendingFutureValue pfv = (*futurevalues)[i];
1035                         add_future_value(pfv.writer, pfv.act);
1036                 }
1037                 futurevalues->clear();
1038         }
1039
1040         mo_graph->commitChanges();
1041         mo_check_promises(curr, false);
1042
1043         get_thread(curr)->set_return_value(VALUE_NONE);
1044         return updated_mod_order || updated_promises;
1045 }
1046
1047 /**
1048  * Process a fence ModelAction
1049  * @param curr The ModelAction to process
1050  * @return True if synchronization was updated
1051  */
1052 bool ModelChecker::process_fence(ModelAction *curr)
1053 {
1054         /*
1055          * fence-relaxed: no-op
1056  * fence-release: only log the occurrence (not in this function), for
1057          *   use in later synchronization
1058          * fence-acquire (this function): search for hypothetical release
1059          *   sequences
1060          */
1061         bool updated = false;
1062         if (curr->is_acquire()) {
1063                 action_list_t *list = action_trace;
1064                 action_list_t::reverse_iterator rit;
1065                 /* Find X : is_read(X) && X --sb-> curr */
1066                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1067                         ModelAction *act = *rit;
1068                         if (act == curr)
1069                                 continue;
1070                         if (act->get_tid() != curr->get_tid())
1071                                 continue;
1072                         /* Stop at the beginning of the thread */
1073                         if (act->is_thread_start())
1074                                 break;
1075                         /* Stop once we reach a prior fence-acquire */
1076                         if (act->is_fence() && act->is_acquire())
1077                                 break;
1078                         if (!act->is_read())
1079                                 continue;
1080                         /* read-acquire will find its own release sequences */
1081                         if (act->is_acquire())
1082                                 continue;
1083
1084                         /* Establish hypothetical release sequences */
1085                         rel_heads_list_t release_heads;
1086                         get_release_seq_heads(curr, act, &release_heads);
1087                         for (unsigned int i = 0; i < release_heads.size(); i++)
1088                                 if (!curr->synchronize_with(release_heads[i]))
1089                                         set_bad_synchronization();
1090                         if (release_heads.size() != 0)
1091                                 updated = true;
1092                 }
1093         }
1094         return updated;
1095 }
1096
1097 /**
1098  * @brief Process the current action for thread-related activity
1099  *
1100  * Performs current-action processing for a THREAD_* ModelAction. Processing
1101  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1102  * synchronization, etc.  This function is a no-op for non-THREAD actions
1103  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1104  *
1105  * @param curr The current action
1106  * @return True if synchronization was updated or a thread completed
1107  */
1108 bool ModelChecker::process_thread_action(ModelAction *curr)
1109 {
1110         bool updated = false;
1111
1112         switch (curr->get_type()) {
1113         case THREAD_CREATE: {
1114                 thrd_t *thrd = (thrd_t *)curr->get_location();
1115                 struct thread_params *params = (struct thread_params *)curr->get_value();
1116                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1117                 add_thread(th);
1118                 th->set_creation(curr);
1119                 /* Promises can be satisfied by children */
1120                 for (unsigned int i = 0; i < promises->size(); i++) {
1121                         Promise *promise = (*promises)[i];
1122                         if (promise->thread_is_available(curr->get_tid()))
1123                                 promise->add_thread(th->get_id());
1124                 }
1125                 break;
1126         }
1127         case THREAD_JOIN: {
1128                 Thread *blocking = curr->get_thread_operand();
1129                 ModelAction *act = get_last_action(blocking->get_id());
1130                 curr->synchronize_with(act);
1131                 updated = true; /* trigger rel-seq checks */
1132                 break;
1133         }
1134         case THREAD_FINISH: {
1135                 Thread *th = get_thread(curr);
1136                 while (!th->wait_list_empty()) {
1137                         ModelAction *act = th->pop_wait_list();
1138                         scheduler->wake(get_thread(act));
1139                 }
1140                 th->complete();
1141                 /* Completed thread can't satisfy promises */
1142                 for (unsigned int i = 0; i < promises->size(); i++) {
1143                         Promise *promise = (*promises)[i];
1144                         if (promise->thread_is_available(th->get_id()))
1145                                 if (promise->eliminate_thread(th->get_id()))
1146                                         priv->failed_promise = true;
1147                 }
1148                 updated = true; /* trigger rel-seq checks */
1149                 break;
1150         }
1151         case THREAD_START: {
1152                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1153                 break;
1154         }
1155         default:
1156                 break;
1157         }
1158
1159         return updated;
1160 }
1161
1162 /**
1163  * @brief Process the current action for release sequence fixup activity
1164  *
1165  * Performs model-checker release sequence fixups for the current action,
1166  * forcing a single pending release sequence to break (with a given, potential
1167  * "loose" write) or to complete (i.e., synchronize). If a pending release
1168  * sequence forms a complete release sequence, then we must perform the fixup
1169  * synchronization, mo_graph additions, etc.
1170  *
1171  * @param curr The current action; must be a release sequence fixup action
1172  * @param work_queue The work queue to which to add work items as they are
1173  * generated
1174  */
1175 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1176 {
1177         const ModelAction *write = curr->get_node()->get_relseq_break();
1178         struct release_seq *sequence = pending_rel_seqs->back();
1179         pending_rel_seqs->pop_back();
1180         ASSERT(sequence);
1181         ModelAction *acquire = sequence->acquire;
1182         const ModelAction *rf = sequence->rf;
1183         const ModelAction *release = sequence->release;
1184         ASSERT(acquire);
1185         ASSERT(release);
1186         ASSERT(rf);
1187         ASSERT(release->same_thread(rf));
1188
1189         if (write == NULL) {
1190                 /**
1191                  * @todo Forcing a synchronization requires that we set
1192                  * modification order constraints. For instance, we can't allow
1193                  * a fixup sequence in which two separate read-acquire
1194                  * operations read from the same sequence, where the first one
1195                  * synchronizes and the other doesn't. Essentially, we can't
1196                  * allow any writes to insert themselves between 'release' and
1197                  * 'rf'
1198                  */
1199
1200                 /* Must synchronize */
1201                 if (!acquire->synchronize_with(release)) {
1202                         set_bad_synchronization();
1203                         return;
1204                 }
1205                 /* Re-check all pending release sequences */
1206                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1207                 /* Re-check act for mo_graph edges */
1208                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1209
1210                 /* propagate synchronization to later actions */
1211                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1212                 for (; (*rit) != acquire; rit++) {
1213                         ModelAction *propagate = *rit;
1214                         if (acquire->happens_before(propagate)) {
1215                                 propagate->synchronize_with(acquire);
1216                                 /* Re-check 'propagate' for mo_graph edges */
1217                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1218                         }
1219                 }
1220         } else {
1221                 /* Break release sequence with new edges:
1222                  *   release --mo--> write --mo--> rf */
1223                 mo_graph->addEdge(release, write);
1224                 mo_graph->addEdge(write, rf);
1225         }
1226
1227         /* See if we have realized a data race */
1228         checkDataRaces();
1229 }
1230
1231 /**
1232  * Initialize the current action by performing one or more of the following
1233  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1234  * in the NodeStack, manipulating backtracking sets, allocating and
1235  * initializing clock vectors, and computing the promises to fulfill.
1236  *
1237  * @param curr The current action, as passed from the user context; may be
1238  * freed/invalidated after the execution of this function, with a different
1239  * action "returned" in its place (pass-by-reference)
1240  * @return True if curr is a newly-explored action; false otherwise
1241  */
1242 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1243 {
1244         ModelAction *newcurr;
1245
1246         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1247                 newcurr = process_rmw(*curr);
1248                 delete *curr;
1249
1250                 if (newcurr->is_rmw())
1251                         compute_promises(newcurr);
1252
1253                 *curr = newcurr;
1254                 return false;
1255         }
1256
1257         (*curr)->set_seq_number(get_next_seq_num());
1258
1259         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1260         if (newcurr) {
1261                 /* First restore type and order in case of RMW operation */
1262                 if ((*curr)->is_rmwr())
1263                         newcurr->copy_typeandorder(*curr);
1264
1265                 ASSERT((*curr)->get_location() == newcurr->get_location());
1266                 newcurr->copy_from_new(*curr);
1267
1268                 /* Discard duplicate ModelAction; use action from NodeStack */
1269                 delete *curr;
1270
1271                 /* Always compute new clock vector */
1272                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1273
1274                 *curr = newcurr;
1275                 return false; /* Action was explored previously */
1276         } else {
1277                 newcurr = *curr;
1278
1279                 /* Always compute new clock vector */
1280                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1281
1282                 /* Assign most recent release fence */
1283                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1284
1285                 /*
1286                  * Perform one-time actions when pushing new ModelAction onto
1287                  * NodeStack
1288                  */
1289                 if (newcurr->is_write())
1290                         compute_promises(newcurr);
1291                 else if (newcurr->is_relseq_fixup())
1292                         compute_relseq_breakwrites(newcurr);
1293                 else if (newcurr->is_wait())
1294                         newcurr->get_node()->set_misc_max(2);
1295                 else if (newcurr->is_notify_one()) {
1296                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1297                 }
1298                 return true; /* This was a new ModelAction */
1299         }
1300 }
1301
1302 /**
1303  * @brief Establish reads-from relation between two actions
1304  *
1305  * Perform basic operations involved with establishing a concrete rf relation,
1306  * including setting the ModelAction data and checking for release sequences.
1307  *
1308  * @param act The action that is reading (must be a read)
1309  * @param rf The action from which we are reading (must be a write)
1310  *
1311  * @return True if this read established synchronization
1312  */
1313 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1314 {
1315         act->set_read_from(rf);
1316         if (rf != NULL && act->is_acquire()) {
1317                 rel_heads_list_t release_heads;
1318                 get_release_seq_heads(act, act, &release_heads);
1319                 int num_heads = release_heads.size();
1320                 for (unsigned int i = 0; i < release_heads.size(); i++)
1321                         if (!act->synchronize_with(release_heads[i])) {
1322                                 set_bad_synchronization();
1323                                 num_heads--;
1324                         }
1325                 return num_heads > 0;
1326         }
1327         return false;
1328 }
1329
1330 /**
1331  * Check promises and eliminate potentially-satisfying threads when a thread is
1332  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1333  * no longer satisfy a promise generated from that thread.
1334  *
1335  * @param blocker The thread on which a thread is waiting
1336  * @param waiting The waiting thread
1337  */
1338 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1339 {
1340         for (unsigned int i = 0; i < promises->size(); i++) {
1341                 Promise *promise = (*promises)[i];
1342                 ModelAction *reader = promise->get_action();
1343                 if (reader->get_tid() != blocker->get_id())
1344                         continue;
1345                 if (!promise->thread_is_available(waiting->get_id()))
1346                         continue;
1347                 if (promise->eliminate_thread(waiting->get_id())) {
1348                         /* Promise has failed */
1349                         priv->failed_promise = true;
1350                 }
1351         }
1352 }
1353
1354 /**
1355  * @brief Check whether a model action is enabled.
1356  *
1357  * Checks whether a lock or join operation would succeed (i.e., the lock is
1358  * not already held, and the thread being joined has completed). If not, the
1359  * action is placed on the appropriate waiter list.
1360  *
1361  * @param curr The ModelAction to check
1362  * @return True if the action is enabled; false otherwise
1363  */
1364 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1365         if (curr->is_lock()) {
1366                 std::mutex *lock = (std::mutex *)curr->get_location();
1367                 struct std::mutex_state *state = lock->get_state();
1368                 if (state->islocked) {
1369                         //Stick the action in the appropriate waiting queue
1370                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1371                         return false;
1372                 }
1373         } else if (curr->get_type() == THREAD_JOIN) {
1374                 Thread *blocking = (Thread *)curr->get_location();
1375                 if (!blocking->is_complete()) {
1376                         blocking->push_wait_list(curr);
1377                         thread_blocking_check_promises(blocking, get_thread(curr));
1378                         return false;
1379                 }
1380         }
1381
1382         return true;
1383 }
1384
1385 /**
1386  * This is the heart of the model checker routine. It performs model-checking
1387  * actions corresponding to a given "current action." Among other processes, it
1388  * calculates reads-from relationships, updates synchronization clock vectors,
1389  * forms a memory_order constraints graph, and handles replay/backtrack
1390  * execution when running permutations of previously-observed executions.
1391  *
1392  * @param curr The current action to process
1393  * @return The ModelAction that is actually executed; may be different than
1394  * curr; may be NULL, if the current action is not enabled to run
1395  */
1396 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1397 {
1398         ASSERT(curr);
1399         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1400
1401         if (!check_action_enabled(curr)) {
1402                 /* Make the execution look like we chose to run this action
1403                  * much later, when a lock/join can succeed */
1404                 get_thread(curr)->set_pending(curr);
1405                 scheduler->sleep(get_thread(curr));
1406                 return NULL;
1407         }
1408
1409         bool newly_explored = initialize_curr_action(&curr);
1410
1411         DBG();
1412         if (DBG_ENABLED())
1413                 curr->print();
1414
1415         wake_up_sleeping_actions(curr);
1416
1417         /* Add the action to lists before any other model-checking tasks */
1418         if (!second_part_of_rmw)
1419                 add_action_to_lists(curr);
1420
1421         /* Build may_read_from set for newly-created actions */
1422         if (newly_explored && curr->is_read())
1423                 build_may_read_from(curr);
1424
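        /*
         * Note: the work queue below is drained to a fixed point; handling
         * one entry (e.g., a read) may enqueue further release-sequence or
         * mo-edge checks, which are processed until no new work remains or
         * the model checker has asserted a bug.
         */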
1425         /* Initialize work_queue with the "current action" work */
1426         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1427         while (!work_queue.empty() && !has_asserted()) {
1428                 WorkQueueEntry work = work_queue.front();
1429                 work_queue.pop_front();
1430
1431                 switch (work.type) {
1432                 case WORK_CHECK_CURR_ACTION: {
1433                         ModelAction *act = work.action;
1434                         bool update = false; /* update this location's release seq's */
1435                         bool update_all = false; /* update all release seq's */
1436
1437                         if (process_thread_action(curr))
1438                                 update_all = true;
1439
1440                         if (act->is_read() && process_read(act, second_part_of_rmw))
1441                                 update = true;
1442
1443                         if (act->is_write() && process_write(act))
1444                                 update = true;
1445
1446                         if (act->is_fence() && process_fence(act))
1447                                 update_all = true;
1448
1449                         if (act->is_mutex_op() && process_mutex(act))
1450                                 update_all = true;
1451
1452                         if (act->is_relseq_fixup())
1453                                 process_relseq_fixup(curr, &work_queue);
1454
1455                         if (update_all)
1456                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1457                         else if (update)
1458                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1459                         break;
1460                 }
1461                 case WORK_CHECK_RELEASE_SEQ:
1462                         resolve_release_sequences(work.location, &work_queue);
1463                         break;
1464                 case WORK_CHECK_MO_EDGES: {
1465                         /** @todo Complete verification of work_queue */
1466                         ModelAction *act = work.action;
1467                         bool updated = false;
1468
1469                         if (act->is_read()) {
1470                                 const ModelAction *rf = act->get_reads_from();
1471                                 const Promise *promise = act->get_reads_from_promise();
1472                                 if (rf) {
1473                                         if (r_modification_order(act, rf))
1474                                                 updated = true;
1475                                 } else if (promise) {
1476                                         if (r_modification_order(act, promise))
1477                                                 updated = true;
1478                                 }
1479                         }
1480                         if (act->is_write()) {
1481                                 if (w_modification_order(act))
1482                                         updated = true;
1483                         }
1484                         mo_graph->commitChanges();
1485
1486                         if (updated)
1487                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1488                         break;
1489                 }
1490                 default:
1491                         ASSERT(false);
1492                         break;
1493                 }
1494         }
1495
1496         check_curr_backtracking(curr);
1497         set_backtracking(curr);
1498         return curr;
1499 }
1500
1501 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1502 {
1503         Node *currnode = curr->get_node();
1504         Node *parnode = currnode->get_parent();
1505
1506         if ((parnode && !parnode->backtrack_empty()) ||
1507                          !currnode->misc_empty() ||
1508                          !currnode->read_from_empty() ||
1509                          !currnode->future_value_empty() ||
1510                          !currnode->promise_empty() ||
1511                          !currnode->relseq_break_empty()) {
1512                 set_latest_backtrack(curr);
1513         }
1514 }
1515
1516 bool ModelChecker::promises_expired() const
1517 {
1518         for (unsigned int i = 0; i < promises->size(); i++) {
1519                 Promise *promise = (*promises)[i];
1520                 if (promise->get_expiration() < priv->used_sequence_numbers)
1521                         return true;
1522         }
1523         return false;
1524 }
1525
1526 /**
1527  * This is the strongest feasibility check available.
1528  * @return whether the current trace (partial or complete) must be a prefix of
1529  * a feasible trace.
1530  */
1531 bool ModelChecker::isfeasibleprefix() const
1532 {
1533         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1534 }
1535
1536 /**
1537  * Print diagnostic information about an infeasible execution
1538  * @param prefix A string to prefix the output with; if NULL, then a default
1539  * message prefix will be provided
1540  */
1541 void ModelChecker::print_infeasibility(const char *prefix) const
1542 {
1543         char buf[160]; /* must hold every infeasibility tag at once */
1544         char *ptr = buf;
1545         if (mo_graph->checkForCycles())
1546                 ptr += sprintf(ptr, "[mo cycle]");
1547         if (priv->failed_promise)
1548                 ptr += sprintf(ptr, "[failed promise]");
1549         if (priv->too_many_reads)
1550                 ptr += sprintf(ptr, "[too many reads]");
1551         if (priv->no_valid_reads)
1552                 ptr += sprintf(ptr, "[no valid reads-from]");
1553         if (priv->bad_synchronization)
1554                 ptr += sprintf(ptr, "[bad sw ordering]");
1555         if (promises_expired())
1556                 ptr += sprintf(ptr, "[promise expired]");
1557         if (promises->size() != 0)
1558                 ptr += sprintf(ptr, "[unresolved promise]");
1559         if (ptr != buf)
1560                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1561 }
1562
1563 /**
1564  * Returns whether the current completed trace is feasible, except for pending
1565  * release sequences.
1566  */
1567 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1568 {
1569         return !is_infeasible() && promises->size() == 0;
1570 }
1571
1572 /**
1573  * Check if the current partial trace is infeasible. Does not check any
1574  * end-of-execution flags, which might rule out the execution. Thus, this is
1575  * useful only for ruling an execution as infeasible.
1576  * @return whether the current partial trace is infeasible.
1577  */
1578 bool ModelChecker::is_infeasible() const
1579 {
1580         return mo_graph->checkForCycles() ||
1581                 priv->no_valid_reads ||
1582                 priv->failed_promise ||
1583                 priv->too_many_reads ||
1584                 priv->bad_synchronization ||
1585                 promises_expired();
1586 }
1587
1588 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
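/* Background: an atomic RMW is modeled as two actions -- an RMWR (the read
 * half) followed by either an RMW (the write half, when the operation takes
 * effect) or an RMWC (e.g., a failed compare_exchange). */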
1589 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1590         ModelAction *lastread = get_last_action(act->get_tid());
1591         lastread->process_rmw(act);
1592         if (act->is_rmw()) {
1593                 if (lastread->get_reads_from())
1594                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1595                 else
1596                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1597                 mo_graph->commitChanges();
1598         }
1599         return lastread;
1600 }
1601
1602 /**
1603  * Checks whether a thread has read from the same write too many times
1604  * without seeing the effects of a later write.
1605  *
1606  * Basic idea:
1607  * 1) there must be a different write that we could read from that would satisfy the modification order,
1608  * 2) we must have read from the same value in excess of maxreads times, and
1609  * 3) that other write must have been in the reads_from set for maxreads times.
1610  *
1611  * If so, we decide that the execution is no longer feasible.
1612  */
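/* Illustrative scenario: a spin loop such as
 *   while (!flag.load(std::memory_order_relaxed)) ;
 * could otherwise re-read the same store an unbounded number of times; once
 * a different qualifying write has been available for maxreads consecutive
 * reads of the same value, the execution is flagged too_many_reads and
 * pruned. */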
1613 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1614 {
1615         if (params.maxreads != 0) {
1616                 if (curr->get_node()->get_read_from_size() <= 1)
1617                         return;
1618                 //Must make sure that execution is currently feasible...  We could
1619                 //accidentally clear by rolling back
1620                 if (is_infeasible())
1621                         return;
1622                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1623                 int tid = id_to_int(curr->get_tid());
1624
1625                 /* Skip the check if this thread has no actions at this location yet */
1626                 if ((int)thrd_lists->size() <= tid)
1627                         return;
1628                 action_list_t *list = &(*thrd_lists)[tid];
1629
1630                 action_list_t::reverse_iterator rit = list->rbegin();
1631                 /* Skip past curr */
1632                 for (; (*rit) != curr; rit++)
1633                         ;
1634                 /* go past curr now */
1635                 rit++;
1636
1637                 action_list_t::reverse_iterator ritcopy = rit;
1638                 //See if we have enough reads from the same value
1639                 int count = 0;
1640                 for (; count < params.maxreads; rit++, count++) {
1641                         if (rit == list->rend())
1642                                 return;
1643                         ModelAction *act = *rit;
1644                         if (!act->is_read())
1645                                 return;
1646
1647                         if (act->get_reads_from() != rf)
1648                                 return;
1649                         if (act->get_node()->get_read_from_size() <= 1)
1650                                 return;
1651                 }
1652                 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1653                         /* Get write */
1654                         const ModelAction *write = curr->get_node()->get_read_from_at(i);
1655
1656                         /* Need a different write */
1657                         if (write == rf)
1658                                 continue;
1659
1660                         /* Test to see whether this is a feasible write to read from */
1661                         /** NOTE: all members of read-from set should be
1662                          *  feasible, so we no longer check it here **/
1663
1664                         rit = ritcopy;
1665
1666                         bool feasiblewrite = true;
1667                         //now we need to see if this write works for everyone
1668
1669                         for (int loop = count; loop > 0; loop--, rit++) {
1670                                 ModelAction *act = *rit;
1671                                 bool foundvalue = false;
1672                                 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1673                                         if (act->get_node()->get_read_from_at(j) == write) {
1674                                                 foundvalue = true;
1675                                                 break;
1676                                         }
1677                                 }
1678                                 if (!foundvalue) {
1679                                         feasiblewrite = false;
1680                                         break;
1681                                 }
1682                         }
1683                         if (feasiblewrite) {
1684                                 priv->too_many_reads = true;
1685                                 return;
1686                         }
1687                 }
1688         }
1689 }
1690
1691 /**
1692  * Updates the mo_graph with the constraints imposed from the current
1693  * read.
1694  *
1695  * Basic idea is the following: Go through each other thread and find
1696  * the last action that happened before our read.  Two cases:
1697  *
1698  * (1) The action is a write => that write must either occur before
1699  * the write we read from or be the write we read from.
1700  *
1701  * (2) The action is a read => the write that that action read from
1702  * must occur before the write we read from or be the same write.
1703  *
1704  * @param curr The current action. Must be a read.
1705  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1706  * @return True if modification order edges were added; false otherwise
1707  */
1708 template <typename rf_type>
1709 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1710 {
1711         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1712         unsigned int i;
1713         bool added = false;
1714         ASSERT(curr->is_read());
1715
1716         /* Last SC fence in the current thread */
1717         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1718
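        /*
         * Worked example (illustrative): suppose curr's own thread earlier
         * performed W1: x.store(1), and curr now reads x from W2, a store in
         * another thread. W1 is sequenced before (hence happens before) curr,
         * so the scan below adds the edge W1 --mo--> W2.
         */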
1719         /* Iterate over all threads */
1720         for (i = 0; i < thrd_lists->size(); i++) {
1721                 /* Last SC fence in thread i */
1722                 ModelAction *last_sc_fence_thread_local = NULL;
1723                 if (int_to_id((int)i) != curr->get_tid())
1724                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1725
1726                 /* Last SC fence in thread i, before last SC fence in current thread */
1727                 ModelAction *last_sc_fence_thread_before = NULL;
1728                 if (last_sc_fence_local)
1729                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1730
1731                 /* Iterate over actions in thread, starting from most recent */
1732                 action_list_t *list = &(*thrd_lists)[i];
1733                 action_list_t::reverse_iterator rit;
1734                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1735                         ModelAction *act = *rit;
1736
1737                         if (act->is_write() && !act->equals(rf) && act != curr) {
1738                                 /* C++, Section 29.3 statement 5 */
1739                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1740                                                 *act < *last_sc_fence_thread_local) {
1741                                         added = mo_graph->addEdge(act, rf) || added;
1742                                         break;
1743                                 }
1744                                 /* C++, Section 29.3 statement 4 */
1745                                 else if (act->is_seqcst() && last_sc_fence_local &&
1746                                                 *act < *last_sc_fence_local) {
1747                                         added = mo_graph->addEdge(act, rf) || added;
1748                                         break;
1749                                 }
1750                                 /* C++, Section 29.3 statement 6 */
1751                                 else if (last_sc_fence_thread_before &&
1752                                                 *act < *last_sc_fence_thread_before) {
1753                                         added = mo_graph->addEdge(act, rf) || added;
1754                                         break;
1755                                 }
1756                         }
1757
1758                         /*
1759                          * Include at most one act per-thread that "happens
1760                          * before" curr. Don't consider curr itself (the reflexive case).
1761                          */
1762                         if (act->happens_before(curr) && act != curr) {
1763                                 if (act->is_write()) {
1764                                         if (!act->equals(rf)) {
1765                                                 added = mo_graph->addEdge(act, rf) || added;
1766                                         }
1767                                 } else {
1768                                         const ModelAction *prevreadfrom = act->get_reads_from();
1769                                         //if the previous read is unresolved, keep going...
1770                                         if (prevreadfrom == NULL)
1771                                                 continue;
1772
1773                                         if (!prevreadfrom->equals(rf)) {
1774                                                 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1775                                         }
1776                                 }
1777                                 break;
1778                         }
1779                 }
1780         }
1781
1782         /*
1783          * All compatible, thread-exclusive promises must be ordered after any
1784          * concrete loads from the same thread
1785          */
1786         for (unsigned int i = 0; i < promises->size(); i++)
1787                 if ((*promises)[i]->is_compatible_exclusive(curr))
1788                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1789
1790         return added;
1791 }
1792
1793 /**
1794  * Updates the mo_graph with the constraints imposed from the current write.
1795  *
1796  * Basic idea is the following: Go through each other thread and find
1797  * the latest action that happened before our write.  Two cases:
1798  *
1799  * (1) The action is a write => that write must occur before
1800  * the current write
1801  *
1802  * (2) The action is a read => the write that that action read from
1803  * must occur before the current write.
1804  *
1805  * This method also handles two other issues:
1806  *
1807  * (I) Sequential Consistency: Making sure that if the current write is
1808  * seq_cst, that it occurs after the previous seq_cst write.
1809  *
1810  * (II) Sending the write back to non-synchronizing reads.
1811  *
1812  * @param curr The current action. Must be a write.
1813  * @return True if modification order edges were added; false otherwise
1814  */
1815 bool ModelChecker::w_modification_order(ModelAction *curr)
1816 {
1817         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1818         unsigned int i;
1819         bool added = false;
1820         ASSERT(curr->is_write());
1821
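        /*
         * Illustrative cases for the scans below: if an earlier store W to
         * this location happens before curr, the edge W --mo--> curr is
         * added; if an earlier load that read from W happens before curr,
         * the same edge W --mo--> curr is required.
         */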
1822         if (curr->is_seqcst()) {
1823                 /* We have to at least see the last sequentially consistent write,
1824                          so we are initialized. */
1825                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1826                 if (last_seq_cst != NULL) {
1827                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1828                 }
1829         }
1830
1831         /* Last SC fence in the current thread */
1832         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1833
1834         /* Iterate over all threads */
1835         for (i = 0; i < thrd_lists->size(); i++) {
1836                 /* Last SC fence in thread i, before last SC fence in current thread */
1837                 ModelAction *last_sc_fence_thread_before = NULL;
1838                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1839                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1840
1841                 /* Iterate over actions in thread, starting from most recent */
1842                 action_list_t *list = &(*thrd_lists)[i];
1843                 action_list_t::reverse_iterator rit;
1844                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1845                         ModelAction *act = *rit;
1846                         if (act == curr) {
1847                                 /*
1848                                  * 1) If RMW and it actually read from something, then we
1849                                  * already have all relevant edges, so just skip to next
1850                                  * thread.
1851                                  *
1852                                  * 2) If RMW and it didn't read from anything, we should add
1853                                  * whatever edge we can get to speed up convergence.
1854                                  *
1855                                  * 3) If normal write, we need to look at earlier actions, so
1856                                  * continue processing list.
1857                                  */
1858                                 if (curr->is_rmw()) {
1859                                         if (curr->get_reads_from() != NULL)
1860                                                 break;
1861                                         else
1862                                                 continue;
1863                                 } else
1864                                         continue;
1865                         }
1866
1867                         /* C++, Section 29.3 statement 7 */
1868                         if (last_sc_fence_thread_before && act->is_write() &&
1869                                         *act < *last_sc_fence_thread_before) {
1870                                 added = mo_graph->addEdge(act, curr) || added;
1871                                 break;
1872                         }
1873
1874                         /*
1875                          * Include at most one act per-thread that "happens
1876                          * before" curr
1877                          */
1878                         if (act->happens_before(curr)) {
1879                                 /*
1880                                  * Note: if act is RMW, just add edge:
1881                                  *   act --mo--> curr
1882                                  * The following edge should be handled elsewhere:
1883                                  *   readfrom(act) --mo--> act
1884                                  */
1885                                 if (act->is_write())
1886                                         added = mo_graph->addEdge(act, curr) || added;
1887                                 else if (act->is_read()) {
1888                                         //if previous read accessed a null, just keep going
1889                                         if (act->get_reads_from() == NULL)
1890                                                 continue;
1891                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1892                                 }
1893                                 break;
1894                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1895                                                      !act->same_thread(curr)) {
1896                                 /* We have an action that:
1897                                    (1) did not happen before us
1898                                    (2) is a read and we are a write
1899                                    (3) cannot synchronize with us
1900                                    (4) is in a different thread
1901                                    =>
1902                                    that read could potentially read from our write.  Note that
1903                                    these checks are overly conservative at this point; we'll
1904                                    do more checks before actually removing the
1905                                    PendingFutureValue.
1906
1907                                  */
1908                                 if (thin_air_constraint_may_allow(curr, act)) {
1909                                         if (!is_infeasible())
1910                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1911                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1912                                                 add_future_value(curr, act);
1913                                 }
1914                         }
1915                 }
1916         }
1917
1918         /*
1919          * All compatible, thread-exclusive promises must be ordered after any
1920  * concrete stores from the same thread, or else they can be merged with
1921          * this store later
1922          */
1923         for (unsigned int i = 0; i < promises->size(); i++)
1924                 if ((*promises)[i]->is_compatible_exclusive(curr))
1925                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1926
1927         return added;
1928 }
1929
1930 /** Arbitrary reads from the future are not allowed.  Section 29.3
1931  * part 9 places some constraints.  This method checks one result of that
1932  * constraint.  Others require compiler support. */
1933 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1934 {
1935         if (!writer->is_rmw())
1936                 return true;
1937
1938         if (!reader->is_rmw())
1939                 return true;
1940
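        /* Walk the writer's reads-from chain backwards; if it ever reaches the
         * reader itself, allowing this rf edge would close an RMW cycle (an
         * out-of-thin-air result), so disallow it. The walk stops once it
         * passes an action in the reader's thread that already happens before
         * the reader. */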
1941         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1942                 if (search == reader)
1943                         return false;
1944                 if (search->get_tid() == reader->get_tid() &&
1945                                 search->happens_before(reader))
1946                         break;
1947         }
1948
1949         return true;
1950 }
1951
1952 /**
1953  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1954  * some constraints. This method checks the following constraint (others
1955  * require compiler support):
1956  *
1957  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1958  */
1959 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1960 {
1961         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1962         unsigned int i;
1963         /* Iterate over all threads */
1964         for (i = 0; i < thrd_lists->size(); i++) {
1965                 const ModelAction *write_after_read = NULL;
1966
1967                 /* Iterate over actions in thread, starting from most recent */
1968                 action_list_t *list = &(*thrd_lists)[i];
1969                 action_list_t::reverse_iterator rit;
1970                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1971                         ModelAction *act = *rit;
1972
1973                         /* Don't disallow due to act == reader */
1974                         if (!reader->happens_before(act) || reader == act)
1975                                 break;
1976                         else if (act->is_write())
1977                                 write_after_read = act;
1978                         else if (act->is_read() && act->get_reads_from() != NULL)
1979                                 write_after_read = act->get_reads_from();
1980                 }
1981
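                /* write_after_read is the earliest store (or store read by a
                 * load) in this thread that the reader happens-before; if that
                 * store is already mo-ordered before 'writer', then
                 * reader --hb--> write_after_read --mo--> writer, and reading
                 * from 'writer' would violate the constraint above. */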
1982                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1983                         return false;
1984         }
1985         return true;
1986 }
1987
1988 /**
1989  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1990  * The ModelAction under consideration is expected to be taking part in
1991  * release/acquire synchronization as an object of the "reads from" relation.
1992  * Note that this can only provide release sequence support for RMW chains
1993  * which do not read from the future, as those actions cannot be traced until
1994  * their "promise" is fulfilled. Similarly, we may not even establish the
1995  * presence of a release sequence with certainty, as some modification order
1996  * constraints may be decided further in the future. Thus, this function
1997  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1998  * and a boolean representing certainty.
1999  *
2000  * @param rf The action that might be part of a release sequence. Must be a
2001  * write.
2002  * @param release_heads A pass-by-reference style return parameter. After
2003  * execution of this function, release_heads will contain the heads of all the
2004  * relevant release sequences, if any exist with certainty
2005  * @param pending A pass-by-reference style return parameter which is only used
2006  * when returning false (i.e., uncertain). Returns most information regarding
2007  * an uncertain release sequence, including any write operations that might
2008  * break the sequence.
2009  * @return true, if the ModelChecker is certain that release_heads is complete;
2010  * false otherwise
2011  */
2012 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2013                 rel_heads_list_t *release_heads,
2014                 struct release_seq *pending) const
2015 {
2016         /* Only check for release sequences if there are no cycles */
2017         if (mo_graph->checkForCycles())
2018                 return false;
2019
2020         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2021                 ASSERT(rf->is_write());
2022
2023                 if (rf->is_release())
2024                         release_heads->push_back(rf);
2025                 else if (rf->get_last_fence_release())
2026                         release_heads->push_back(rf->get_last_fence_release());
2027                 if (!rf->is_rmw())
2028                         break; /* End of RMW chain */
2029
2030                 /** @todo Need to be smarter here...  In the linux lock
2031                  * example, this will run to the beginning of the program for
2032                  * every acquire. */
2033                 /** @todo The way to be smarter here is to keep going until 1
2034                  * thread has a release preceded by an acquire and you've seen
2035                  *       both. */
2036
2037                 /* acq_rel RMW is a sufficient stopping condition */
2038                 if (rf->is_acquire() && rf->is_release())
2039                         return true; /* complete */
2040         }
2041         if (!rf) {
2042                 /* read from future: need to settle this later */
2043                 pending->rf = NULL;
2044                 return false; /* incomplete */
2045         }
2046
2047         if (rf->is_release())
2048                 return true; /* complete */
2049
2050         /* else relaxed write
2051          * - check for fence-release in the same thread (29.8, stmt. 3)
2052          * - check modification order for contiguous subsequence
2053          *   -> rf must be same thread as release */
2054
2055         const ModelAction *fence_release = rf->get_last_fence_release();
2056         /* Synchronize with a fence-release unconditionally; we don't need to
2057          * find any more "contiguous subsequence..." for it */
2058         if (fence_release)
2059                 release_heads->push_back(fence_release);
2060
2061         int tid = id_to_int(rf->get_tid());
2062         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2063         action_list_t *list = &(*thrd_lists)[tid];
2064         action_list_t::const_reverse_iterator rit;
2065
2066         /* Find rf in the thread list */
2067         rit = std::find(list->rbegin(), list->rend(), rf);
2068         ASSERT(rit != list->rend());
2069
2070         /* Find the last {write,fence}-release */
2071         for (; rit != list->rend(); rit++) {
2072                 if (fence_release && *(*rit) < *fence_release)
2073                         break;
2074                 if ((*rit)->is_release())
2075                         break;
2076         }
2077         if (rit == list->rend()) {
2078                 /* No write-release in this thread */
2079                 return true; /* complete */
2080         } else if (fence_release && *(*rit) < *fence_release) {
2081                 /* The fence-release is more recent (and so, "stronger") than
2082                  * the most recent write-release */
2083                 return true; /* complete */
2084         } /* else, need to establish contiguous release sequence */
2085         ModelAction *release = *rit;
2086
2087         ASSERT(rf->same_thread(release));
2088
2089         pending->writes.clear();
2090
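        /*
         * A relaxed store continues a release sequence only if the
         * modification order between the release head and rf is a contiguous
         * run of stores from the releasing thread (or RMWs). Scan every other
         * thread for a plain store that might interpose; any store that
         * cannot yet be ruled out is recorded in pending->writes.
         */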
2091         bool certain = true;
2092         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2093                 if (id_to_int(rf->get_tid()) == (int)i)
2094                         continue;
2095                 list = &(*thrd_lists)[i];
2096
2097                 /* Can we ensure no future writes from this thread may break
2098                  * the release seq? */
2099                 bool future_ordered = false;
2100
2101                 ModelAction *last = get_last_action(int_to_id(i));
2102                 Thread *th = get_thread(int_to_id(i));
2103                 if ((last && rf->happens_before(last)) ||
2104                                 !is_enabled(th) ||
2105                                 th->is_complete())
2106                         future_ordered = true;
2107
2108                 ASSERT(!th->is_model_thread() || future_ordered);
2109
2110                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2111                         const ModelAction *act = *rit;
2112                         /* Reach synchronization -> this thread is complete */
2113                         if (act->happens_before(release))
2114                                 break;
2115                         if (rf->happens_before(act)) {
2116                                 future_ordered = true;
2117                                 continue;
2118                         }
2119
2120                         /* Only non-RMW writes can break release sequences */
2121                         if (!act->is_write() || act->is_rmw())
2122                                 continue;
2123
2124                         /* Check modification order */
2125                         if (mo_graph->checkReachable(rf, act)) {
2126                                 /* rf --mo--> act */
2127                                 future_ordered = true;
2128                                 continue;
2129                         }
2130                         if (mo_graph->checkReachable(act, release))
2131                                 /* act --mo--> release */
2132                                 break;
2133                         if (mo_graph->checkReachable(release, act) &&
2134                                       mo_graph->checkReachable(act, rf)) {
2135                                 /* release --mo-> act --mo--> rf */
2136                                 return true; /* complete */
2137                         }
2138                         /* act may break release sequence */
2139                         pending->writes.push_back(act);
2140                         certain = false;
2141                 }
2142                 if (!future_ordered)
2143                         certain = false; /* This thread is uncertain */
2144         }
2145
2146         if (certain) {
2147                 release_heads->push_back(release);
2148                 pending->writes.clear();
2149         } else {
2150                 pending->release = release;
2151                 pending->rf = rf;
2152         }
2153         return certain;
2154 }
2155
2156 /**
2157  * An interface for getting the release sequence head(s) with which a
2158  * given ModelAction must synchronize. This function only returns a non-empty
2159  * result when it can locate a release sequence head with certainty. Otherwise,
2160  * it may mark the internal state of the ModelChecker so that it will handle
2161  * the release sequence at a later time, causing @a acquire to update its
2162  * synchronization at some later point in execution.
2163  *
2164  * @param acquire The 'acquire' action that may synchronize with a release
2165  * sequence
2166  * @param read The read action that may read from a release sequence; this may
2167  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2168  * when 'acquire' is a fence-acquire)
2169  * @param release_heads A pass-by-reference return parameter. Will be filled
2170  * with the head(s) of the release sequence(s), if they exist with certainty.
2171  * @see ModelChecker::release_seq_heads
2172  */
2173 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2174                 ModelAction *read, rel_heads_list_t *release_heads)
2175 {
2176         const ModelAction *rf = read->get_reads_from();
2177         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2178         sequence->acquire = acquire;
2179         sequence->read = read;
2180
2181         if (!release_seq_heads(rf, release_heads, sequence)) {
2182                 /* add act to 'lazy checking' list */
2183                 pending_rel_seqs->push_back(sequence);
2184         } else {
2185                 snapshot_free(sequence);
2186         }
2187 }
2188
2189 /**
2190  * Attempt to resolve all stashed operations that might synchronize with a
2191  * release sequence for a given location. This implements the "lazy" portion of
2192  * determining whether or not a release sequence was contiguous, since not all
2193  * modification order information is present at the time an action occurs.
2194  *
2195  * @param location The location/object that should be checked for release
2196  * sequence resolutions. A NULL value means to check all locations.
2197  * @param work_queue The work queue to which to add work items as they are
2198  * generated
2199  * @return True if any updates occurred (new synchronization, new mo_graph
2200  * edges)
2201  */
2202 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2203 {
2204         bool updated = false;
2205         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2206         while (it != pending_rel_seqs->end()) {
2207                 struct release_seq *pending = *it;
2208                 ModelAction *acquire = pending->acquire;
2209                 const ModelAction *read = pending->read;
2210
2211                 /* Only resolve sequences on the given location, if provided */
2212                 if (location && read->get_location() != location) {
2213                         it++;
2214                         continue;
2215                 }
2216
2217                 const ModelAction *rf = read->get_reads_from();
2218                 rel_heads_list_t release_heads;
2219                 bool complete;
2220                 complete = release_seq_heads(rf, &release_heads, pending);
2221                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2222                         if (!acquire->has_synchronized_with(release_heads[i])) {
2223                                 if (acquire->synchronize_with(release_heads[i]))
2224                                         updated = true;
2225                                 else
2226                                         set_bad_synchronization();
2227                         }
2228                 }
2229
2230                 if (updated) {
2231                         /* Re-check all pending release sequences */
2232                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2233                         /* Re-check read-acquire for mo_graph edges */
2234                         if (acquire->is_read())
2235                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2236
2237                         /* propagate synchronization to later actions */
2238                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2239                         for (; (*rit) != acquire; rit++) {
2240                                 ModelAction *propagate = *rit;
2241                                 if (acquire->happens_before(propagate)) {
2242                                         propagate->synchronize_with(acquire);
2243                                         /* Re-check 'propagate' for mo_graph edges */
2244                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2245                                 }
2246                         }
2247                 }
2248                 if (complete) {
2249                         it = pending_rel_seqs->erase(it);
2250                         snapshot_free(pending);
2251                 } else {
2252                         it++;
2253                 }
2254         }
2255
2256         // If new synchronization was established above, check whether we have now realized a data race.
2257         checkDataRaces();
2258
2259         return updated;
2260 }
2261
2262 /**
2263  * Performs various bookkeeping operations for the current ModelAction. For
2264  * instance, adds action to the per-object, per-thread action vector and to the
2265  * action trace list of all thread actions.
2266  *
2267  * @param act is the ModelAction to add.
2268  */
2269 void ModelChecker::add_action_to_lists(ModelAction *act)
2270 {
2271         int tid = id_to_int(act->get_tid());
2272         ModelAction *uninit = NULL;
2273         int uninit_id = -1;
2274         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
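        /* On the first access to an atomic object, fabricate an "uninitialized"
         * store so that loads from a never-written location still have a
         * candidate write to read from. */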
2275         if (list->empty() && act->is_atomic_var()) {
2276                 uninit = new_uninitialized_action(act->get_location());
2277                 uninit_id = id_to_int(uninit->get_tid());
2278                 list->push_back(uninit);
2279         }
2280         list->push_back(act);
2281
2282         action_trace->push_back(act);
2283         if (uninit)
2284                 action_trace->push_front(uninit);
2285
2286         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2287         if (tid >= (int)vec->size())
2288                 vec->resize(priv->next_thread_id);
2289         (*vec)[tid].push_back(act);
2290         if (uninit)
2291                 (*vec)[uninit_id].push_front(uninit);
2292
2293         if ((int)thrd_last_action->size() <= tid)
2294                 thrd_last_action->resize(get_num_threads());
2295         (*thrd_last_action)[tid] = act;
2296         if (uninit)
2297                 (*thrd_last_action)[uninit_id] = uninit;
2298
2299         if (act->is_fence() && act->is_release()) {
2300                 if ((int)thrd_last_fence_release->size() <= tid)
2301                         thrd_last_fence_release->resize(get_num_threads());
2302                 (*thrd_last_fence_release)[tid] = act;
2303         }
2304
2305         if (act->is_wait()) {
2306                 void *mutex_loc = (void *) act->get_value();
2307                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2308
2309                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2310                 if (tid >= (int)vec->size())
2311                         vec->resize(priv->next_thread_id);
2312                 (*vec)[tid].push_back(act);
2313         }
2314 }
2315
2316 /**
2317  * @brief Get the last action performed by a particular Thread
2318  * @param tid The thread ID of the Thread in question
2319  * @return The last action in the thread
2320  */
2321 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2322 {
2323         int threadid = id_to_int(tid);
2324         if (threadid < (int)thrd_last_action->size())
2325                 return (*thrd_last_action)[id_to_int(tid)];
2326         else
2327                 return NULL;
2328 }
2329
2330 /**
2331  * @brief Get the last fence release performed by a particular Thread
2332  * @param tid The thread ID of the Thread in question
2333  * @return The last fence release in the thread, if one exists; NULL otherwise
2334  */
2335 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2336 {
2337         int threadid = id_to_int(tid);
2338         if (threadid < (int)thrd_last_fence_release->size())
2339                 return (*thrd_last_fence_release)[id_to_int(tid)];
2340         else
2341                 return NULL;
2342 }
2343
2344 /**
2345  * Gets the last memory_order_seq_cst write (in the total global sequence)
2346  * performed on a particular object (i.e., memory location), not including the
2347  * current action.
2348  * @param curr The current ModelAction; also denotes the object location to
2349  * check
2350  * @return The last seq_cst write
2351  */
2352 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2353 {
2354         void *location = curr->get_location();
2355         action_list_t *list = get_safe_ptr_action(obj_map, location);
2356         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2357         action_list_t::reverse_iterator rit;
2358         for (rit = list->rbegin(); rit != list->rend(); rit++)
2359                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2360                         return *rit;
2361         return NULL;
2362 }
2363
2364 /**
2365  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2366  * performed in a particular thread, prior to a particular fence.
2367  * @param tid The ID of the thread to check
2368  * @param before_fence The fence from which to begin the search; if NULL, then
2369  * search for the most recent fence in the thread.
2370  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2371  */
2372 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2373 {
2374         /* All fences should have NULL location */
2375         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2376         action_list_t::reverse_iterator rit = list->rbegin();
2377
2378         if (before_fence) {
2379                 for (; rit != list->rend(); rit++)
2380                         if (*rit == before_fence)
2381                                 break;
2382
2383                 ASSERT(rit != list->rend() && *rit == before_fence);
2384                 rit++;
2385         }
2386
2387         for (; rit != list->rend(); rit++)
2388                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2389                         return *rit;
2390         return NULL;
2391 }
2392
2393 /**
2394  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2395  * location). This function identifies the mutex according to the current
2396  * action, which is presumed to operate on the same mutex.
2397  * @param curr The current ModelAction; also denotes the object location to
2398  * check
2399  * @return The last unlock operation
2400  */
2401 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2402 {
2403         void *location = curr->get_location();
2404         action_list_t *list = get_safe_ptr_action(obj_map, location);
2405         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2406         action_list_t::reverse_iterator rit;
2407         for (rit = list->rbegin(); rit != list->rend(); rit++)
2408                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2409                         return *rit;
2410         return NULL;
2411 }
2412
2413 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2414 {
2415         ModelAction *parent = get_last_action(tid);
2416         if (!parent)
2417                 parent = get_thread(tid)->get_creation();
2418         return parent;
2419 }
2420
2421 /**
2422  * Returns the clock vector for a given thread.
2423  * @param tid The thread whose clock vector we want
2424  * @return Desired clock vector
2425  */
2426 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2427 {
2428         return get_parent_action(tid)->get_cv();
2429 }
2430
2431 /**
2432  * Resolve a set of Promises with a current write. The set is provided in the
2433  * Node corresponding to @a write.
2434  * @param write The ModelAction that is fulfilling Promises
2435  * @return True if promises were resolved; false otherwise
2436  */
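/* Background: a Promise stands in for a store that has not yet been generated;
 * a load that speculatively read a "future value" remains bound to such a
 * placeholder until a matching concrete store appears. Resolution replaces the
 * placeholder with this write. */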
2437 bool ModelChecker::resolve_promises(ModelAction *write)
2438 {
2439         bool haveResolved = false;
2440         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2441         promise_list_t mustResolve, resolved;
2442
2443         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2444                 Promise *promise = (*promises)[promise_index];
2445                 if (write->get_node()->get_promise(i)) {
2446                         ModelAction *read = promise->get_action();
2447                         read_from(read, write);
2448                         //Make sure the promise's value matches the write's value
2449                         ASSERT(promise->is_compatible(write));
2450                         mo_graph->resolvePromise(read, write, &mustResolve);
2451
2452                         resolved.push_back(promise);
2453                         promises->erase(promises->begin() + promise_index);
2454                         actions_to_check.push_back(read);
2455
2456                         haveResolved = true;
2457                 } else
2458                         promise_index++;
2459         }
2460
2461         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2462                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2463                                 == resolved.end())
2464                         priv->failed_promise = true;
2465         }
2466         for (unsigned int i = 0; i < resolved.size(); i++)
2467                 delete resolved[i];
2468         //Check whether reading these writes has made threads unable to
2469         //resolve promises
2470
2471         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2472                 ModelAction *read = actions_to_check[i];
2473                 mo_check_promises(read, true);
2474         }
2475
2476         return haveResolved;
2477 }
2478
2479 /**
2480  * Compute the set of promises that could potentially be satisfied by this
2481  * action. Note that the set computation actually appears in the Node, not in
2482  * ModelChecker.
2483  * @param curr The ModelAction that may satisfy promises
2484  */
2485 void ModelChecker::compute_promises(ModelAction *curr)
2486 {
2487         for (unsigned int i = 0; i < promises->size(); i++) {
2488                 Promise *promise = (*promises)[i];
2489                 const ModelAction *act = promise->get_action();
2490                 ASSERT(act->is_read());
2491                 if (!act->happens_before(curr) &&
2492                                 !act->could_synchronize_with(curr) &&
2493                                 promise->is_compatible(curr) &&
2494                                 promise->get_value() == curr->get_value()) {
2495                         curr->get_node()->set_promise(i, act->is_rmw());
2496                 }
2497         }
2498 }
2499
2500 /** Checks promises in response to a change in a thread's ClockVector. */
2501 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2502 {
2503         for (unsigned int i = 0; i < promises->size(); i++) {
2504                 Promise *promise = (*promises)[i];
2505                 const ModelAction *act = promise->get_action();
2506                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2507                                 merge_cv->synchronized_since(act)) {
2508                         if (promise->eliminate_thread(tid)) {
2509                                 //Promise has failed
2510                                 priv->failed_promise = true;
2511                                 return;
2512                         }
2513                 }
2514         }
2515 }
2516
2517 void ModelChecker::check_promises_thread_disabled()
2518 {
2519         for (unsigned int i = 0; i < promises->size(); i++) {
2520                 Promise *promise = (*promises)[i];
2521                 if (promise->has_failed()) {
2522                         priv->failed_promise = true;
2523                         return;
2524                 }
2525         }
2526 }
2527
2528 /**
2529  * @brief Checks promises in response to an addition to the modification
2530  * order.
2531  *
2532  * We test whether threads are still available for satisfying promises after an
2533  * addition to our modification order constraints. Those that are unavailable
2534  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2535  * that promise has failed.
2536  *
2537  * @param act The ModelAction which updated the modification order
2538  * @param is_read_check Should be true if act is a read and we must check for
2539  * updates to the store from which it read (there is a distinction here for
2540  * RMWs, which are both a load and a store)
2541  */
2542 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2543 {
2544         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2545
2546         for (unsigned int i = 0; i < promises->size(); i++) {
2547                 Promise *promise = (*promises)[i];
2548                 const ModelAction *pread = promise->get_action();
2549
2550                 // Is this promise on the same location?
2551                 if (!pread->same_var(write))
2552                         continue;
2553
2554                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2555                         priv->failed_promise = true;
2556                         return;
2557                 }
2558
2559                 // Don't do any lookups twice for the same thread
2560                 if (!promise->thread_is_available(act->get_tid()))
2561                         continue;
2562
2563                 if (mo_graph->checkReachable(promise, write)) {
2564                         if (mo_graph->checkPromise(write, promise)) {
2565                                 priv->failed_promise = true;
2566                                 return;
2567                         }
2568                 }
2569         }
2570 }
2571
2572 /**
2573  * Compute the set of writes that may break the current pending release
2574  * sequence. This information is extracted from previous release sequence
2575  * calculations.
2576  *
2577  * @param curr The current ModelAction. Must be a release sequence fixup
2578  * action.
2579  */
2580 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2581 {
2582         if (pending_rel_seqs->empty())
2583                 return;
2584
2585         struct release_seq *pending = pending_rel_seqs->back();
2586         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2587                 const ModelAction *write = pending->writes[i];
2588                 curr->get_node()->add_relseq_break(write);
2589         }
2590
2591         /* NULL means don't break the sequence; just synchronize */
2592         curr->get_node()->add_relseq_break(NULL);
2593 }
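
/*
 * Worked example (editor's addition): if the pending release sequence's
 * "writes" list currently holds two stores, W1 and W2, the loop above gives
 * the fixup action's Node three alternatives to explore across executions:
 *
 *   add_relseq_break(W1);    // break the sequence at W1
 *   add_relseq_break(W2);    // break the sequence at W2
 *   add_relseq_break(NULL);  // don't break it; just synchronize
 */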
2594
2595 /**
2596  * Build up an initial set of all past writes that this 'read' action may read
2597  * from, as well as any previously-observed future values that must still be valid.
2598  *
2599  * @param curr is the current ModelAction that we are exploring; it must be a
2600  * 'read' operation.
2601  */
2602 void ModelChecker::build_may_read_from(ModelAction *curr)
2603 {
2604         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2605         unsigned int i;
2606         ASSERT(curr->is_read());
2607
2608         ModelAction *last_sc_write = NULL;
2609
2610         if (curr->is_seqcst())
2611                 last_sc_write = get_last_seq_cst_write(curr);
2612
2613         /* Iterate over all threads */
2614         for (i = 0; i < thrd_lists->size(); i++) {
2615                 /* Iterate over actions in thread, starting from most recent */
2616                 action_list_t *list = &(*thrd_lists)[i];
2617                 action_list_t::reverse_iterator rit;
2618                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2619                         ModelAction *act = *rit;
2620
2621                         /* Only consider 'write' actions */
2622                         if (!act->is_write() || act == curr)
2623                                 continue;
2624
2625                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2626                         bool allow_read = true;
2627
2628                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2629                                 allow_read = false;
2630                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2631                                 allow_read = false;
2632
2633                         if (allow_read) {
2634                                 /* Only add feasible reads */
2635                                 mo_graph->startChanges();
2636                                 r_modification_order(curr, act);
2637                                 if (!is_infeasible())
2638                                         curr->get_node()->add_read_from(act);
2639                                 mo_graph->rollbackChanges();
2640                         }
2641
2642                         /* Include at most one act per-thread that "happens before" curr */
2643                         if (act->happens_before(curr))
2644                                 break;
2645                 }
2646         }
2647
2648         /* Inherit existing, promised future values */
2649         for (i = 0; i < promises->size(); i++) {
2650                 const Promise *promise = (*promises)[i];
2651                 const ModelAction *promise_read = promise->get_action();
2652                 if (promise_read->same_var(curr)) {
2653                         /* Only add feasible future-values */
2654                         mo_graph->startChanges();
2655                         r_modification_order(curr, promise);
2656                         if (!is_infeasible()) {
2657                                 const struct future_value fv = promise->get_fv();
2658                                 curr->get_node()->add_future_value(fv);
2659                         }
2660                         mo_graph->rollbackChanges();
2661                 }
2662         }
2663
2664         /* We may find no valid may-read-from only if the execution is doomed */
2665         if (!curr->get_node()->get_read_from_size() && curr->get_node()->future_value_empty()) {
2666                 priv->no_valid_reads = true;
2667                 set_assert();
2668         }
2669
2670         if (DBG_ENABLED()) {
2671                 model_print("Reached read action:\n");
2672                 curr->print();
2673                 model_print("Printing may_read_from\n");
2674                 curr->get_node()->print_may_read_from();
2675                 model_print("End printing may_read_from\n");
2676         }
2677 }
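
/*
 * Worked example (editor's addition). Suppose curr is a relaxed load of x in
 * thread T2, and thread T1's history for x is:
 *
 *   T1: W_a: x.store(1, relaxed)   ...   W_b: x.store(2, relaxed)
 *   T2: (created after W_b)        curr: r1 = x.load(std::memory_order_relaxed)
 *
 * Both stores happen before the load (say, because T2 was created after
 * them). Walking T1's list in reverse, W_b is considered first; assuming it
 * keeps the mo_graph feasible, it is added to curr's may-read-from set, and
 * the walk then stops because W_b happens before curr, so the older W_a is
 * never offered as a read-from candidate. The seq_cst and sleep-set checks
 * above can further restrict which stores are allowed, and the second loop
 * adds any still-feasible promised future values.
 */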
2678
2679 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2680 {
2681         for ( ; write != NULL; write = write->get_reads_from()) {
2682                 /* UNINIT actions don't have a Node, and they never sleep */
2683                 if (write->is_uninitialized())
2684                         return true;
2685                 Node *prevnode = write->get_node()->get_parent();
2686
2687                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2688                 if (write->is_release() && thread_sleep)
2689                         return true;
2690                 if (!write->is_rmw())
2691                         return false;
2692         }
2693         return true;
2694 }
2695
2696 /**
2697  * @brief Create a new action representing an uninitialized atomic
2698  * @param location The memory location of the atomic object
2699  * @return A pointer to a new ModelAction
2700  */
2701 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2702 {
2703         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2704         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2705         act->create_cv(NULL);
2706         return act;
2707 }
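
/*
 * Editor's note: the snapshot_malloc + placement-new pair above keeps the
 * UNINIT pseudo-action in snapshotted memory, presumably bypassing whatever
 * allocator ModelAction's ordinary operator new would select. In isolation
 * the pattern is just:
 *
 *   void *mem = snapshot_malloc(sizeof(ModelAction));
 *   ModelAction *act = new (mem) ModelAction(ATOMIC_UNINIT,
 *                   std::memory_order_relaxed, location, 0, model_thread);
 *
 * (Placement new requires the <new> header.)
 */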
2708
2709 static void print_list(action_list_t *list)
2710 {
2711         action_list_t::iterator it;
2712
2713         model_print("---------------------------------------------------------------------\n");
2714
2715         unsigned int hash = 0;
2716
2717         for (it = list->begin(); it != list->end(); it++) {
2718                 (*it)->print();
2719                 hash = hash^(hash<<3)^((*it)->hash());
2720         }
2721         model_print("HASH %u\n", hash);
2722         model_print("---------------------------------------------------------------------\n");
2723 }
2724
2725 #if SUPPORT_MOD_ORDER_DUMP
2726 void ModelChecker::dumpGraph(char *filename) const
2727 {
2728         char buffer[200];
2729         sprintf(buffer, "%s.dot", filename);
2730         FILE *file = fopen(buffer, "w");
2731         fprintf(file, "digraph %s {\n", filename);
2732         mo_graph->dumpNodes(file);
2733         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2734
2735         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2736                 ModelAction *action = *it;
2737                 if (action->is_read()) {
2738                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2739                         if (action->get_reads_from() != NULL)
2740                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2741                 }
2742                 if (thread_array[action->get_tid()] != NULL) {
2743                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2744                 }
2745
2746                 thread_array[action->get_tid()] = action;
2747         }
2748         fprintf(file, "}\n");
2749         model_free(thread_array);
2750         fclose(file);
2751 }
2752 #endif
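
/*
 * Sketch of the emitted .dot file's shape (editor's addition, derived from
 * the format strings above; node numbers are per-execution sequence numbers):
 *
 *   digraph graph0001 {
 *   ...                                  // mo edges from mo_graph->dumpNodes()
 *   N7 [label="N7, T1"];                 // a read: seq 7, thread 1
 *   N7 -> N3[label="rf", color=red];     // N7 reads from store N3
 *   N5 -> N7[label="sb", color=blue];    // N5 precedes N7 in thread 1
 *   }
 *
 * A file like graph0001.dot can be rendered with Graphviz, e.g.
 * `dot -Tpng graph0001.dot -o graph0001.png`.
 */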
2753
2754 /** @brief Prints an execution trace summary. */
2755 void ModelChecker::print_summary() const
2756 {
2757 #if SUPPORT_MOD_ORDER_DUMP
2758         char buffername[100];
2759         sprintf(buffername, "exec%04u", stats.num_total);
2760         mo_graph->dumpGraphToFile(buffername);
2761         sprintf(buffername, "graph%04u", stats.num_total);
2762         dumpGraph(buffername);
2763 #endif
2764
2765         model_print("Execution %d:", stats.num_total);
2766         if (isfeasibleprefix())
2767                 model_print("\n");
2768         else
2769                 print_infeasibility(" INFEASIBLE");
2770         print_list(action_trace);
2771         model_print("\n");
2772 }
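
/*
 * Example of the resulting summary shape (editor's addition; the actions,
 * hash, and any infeasibility note vary per execution):
 *
 *   Execution 5:
 *   ---------------------------------------------------------------------
 *   <one line per ModelAction in action_trace, via ModelAction::print()>
 *   HASH 1483240180
 *   ---------------------------------------------------------------------
 */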
2773
2774 /**
2775  * Add a Thread to the system for the first time. Should only be called once
2776  * per thread.
2777  * @param t The Thread to add
2778  */
2779 void ModelChecker::add_thread(Thread *t)
2780 {
2781         thread_map->put(id_to_int(t->get_id()), t);
2782         scheduler->add_thread(t);
2783 }
2784
2785 /**
2786  * Removes a thread from the scheduler.
2787  * @param t The Thread to remove
2788  */
2789 void ModelChecker::remove_thread(Thread *t)
2790 {
2791         scheduler->remove_thread(t);
2792 }
2793
2794 /**
2795  * @brief Get a Thread reference by its ID
2796  * @param tid The Thread's ID
2797  * @return A Thread reference
2798  */
2799 Thread * ModelChecker::get_thread(thread_id_t tid) const
2800 {
2801         return thread_map->get(id_to_int(tid));
2802 }
2803
2804 /**
2805  * @brief Get a reference to the Thread in which a ModelAction was executed
2806  * @param act The ModelAction
2807  * @return A Thread reference
2808  */
2809 Thread * ModelChecker::get_thread(const ModelAction *act) const
2810 {
2811         return get_thread(act->get_tid());
2812 }
2813
2814 /**
2815  * @brief Check if a Thread is currently enabled
2816  * @param t The Thread to check
2817  * @return True if the Thread is currently enabled
2818  */
2819 bool ModelChecker::is_enabled(Thread *t) const
2820 {
2821         return scheduler->is_enabled(t);
2822 }
2823
2824 /**
2825  * @brief Check if a Thread is currently enabled
2826  * @param tid The ID of the Thread to check
2827  * @return True if the Thread is currently enabled
2828  */
2829 bool ModelChecker::is_enabled(thread_id_t tid) const
2830 {
2831         return scheduler->is_enabled(tid);
2832 }
2833
2834 /**
2835  * Switch from a model-checker context to a user-thread context. This is the
2836  * complement of ModelChecker::switch_to_master and must be called from the
2837  * model-checker context
2838  *
2839  * @param thread The user-thread to switch to
2840  */
2841 void ModelChecker::switch_from_master(Thread *thread)
2842 {
2843         scheduler->set_current_thread(thread);
2844         Thread::swap(&system_context, thread);
2845 }
2846
2847 /**
2848  * Switch from a user-thread context to the "master thread" context (a.k.a. system
2849  * context). This switch is made with the intention of exploring a particular
2850  * model-checking action (described by a ModelAction object). Must be called
2851  * from a user-thread context.
2852  *
2853  * @param act The current action that will be explored. May be NULL only if
2854  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2855  * ModelChecker::has_asserted).
2856  * @return The value returned by the current action
2857  */
2858 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2859 {
2860         DBG();
2861         Thread *old = thread_current();
2862         ASSERT(!old->get_pending());
2863         old->set_pending(act);
2864         if (Thread::swap(old, &system_context) < 0) {
2865                 perror("swap threads");
2866                 exit(EXIT_FAILURE);
2867         }
2868         return old->get_return_value();
2869 }
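
/*
 * Sketch of the intended call pattern (editor's addition; the wrapper name is
 * hypothetical, and the real interposition code lives outside this file): an
 * instrumented atomic operation builds a ModelAction in the user thread and
 * yields to the model checker, which later resumes the thread with the chosen
 * result as switch_to_master()'s return value:
 *
 *   uint64_t model_checked_load(void *obj, std::memory_order order)
 *   {
 *           return model->switch_to_master(
 *                           new ModelAction(ATOMIC_READ, order, obj));
 *   }
 */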
2870
2871 /**
2872  * Takes the next step in the execution, if possible.
2873  * @param curr The current step to take
2874  * @return Returns the next Thread to run, if any; NULL if this execution
2875  * should terminate
2876  */
2877 Thread * ModelChecker::take_step(ModelAction *curr)
2878 {
2879         Thread *curr_thrd = get_thread(curr);
2880         ASSERT(curr_thrd->get_state() == THREAD_READY);
2881
2882         curr = check_current_action(curr);
2883
2884         /* Infeasible -> don't take any more steps */
2885         if (is_infeasible())
2886                 return NULL;
2887         else if (isfeasibleprefix() && have_bug_reports()) {
2888                 set_assert();
2889                 return NULL;
2890         }
2891
2892         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2893                 return NULL;
2894
2895         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2896                 scheduler->remove_thread(curr_thrd);
2897
2898         Thread *next_thrd = get_next_thread(curr);
2899
2900         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2901                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2902
2903         return next_thrd;
2904 }
2905
2906 /** Wrapper to run the user's main function, with appropriate arguments */
2907 void user_main_wrapper(void *)
2908 {
2909         user_main(model->params.argc, model->params.argv);
2910 }
2911
2912 /** @brief Run ModelChecker for the user program */
2913 void ModelChecker::run()
2914 {
2915         do {
2916                 thrd_t user_thread;
2917                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2918                 add_thread(t);
2919
2920                 do {
2921                         /*
2922                          * Stash next pending action(s) for thread(s). There
2923                          * should only need to stash one thread's action--the
2924                          * thread which just took a step--plus the first step
2925                          * for any newly-created thread
2926                          */
2927                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2928                                 thread_id_t tid = int_to_id(i);
2929                                 Thread *thr = get_thread(tid);
2930                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2931                                         switch_from_master(thr);
2932                                 }
2933                         }
2934
2935                         /* Catch assertions from prior take_step or from
2936                          * between-ModelAction bugs (e.g., data races) */
2937                         if (has_asserted())
2938                                 break;
2939
2940                         /* Consume the next action for a Thread */
2941                         ModelAction *curr = t->get_pending();
2942                         t->set_pending(NULL);
2943                         t = take_step(curr);
2944                 } while (t && !t->is_model_thread());
2945
2946                 /*
2947                  * Launch end-of-execution release sequence fixups only when
2948                  * the execution is otherwise feasible AND there are:
2949                  *
2950                  * (1) pending release sequences
2951                  * (2) pending assertions that could be invalidated by a change
2952                  * in clock vectors (i.e., data races)
2953                  * (3) no pending promises
2954                  */
2955                 while (!pending_rel_seqs->empty() &&
2956                                 is_feasible_prefix_ignore_relseq() &&
2957                                 !unrealizedraces.empty()) {
2958                         model_print("*** WARNING: release sequence fixup action "
2959                                         "(%zu pending release sequence(s)) ***\n",
2960                                         pending_rel_seqs->size());
2961                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2962                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2963                                         model_thread);
2964                         take_step(fixup);
2965                 }
2966         } while (next_execution());
2967
2968         model_print("******* Model-checking complete: *******\n");
2969         print_stats();
2970 }