[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
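                /* The "%s" counted in strlen(fmt) is replaced by str, leaving room for the terminating NUL */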
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
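/**
 * Look up the action list for a location in the given table, lazily
 * allocating an empty list the first time the location is seen.
 */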
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
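/**
 * Look up the per-thread action-list vector for a location, lazily
 * allocating an empty vector the first time the location is seen.
 */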
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
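/** @return the Node at the head of the NodeStack */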
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next chosen thread to run, if any exists; NULL if no threads
221  * remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
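                /* Exhaust the alternatives at this node in order (misc index, promises,
                 * past reads, future values, release-sequence breaks) before backtracking
                 * to a different thread in the final else branch */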
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from_past()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_future_value()) {
267                         /* The next node will try to read from a different future value. */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else if (nextnode->increment_relseq_break()) {
271                         /* The next node will try to resolve a release sequence differently */
272                         tid = next->get_tid();
273                         node_stack->pop_restofstack(2);
274                 } else {
275                         ASSERT(prevnode);
276                         /* Make a different thread execute for next step */
277                         scheduler->add_sleep(get_thread(next->get_tid()));
278                         tid = prevnode->get_next_backtrack();
279                         /* Make sure the backtracked thread isn't sleeping. */
280                         node_stack->pop_restofstack(1);
281                         if (diverge == earliest_diverge) {
282                                 earliest_diverge = prevnode->get_action();
283                         }
284                 }
285                 /* Start the round robin scheduler from this thread id */
286                 scheduler->set_scheduler_thread(tid);
287                 /* The correct sleep set is in the parent node. */
288                 execute_sleep_set();
289
290                 DEBUG("*** Divergence point ***\n");
291
292                 diverge = NULL;
293         } else {
294                 tid = next->get_tid();
295         }
296         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
297         ASSERT(tid != THREAD_ID_T_NONE);
298         return thread_map->get(id_to_int(tid));
299 }
300
301 /**
302  * We need to know what the next actions of all threads in the sleep
303  * set will be.  This method marks each such thread's pending action
304  * with the sleep flag so it can be identified during exploration.
305  */
306
307 void ModelChecker::execute_sleep_set()
308 {
309         for (unsigned int i = 0; i < get_num_threads(); i++) {
310                 thread_id_t tid = int_to_id(i);
311                 Thread *thr = get_thread(tid);
312                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
313                         thr->get_pending()->set_sleep_flag();
314                 }
315         }
316 }
317
318 /**
319  * @brief Should the current action wake up a given thread?
320  *
321  * @param curr The current action
322  * @param thread The thread that we might wake up
323  * @return True, if we should wake up the sleeping thread; false otherwise
324  */
325 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
326 {
327         const ModelAction *asleep = thread->get_pending();
328         /* Don't allow partial RMW to wake anyone up */
329         if (curr->is_rmwr())
330                 return false;
331         /* Synchronizing actions may have been backtracked */
332         if (asleep->could_synchronize_with(curr))
333                 return true;
334         /* Any release (fence or store) can wake a thread sleeping at an acquire fence */
335         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
336                 return true;
337         /* Fence-release + store can awake load-acquire on the same location */
338         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
339                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
340                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
341                         return true;
342         }
343         return false;
344 }
345
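/**
 * @brief Remove threads from the sleep set whose pending action could be
 * woken up by the current action (see should_wake_up())
 */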
346 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
347 {
348         for (unsigned int i = 0; i < get_num_threads(); i++) {
349                 Thread *thr = get_thread(int_to_id(i));
350                 if (scheduler->is_sleep_set(thr)) {
351                         if (should_wake_up(curr, thr))
352                                 /* Remove this thread from sleep set */
353                                 scheduler->remove_sleep(thr);
354                 }
355         }
356 }
357
358 /** @brief Alert the model-checker that an incorrectly-ordered
359  * synchronization was made */
360 void ModelChecker::set_bad_synchronization()
361 {
362         priv->bad_synchronization = true;
363 }
364
365 /**
366  * Check whether the current trace has triggered an assertion which should halt
367  * its execution.
368  *
369  * @return True, if the execution should be aborted; false otherwise
370  */
371 bool ModelChecker::has_asserted() const
372 {
373         return priv->asserted;
374 }
375
376 /**
377  * Trigger a trace assertion which should cause this execution to be halted.
378  * This can be due to a detected bug or due to an infeasibility that should
379  * halt ASAP.
380  */
381 void ModelChecker::set_assert()
382 {
383         priv->asserted = true;
384 }
385
386 /**
387  * Check if we are in a deadlock. Should only be called at the end of an
388  * execution, although it should not give false positives in the middle of an
389  * execution (there should be some ENABLED thread).
390  *
391  * @return True if program is in a deadlock; false otherwise
392  */
393 bool ModelChecker::is_deadlocked() const
394 {
395         bool blocking_threads = false;
396         for (unsigned int i = 0; i < get_num_threads(); i++) {
397                 thread_id_t tid = int_to_id(i);
398                 if (is_enabled(tid))
399                         return false;
400                 Thread *t = get_thread(tid);
401                 if (!t->is_model_thread() && t->get_pending())
402                         blocking_threads = true;
403         }
404         return blocking_threads;
405 }
406
407 /**
408  * Check if this is a complete execution. That is, have all threads completed
409  * execution (rather than exiting because sleep sets have forced a redundant
410  * execution).
411  *
412  * @return True if the execution is complete.
413  */
414 bool ModelChecker::is_complete_execution() const
415 {
416         for (unsigned int i = 0; i < get_num_threads(); i++)
417                 if (is_enabled(int_to_id(i)))
418                         return false;
419         return true;
420 }
421
422 /**
423  * @brief Assert a bug in the executing program.
424  *
425  * Use this function to assert any sort of bug in the user program. If the
426  * current trace is feasible (actually, a prefix of some feasible execution),
427  * then this execution will be aborted, printing the appropriate message. If
428  * the current trace is not yet feasible, the error message will be stashed and
429  * printed if the execution ever becomes feasible.
430  *
431  * @param msg Descriptive message for the bug (do not include newline char)
432  * @return True if bug is immediately-feasible
433  */
434 bool ModelChecker::assert_bug(const char *msg)
435 {
436         priv->bugs.push_back(new bug_message(msg));
437
438         if (isfeasibleprefix()) {
439                 set_assert();
440                 return true;
441         }
442         return false;
443 }
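/*
 * A minimal usage sketch (hypothetical user-model code, not part of this file):
 *
 *   if (x != 42)
 *       model->assert_bug("invariant violated");
 *
 * The execution halts immediately only when the current trace is a feasible
 * prefix; otherwise the message is stashed and reported later.
 */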
444
445 /**
446  * @brief Assert a bug in the executing program, asserted by a user thread
447  * @see ModelChecker::assert_bug
448  * @param msg Descriptive message for the bug (do not include newline char)
449  */
450 void ModelChecker::assert_user_bug(const char *msg)
451 {
452         /* If feasible bug, bail out now */
453         if (assert_bug(msg))
454                 switch_to_master(NULL);
455 }
456
457 /** @return True, if any bugs have been reported for this execution */
458 bool ModelChecker::have_bug_reports() const
459 {
460         return priv->bugs.size() != 0;
461 }
462
463 /** @brief Print bug report listing for this execution (if any bugs exist) */
464 void ModelChecker::print_bugs() const
465 {
466         if (have_bug_reports()) {
467                 model_print("Bug report: %zu bug%s detected\n",
468                                 priv->bugs.size(),
469                                 priv->bugs.size() > 1 ? "s" : "");
470                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
471                         priv->bugs[i]->print();
472         }
473 }
474
475 /**
476  * @brief Record end-of-execution stats
477  *
478  * Must be run when exiting an execution. Records various stats.
479  * @see struct execution_stats
480  */
481 void ModelChecker::record_stats()
482 {
483         stats.num_total++;
484         if (!isfeasibleprefix())
485                 stats.num_infeasible++;
486         else if (have_bug_reports())
487                 stats.num_buggy_executions++;
488         else if (is_complete_execution())
489                 stats.num_complete++;
490         else
491                 stats.num_redundant++;
492 }
493
494 /** @brief Print execution stats */
495 void ModelChecker::print_stats() const
496 {
497         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
498         model_print("Number of redundant executions: %d\n", stats.num_redundant);
499         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
500         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
501         model_print("Total executions: %d\n", stats.num_total);
502         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
503 }
504
505 /**
506  * @brief End-of-execution print
507  * @param printbugs Should any existing bugs be printed?
508  */
509 void ModelChecker::print_execution(bool printbugs) const
510 {
511         print_program_output();
512
513         if (DBG_ENABLED() || params.verbose) {
514                 model_print("Earliest divergence point since last feasible execution:\n");
515                 if (earliest_diverge)
516                         earliest_diverge->print();
517                 else
518                         model_print("(Not set)\n");
519
520                 model_print("\n");
521                 print_stats();
522         }
523
524         /* Don't print invalid bugs */
525         if (printbugs)
526                 print_bugs();
527
528         model_print("\n");
529         print_summary();
530 }
531
532 /**
533  * Queries the model-checker for more executions to explore and, if one
534  * exists, resets the model-checker state to execute a new execution.
535  *
536  * @return If there are more executions to explore, return true. Otherwise,
537  * return false.
538  */
539 bool ModelChecker::next_execution()
540 {
541         DBG();
542         /* Is this execution a feasible execution that's worth bug-checking? */
543         bool complete = isfeasibleprefix() && (is_complete_execution() ||
544                         have_bug_reports());
545
546         /* End-of-execution bug checks */
547         if (complete) {
548                 if (is_deadlocked())
549                         assert_bug("Deadlock detected");
550
551                 checkDataRaces();
552         }
553
554         record_stats();
555
556         /* Output */
557         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
558                 print_execution(complete);
559         else
560                 clear_program_output();
561
562         if (complete)
563                 earliest_diverge = NULL;
564
565         if ((diverge = get_next_backtrack()) == NULL)
566                 return false;
567
568         if (DBG_ENABLED()) {
569                 model_print("Next execution will diverge at:\n");
570                 diverge->print();
571         }
572
573         reset_to_initial_state();
574         return true;
575 }
576
577 /**
578  * @brief Find the last fence-related backtracking conflict for a ModelAction
579  *
580  * This function performs the search for the most recent conflicting action
581  * against which we should perform backtracking, as affected by fence
582  * operations. This includes pairs of potentially-synchronizing actions which
583  * occur due to fence-acquire or fence-release, and hence should be explored in
584  * the opposite execution order.
585  *
586  * @param act The current action
587  * @return The most recent action which conflicts with act due to fences
588  */
589 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
590 {
591         /* Only perform release/acquire fence backtracking for stores */
592         if (!act->is_write())
593                 return NULL;
594
595         /* Find a fence-release (or, act is a release) */
596         ModelAction *last_release;
597         if (act->is_release())
598                 last_release = act;
599         else
600                 last_release = get_last_fence_release(act->get_tid());
601         if (!last_release)
602                 return NULL;
603
604         /* Skip past the release */
605         action_list_t *list = action_trace;
606         action_list_t::reverse_iterator rit;
607         for (rit = list->rbegin(); rit != list->rend(); rit++)
608                 if (*rit == last_release)
609                         break;
610         ASSERT(rit != list->rend());
611
612         /* Find a prior:
613          *   load-acquire
614          * or
615          *   load --sb-> fence-acquire */
616         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
617         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
618         bool found_acquire_fences = false;
619         for ( ; rit != list->rend(); rit++) {
620                 ModelAction *prev = *rit;
621                 if (act->same_thread(prev))
622                         continue;
623
624                 int tid = id_to_int(prev->get_tid());
625
626                 if (prev->is_read() && act->same_var(prev)) {
627                         if (prev->is_acquire()) {
628                                 /* Found most recent load-acquire, don't need
629                                  * to search for more fences */
630                                 if (!found_acquire_fences)
631                                         return NULL;
632                         } else {
633                                 prior_loads[tid] = prev;
634                         }
635                 }
636                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
637                         found_acquire_fences = true;
638                         acquire_fences[tid] = prev;
639                 }
640         }
641
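        /* Backtrack to the most recent acquire fence in any thread that also
         * performed an earlier (non-acquire) load of the same location */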
642         ModelAction *latest_backtrack = NULL;
643         for (unsigned int i = 0; i < acquire_fences.size(); i++)
644                 if (acquire_fences[i] && prior_loads[i])
645                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
646                                 latest_backtrack = acquire_fences[i];
647         return latest_backtrack;
648 }
649
650 /**
651  * @brief Find the last backtracking conflict for a ModelAction
652  *
653  * This function performs the search for the most recent conflicting action
654  * against which we should perform backtracking. This primarily includes pairs of
655  * synchronizing actions which should be explored in the opposite execution
656  * order.
657  *
658  * @param act The current action
659  * @return The most recent action which conflicts with act
660  */
661 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
662 {
663         switch (act->get_type()) {
664         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
665         case ATOMIC_READ:
666         case ATOMIC_WRITE:
667         case ATOMIC_RMW: {
668                 ModelAction *ret = NULL;
669
670                 /* linear search: from most recent to oldest */
671                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
672                 action_list_t::reverse_iterator rit;
673                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
674                         ModelAction *prev = *rit;
675                         if (prev->could_synchronize_with(act)) {
676                                 ret = prev;
677                                 break;
678                         }
679                 }
680
681                 ModelAction *ret2 = get_last_fence_conflict(act);
682                 if (!ret2)
683                         return ret;
684                 if (!ret)
685                         return ret2;
686                 if (*ret < *ret2)
687                         return ret2;
688                 return ret;
689         }
690         case ATOMIC_LOCK:
691         case ATOMIC_TRYLOCK: {
692                 /* linear search: from most recent to oldest */
693                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
694                 action_list_t::reverse_iterator rit;
695                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
696                         ModelAction *prev = *rit;
697                         if (act->is_conflicting_lock(prev))
698                                 return prev;
699                 }
700                 break;
701         }
702         case ATOMIC_UNLOCK: {
703                 /* linear search: from most recent to oldest */
704                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
705                 action_list_t::reverse_iterator rit;
706                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
707                         ModelAction *prev = *rit;
708                         if (!act->same_thread(prev) && prev->is_failed_trylock())
709                                 return prev;
710                 }
711                 break;
712         }
713         case ATOMIC_WAIT: {
714                 /* linear search: from most recent to oldest */
715                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
716                 action_list_t::reverse_iterator rit;
717                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
718                         ModelAction *prev = *rit;
719                         if (!act->same_thread(prev) && prev->is_failed_trylock())
720                                 return prev;
721                         if (!act->same_thread(prev) && prev->is_notify())
722                                 return prev;
723                 }
724                 break;
725         }
726
727         case ATOMIC_NOTIFY_ALL:
728         case ATOMIC_NOTIFY_ONE: {
729                 /* linear search: from most recent to oldest */
730                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
731                 action_list_t::reverse_iterator rit;
732                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
733                         ModelAction *prev = *rit;
734                         if (!act->same_thread(prev) && prev->is_wait())
735                                 return prev;
736                 }
737                 break;
738         }
739         default:
740                 break;
741         }
742         return NULL;
743 }
744
745 /** This method finds backtracking points where we should try to
746  * reorder the parameter ModelAction against.
747  *
748  * @param the ModelAction to find backtracking points for.
749  */
750 void ModelChecker::set_backtracking(ModelAction *act)
751 {
752         Thread *t = get_thread(act);
753         ModelAction *prev = get_last_conflict(act);
754         if (prev == NULL)
755                 return;
756
757         Node *node = prev->get_node()->get_parent();
758
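        /* If the current action's thread was already enabled at that node, it is
         * the only backtracking candidate; otherwise consider every thread */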
759         int low_tid, high_tid;
760         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
761                 low_tid = id_to_int(act->get_tid());
762                 high_tid = low_tid + 1;
763         } else {
764                 low_tid = 0;
765                 high_tid = get_num_threads();
766         }
767
768         for (int i = low_tid; i < high_tid; i++) {
769                 thread_id_t tid = int_to_id(i);
770
771                 /* Make sure this thread can be enabled here. */
772                 if (i >= node->get_num_threads())
773                         break;
774
775                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
776                 if (node->enabled_status(tid) != THREAD_ENABLED)
777                         continue;
778
779                 /* Check if this has been explored already */
780                 if (node->has_been_explored(tid))
781                         continue;
782
783                 /* See if fairness allows */
784                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
785                         bool unfair = false;
786                         for (int t = 0; t < node->get_num_threads(); t++) {
787                                 thread_id_t tother = int_to_id(t);
788                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
789                                         unfair = true;
790                                         break;
791                                 }
792                         }
793                         if (unfair)
794                                 continue;
795                 }
796                 /* Cache the latest backtracking point */
797                 set_latest_backtrack(prev);
798
799                 /* If this is a new backtracking point, mark the tree */
800                 if (!node->set_backtrack(tid))
801                         continue;
802                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
803                                         id_to_int(prev->get_tid()),
804                                         id_to_int(t->get_id()));
805                 if (DBG_ENABLED()) {
806                         prev->print();
807                         act->print();
808                 }
809         }
810 }
811
812 /**
813  * @brief Cache a backtracking point as the "most recent", if eligible
814  *
815  * Note that this does not prepare the NodeStack for this backtracking
816  * operation; it only caches the action on a per-execution basis
817  *
818  * @param act The operation at which we should explore a different next action
819  * (i.e., backtracking point)
820  * @return True, if this action is now the most recent backtracking point;
821  * false otherwise
822  */
823 bool ModelChecker::set_latest_backtrack(ModelAction *act)
824 {
825         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
826                 priv->next_backtrack = act;
827                 return true;
828         }
829         return false;
830 }
831
832 /**
833  * Returns last backtracking point. The model checker will explore a different
834  * path for this point in the next execution.
835  * @return The ModelAction at which the next execution should diverge.
836  */
837 ModelAction * ModelChecker::get_next_backtrack()
838 {
839         ModelAction *next = priv->next_backtrack;
840         priv->next_backtrack = NULL;
841         return next;
842 }
843
844 /**
845  * Processes a read model action.
846  * @param curr is the read model action to process.
847  * @return True if processing this read updates the mo_graph.
848  */
849 bool ModelChecker::process_read(ModelAction *curr)
850 {
851         Node *node = curr->get_node();
852         uint64_t value = VALUE_NONE;
853         bool updated = false;
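        /* Keep choosing a read-from (past write or future value); roll back and
         * retry if the chosen past write leaves the execution infeasible */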
854         while (true) {
855                 const ModelAction *rf = node->get_read_from_past();
856                 if (rf != NULL) {
857                         mo_graph->startChanges();
858
859                         value = rf->get_value();
860
861                         check_recency(curr, rf);
862                         bool r_status = r_modification_order(curr, rf);
863
864                         if (is_infeasible() && (node->increment_read_from_past() || node->increment_future_value())) {
865                                 mo_graph->rollbackChanges();
866                                 priv->too_many_reads = false;
867                                 continue;
868                         }
869
870                         read_from(curr, rf);
871                         mo_graph->commitChanges();
872                         mo_check_promises(curr, true);
873
874                         updated |= r_status;
875                 } else {
876                         /* Read from future value */
877                         struct future_value fv = node->get_future_value();
878                         Promise *promise = new Promise(curr, fv);
879                         value = fv.value;
880                         curr->set_read_from_promise(promise);
881                         promises->push_back(promise);
882                         mo_graph->startChanges();
883                         updated = r_modification_order(curr, promise);
884                         mo_graph->commitChanges();
885                 }
886                 get_thread(curr)->set_return_value(value);
887                 return updated;
888         }
889 }
890
891 /**
892  * Processes a lock, trylock, unlock, wait, or notify model action.
893  * @param curr is the mutex-related model action to process.
894  *
895  * The trylock operation checks whether the lock is taken.  If not, it
896  * falls through to the normal lock operation case.  If so, it returns
897  * failure.
898  *
899  * The lock operation has already been checked that it is enabled, so
900  * it just grabs the lock and synchronizes with the previous unlock.
901  *
902  * The unlock operation has to re-enable all of the threads that are
903  * waiting on the lock.
904  *
905  * @return True if synchronization was updated; false otherwise
906  */
907 bool ModelChecker::process_mutex(ModelAction *curr)
908 {
909         std::mutex *mutex = NULL;
910         struct std::mutex_state *state = NULL;
911
912         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
913                 mutex = (std::mutex *)curr->get_location();
914                 state = mutex->get_state();
915         } else if (curr->is_wait()) {
916                 mutex = (std::mutex *)curr->get_value();
917                 state = mutex->get_state();
918         }
919
920         switch (curr->get_type()) {
921         case ATOMIC_TRYLOCK: {
922                 bool success = !state->islocked;
923                 curr->set_try_lock(success);
924                 if (!success) {
925                         get_thread(curr)->set_return_value(0);
926                         break;
927                 }
928                 get_thread(curr)->set_return_value(1);
929         }
930                 //otherwise fall into the lock case
931         case ATOMIC_LOCK: {
932                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
933                         assert_bug("Lock access before initialization");
934                 state->islocked = true;
935                 ModelAction *unlock = get_last_unlock(curr);
936                 //synchronize with the previous unlock statement
937                 if (unlock != NULL) {
938                         curr->synchronize_with(unlock);
939                         return true;
940                 }
941                 break;
942         }
943         case ATOMIC_UNLOCK: {
944                 //unlock the lock
945                 state->islocked = false;
946                 //wake up the other threads
947                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
948                 //activate all the waiting threads
949                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
950                         scheduler->wake(get_thread(*rit));
951                 }
952                 waiters->clear();
953                 break;
954         }
955         case ATOMIC_WAIT: {
956                 //unlock the lock
957                 state->islocked = false;
958                 //wake up the other threads
959                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
960                 //activate all the waiting threads
961                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
962                         scheduler->wake(get_thread(*rit));
963                 }
964                 waiters->clear();
965                 //check whether we should go to sleep or not...simulate spurious failures
966                 if (curr->get_node()->get_misc() == 0) {
967                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
968                         //disable us
969                         scheduler->sleep(get_thread(curr));
970                 }
971                 break;
972         }
973         case ATOMIC_NOTIFY_ALL: {
974                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
975                 //activate all the waiting threads
976                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
977                         scheduler->wake(get_thread(*rit));
978                 }
979                 waiters->clear();
980                 break;
981         }
982         case ATOMIC_NOTIFY_ONE: {
983                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
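                //the node's misc counter selects which waiting thread to wake,
                //so each possible choice is explored in some execution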
984                 int wakeupthread = curr->get_node()->get_misc();
985                 action_list_t::iterator it = waiters->begin();
986                 advance(it, wakeupthread);
987                 scheduler->wake(get_thread(*it));
988                 waiters->erase(it);
989                 break;
990         }
991
992         default:
993                 ASSERT(0);
994         }
995         return false;
996 }
997
998 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
999 {
1000         /* Do more ambitious checks now that mo is more complete */
1001         if (mo_may_allow(writer, reader)) {
1002                 Node *node = reader->get_node();
1003
1004                 /* Find an ancestor thread which exists at the time of the reader */
1005                 Thread *write_thread = get_thread(writer);
1006                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1007                         write_thread = write_thread->get_parent();
1008
1009                 struct future_value fv = {
1010                         writer->get_value(),
1011                         writer->get_seq_number() + params.maxfuturedelay,
1012                         write_thread->get_id(),
1013                 };
1014                 if (node->add_future_value(fv))
1015                         set_latest_backtrack(reader);
1016         }
1017 }
1018
1019 /**
1020  * Process a write ModelAction
1021  * @param curr The ModelAction to process
1022  * @return True if the mo_graph was updated or promises were resolved
1023  */
1024 bool ModelChecker::process_write(ModelAction *curr)
1025 {
1026         bool updated_mod_order = w_modification_order(curr);
1027         bool updated_promises = resolve_promises(curr);
1028
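        /* Once no promises remain outstanding, hand the stashed future values to
         * their readers as potential backtracking points */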
1029         if (promises->size() == 0) {
1030                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1031                         struct PendingFutureValue pfv = (*futurevalues)[i];
1032                         add_future_value(pfv.writer, pfv.act);
1033                 }
1034                 futurevalues->clear();
1035         }
1036
1037         mo_graph->commitChanges();
1038         mo_check_promises(curr, false);
1039
1040         get_thread(curr)->set_return_value(VALUE_NONE);
1041         return updated_mod_order || updated_promises;
1042 }
1043
1044 /**
1045  * Process a fence ModelAction
1046  * @param curr The ModelAction to process
1047  * @return True if synchronization was updated
1048  */
1049 bool ModelChecker::process_fence(ModelAction *curr)
1050 {
1051         /*
1052          * fence-relaxed: no-op
1053          * fence-release: only log the occurrence (not in this function), for
1054          *   use in later synchronization
1055          * fence-acquire (this function): search for hypothetical release
1056          *   sequences
1057          */
1058         bool updated = false;
1059         if (curr->is_acquire()) {
1060                 action_list_t *list = action_trace;
1061                 action_list_t::reverse_iterator rit;
1062                 /* Find X : is_read(X) && X --sb-> curr */
1063                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1064                         ModelAction *act = *rit;
1065                         if (act == curr)
1066                                 continue;
1067                         if (act->get_tid() != curr->get_tid())
1068                                 continue;
1069                         /* Stop at the beginning of the thread */
1070                         if (act->is_thread_start())
1071                                 break;
1072                         /* Stop once we reach a prior fence-acquire */
1073                         if (act->is_fence() && act->is_acquire())
1074                                 break;
1075                         if (!act->is_read())
1076                                 continue;
1077                         /* read-acquire will find its own release sequences */
1078                         if (act->is_acquire())
1079                                 continue;
1080
1081                         /* Establish hypothetical release sequences */
1082                         rel_heads_list_t release_heads;
1083                         get_release_seq_heads(curr, act, &release_heads);
1084                         for (unsigned int i = 0; i < release_heads.size(); i++)
1085                                 if (!curr->synchronize_with(release_heads[i]))
1086                                         set_bad_synchronization();
1087                         if (release_heads.size() != 0)
1088                                 updated = true;
1089                 }
1090         }
1091         return updated;
1092 }
1093
1094 /**
1095  * @brief Process the current action for thread-related activity
1096  *
1097  * Performs current-action processing for a THREAD_* ModelAction. Processing
1098  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1099  * synchronization, etc.  This function is a no-op for non-THREAD actions
1100  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1101  *
1102  * @param curr The current action
1103  * @return True if synchronization was updated or a thread completed
1104  */
1105 bool ModelChecker::process_thread_action(ModelAction *curr)
1106 {
1107         bool updated = false;
1108
1109         switch (curr->get_type()) {
1110         case THREAD_CREATE: {
1111                 thrd_t *thrd = (thrd_t *)curr->get_location();
1112                 struct thread_params *params = (struct thread_params *)curr->get_value();
1113                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1114                 add_thread(th);
1115                 th->set_creation(curr);
1116                 /* Promises can be satisfied by children */
1117                 for (unsigned int i = 0; i < promises->size(); i++) {
1118                         Promise *promise = (*promises)[i];
1119                         if (promise->thread_is_available(curr->get_tid()))
1120                                 promise->add_thread(th->get_id());
1121                 }
1122                 break;
1123         }
1124         case THREAD_JOIN: {
1125                 Thread *blocking = curr->get_thread_operand();
1126                 ModelAction *act = get_last_action(blocking->get_id());
1127                 curr->synchronize_with(act);
1128                 updated = true; /* trigger rel-seq checks */
1129                 break;
1130         }
1131         case THREAD_FINISH: {
1132                 Thread *th = get_thread(curr);
1133                 while (!th->wait_list_empty()) {
1134                         ModelAction *act = th->pop_wait_list();
1135                         scheduler->wake(get_thread(act));
1136                 }
1137                 th->complete();
1138                 /* Completed thread can't satisfy promises */
1139                 for (unsigned int i = 0; i < promises->size(); i++) {
1140                         Promise *promise = (*promises)[i];
1141                         if (promise->thread_is_available(th->get_id()))
1142                                 if (promise->eliminate_thread(th->get_id()))
1143                                         priv->failed_promise = true;
1144                 }
1145                 updated = true; /* trigger rel-seq checks */
1146                 break;
1147         }
1148         case THREAD_START: {
1149                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1150                 break;
1151         }
1152         default:
1153                 break;
1154         }
1155
1156         return updated;
1157 }
1158
1159 /**
1160  * @brief Process the current action for release sequence fixup activity
1161  *
1162  * Performs model-checker release sequence fixups for the current action,
1163  * forcing a single pending release sequence to break (with a given, potential
1164  * "loose" write) or to complete (i.e., synchronize). If a pending release
1165  * sequence forms a complete release sequence, then we must perform the fixup
1166  * synchronization, mo_graph additions, etc.
1167  *
1168  * @param curr The current action; must be a release sequence fixup action
1169  * @param work_queue The work queue to which to add work items as they are
1170  * generated
1171  */
1172 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1173 {
1174         const ModelAction *write = curr->get_node()->get_relseq_break();
1175         struct release_seq *sequence = pending_rel_seqs->back();
1176         pending_rel_seqs->pop_back();
1177         ASSERT(sequence);
1178         ModelAction *acquire = sequence->acquire;
1179         const ModelAction *rf = sequence->rf;
1180         const ModelAction *release = sequence->release;
1181         ASSERT(acquire);
1182         ASSERT(release);
1183         ASSERT(rf);
1184         ASSERT(release->same_thread(rf));
1185
1186         if (write == NULL) {
1187                 /**
1188                  * @todo Forcing a synchronization requires that we set
1189                  * modification order constraints. For instance, we can't allow
1190                  * a fixup sequence in which two separate read-acquire
1191                  * operations read from the same sequence, where the first one
1192                  * synchronizes and the other doesn't. Essentially, we can't
1193                  * allow any writes to insert themselves between 'release' and
1194                  * 'rf'
1195                  */
1196
1197                 /* Must synchronize */
1198                 if (!acquire->synchronize_with(release)) {
1199                         set_bad_synchronization();
1200                         return;
1201                 }
1202                 /* Re-check all pending release sequences */
1203                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1204                 /* Re-check act for mo_graph edges */
1205                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1206
1207                 /* propagate synchronization to later actions */
1208                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1209                 for (; (*rit) != acquire; rit++) {
1210                         ModelAction *propagate = *rit;
1211                         if (acquire->happens_before(propagate)) {
1212                                 propagate->synchronize_with(acquire);
1213                                 /* Re-check 'propagate' for mo_graph edges */
1214                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1215                         }
1216                 }
1217         } else {
1218                 /* Break release sequence with new edges:
1219                  *   release --mo--> write --mo--> rf */
1220                 mo_graph->addEdge(release, write);
1221                 mo_graph->addEdge(write, rf);
1222         }
1223
1224         /* See if we have realized a data race */
1225         checkDataRaces();
1226 }
1227
1228 /**
1229  * Initialize the current action by performing one or more of the following
1230  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1231  * in the NodeStack, manipulating backtracking sets, allocating and
1232  * initializing clock vectors, and computing the promises to fulfill.
1233  *
1234  * @param curr The current action, as passed from the user context; may be
1235  * freed/invalidated after the execution of this function, with a different
1236  * action "returned" in its place (pass-by-reference)
1237  * @return True if curr is a newly-explored action; false otherwise
1238  */
1239 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1240 {
1241         ModelAction *newcurr;
1242
1243         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1244                 newcurr = process_rmw(*curr);
1245                 delete *curr;
1246
1247                 if (newcurr->is_rmw())
1248                         compute_promises(newcurr);
1249
1250                 *curr = newcurr;
1251                 return false;
1252         }
1253
1254         (*curr)->set_seq_number(get_next_seq_num());
1255
1256         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1257         if (newcurr) {
1258                 /* First restore type and order in case of RMW operation */
1259                 if ((*curr)->is_rmwr())
1260                         newcurr->copy_typeandorder(*curr);
1261
1262                 ASSERT((*curr)->get_location() == newcurr->get_location());
1263                 newcurr->copy_from_new(*curr);
1264
1265                 /* Discard duplicate ModelAction; use action from NodeStack */
1266                 delete *curr;
1267
1268                 /* Always compute new clock vector */
1269                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1270
1271                 *curr = newcurr;
1272                 return false; /* Action was explored previously */
1273         } else {
1274                 newcurr = *curr;
1275
1276                 /* Always compute new clock vector */
1277                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1278
1279                 /* Assign most recent release fence */
1280                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1281
1282                 /*
1283                  * Perform one-time actions when pushing new ModelAction onto
1284                  * NodeStack
1285                  */
1286                 if (newcurr->is_write())
1287                         compute_promises(newcurr);
1288                 else if (newcurr->is_relseq_fixup())
1289                         compute_relseq_breakwrites(newcurr);
1290                 else if (newcurr->is_wait())
1291                         newcurr->get_node()->set_misc_max(2);
1292                 else if (newcurr->is_notify_one()) {
1293                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1294                 }
1295                 return true; /* This was a new ModelAction */
1296         }
1297 }
1298
1299 /**
1300  * @brief Establish reads-from relation between two actions
1301  *
1302  * Perform basic operations involved with establishing a concrete rf relation,
1303  * including setting the ModelAction data and checking for release sequences.
1304  *
1305  * @param act The action that is reading (must be a read)
1306  * @param rf The action from which we are reading (must be a write)
1307  *
1308  * @return True if this read established synchronization
1309  */
1310 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1311 {
1312         act->set_read_from(rf);
1313         if (rf != NULL && act->is_acquire()) {
1314                 rel_heads_list_t release_heads;
1315                 get_release_seq_heads(act, act, &release_heads);
1316                 int num_heads = release_heads.size();
1317                 for (unsigned int i = 0; i < release_heads.size(); i++)
1318                         if (!act->synchronize_with(release_heads[i])) {
1319                                 set_bad_synchronization();
1320                                 num_heads--;
1321                         }
1322                 return num_heads > 0;
1323         }
1324         return false;
1325 }
1326
1327 /**
1328  * Check promises and eliminate potentially-satisfying threads when a thread is
1329  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1330  * no longer satisfy a promise generated from that thread.
1331  *
1332  * @param blocker The thread on which a thread is waiting
1333  * @param waiting The waiting thread
1334  */
1335 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1336 {
1337         for (unsigned int i = 0; i < promises->size(); i++) {
1338                 Promise *promise = (*promises)[i];
1339                 ModelAction *reader = promise->get_action();
1340                 if (reader->get_tid() != blocker->get_id())
1341                         continue;
1342                 if (!promise->thread_is_available(waiting->get_id()))
1343                         continue;
1344                 if (promise->eliminate_thread(waiting->get_id())) {
1345                         /* Promise has failed */
1346                         priv->failed_promise = true;
1347                 }
1348         }
1349 }
1350
1351 /**
1352  * @brief Check whether a model action is enabled.
1353  *
1354  * Checks whether a lock or join operation would be successful (i.e., is the
1355  * lock already locked, or is the joined thread already complete). If not, put
1356  * the action in a waiter list.
1357  *
1358  * @param curr is the ModelAction to check whether it is enabled.
1359  * @return a bool that indicates whether the action is enabled.
1360  */
1361 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1362         if (curr->is_lock()) {
1363                 std::mutex *lock = (std::mutex *)curr->get_location();
1364                 struct std::mutex_state *state = lock->get_state();
1365                 if (state->islocked) {
1366                         //Stick the action in the appropriate waiting queue
1367                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1368                         return false;
1369                 }
1370         } else if (curr->get_type() == THREAD_JOIN) {
1371                 Thread *blocking = (Thread *)curr->get_location();
1372                 if (!blocking->is_complete()) {
1373                         blocking->push_wait_list(curr);
1374                         thread_blocking_check_promises(blocking, get_thread(curr));
1375                         return false;
1376                 }
1377         }
1378
1379         return true;
1380 }
1381
1382 /**
1383  * This is the heart of the model checker routine. It performs model-checking
1384  * actions corresponding to a given "current action." Among other tasks, it
1385  * calculates reads-from relationships, updates synchronization clock vectors,
1386  * forms a memory_order constraints graph, and handles replay/backtrack
1387  * execution when running permutations of previously-observed executions.
1388  *
1389  * @param curr The current action to process
1390  * @return The ModelAction that is actually executed; may be different than
1391  * curr; may be NULL, if the current action is not enabled to run
1392  */
1393 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1394 {
1395         ASSERT(curr);
1396         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1397
1398         if (!check_action_enabled(curr)) {
1399                 /* Make the execution look like we chose to run this action
1400                  * much later, when a lock/join can succeed */
1401                 get_thread(curr)->set_pending(curr);
1402                 scheduler->sleep(get_thread(curr));
1403                 return NULL;
1404         }
1405
1406         bool newly_explored = initialize_curr_action(&curr);
1407
1408         DBG();
1409         if (DBG_ENABLED())
1410                 curr->print();
1411
1412         wake_up_sleeping_actions(curr);
1413
1414         /* Add the action to lists before any other model-checking tasks */
1415         if (!second_part_of_rmw)
1416                 add_action_to_lists(curr);
1417
1418         /* Build may_read_from set for newly-created actions */
1419         if (newly_explored && curr->is_read())
1420                 build_may_read_from(curr);
1421
1422         /* Initialize work_queue with the "current action" work */
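        /* The work queue acts as a fixed point: processing one entry (e.g., a
         * read or write) may enqueue follow-up work, such as release-sequence
         * checks or modification-order edge re-checks, and we loop until no
         * work remains or an assertion fires */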
1423         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1424         while (!work_queue.empty() && !has_asserted()) {
1425                 WorkQueueEntry work = work_queue.front();
1426                 work_queue.pop_front();
1427
1428                 switch (work.type) {
1429                 case WORK_CHECK_CURR_ACTION: {
1430                         ModelAction *act = work.action;
1431                         bool update = false; /* update this location's release seq's */
1432                         bool update_all = false; /* update all release seq's */
1433
1434                         if (process_thread_action(curr))
1435                                 update_all = true;
1436
1437                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1438                                 update = true;
1439
1440                         if (act->is_write() && process_write(act))
1441                                 update = true;
1442
1443                         if (act->is_fence() && process_fence(act))
1444                                 update_all = true;
1445
1446                         if (act->is_mutex_op() && process_mutex(act))
1447                                 update_all = true;
1448
1449                         if (act->is_relseq_fixup())
1450                                 process_relseq_fixup(curr, &work_queue);
1451
1452                         if (update_all)
1453                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1454                         else if (update)
1455                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1456                         break;
1457                 }
1458                 case WORK_CHECK_RELEASE_SEQ:
1459                         resolve_release_sequences(work.location, &work_queue);
1460                         break;
1461                 case WORK_CHECK_MO_EDGES: {
1462                         /** @todo Complete verification of work_queue */
1463                         ModelAction *act = work.action;
1464                         bool updated = false;
1465
1466                         if (act->is_read()) {
1467                                 const ModelAction *rf = act->get_reads_from();
1468                                 const Promise *promise = act->get_reads_from_promise();
1469                                 if (rf) {
1470                                         if (r_modification_order(act, rf))
1471                                                 updated = true;
1472                                 } else if (promise) {
1473                                         if (r_modification_order(act, promise))
1474                                                 updated = true;
1475                                 }
1476                         }
1477                         if (act->is_write()) {
1478                                 if (w_modification_order(act))
1479                                         updated = true;
1480                         }
1481                         mo_graph->commitChanges();
1482
1483                         if (updated)
1484                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1485                         break;
1486                 }
1487                 default:
1488                         ASSERT(false);
1489                         break;
1490                 }
1491         }
1492
1493         check_curr_backtracking(curr);
1494         set_backtracking(curr);
1495         return curr;
1496 }
1497
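/**
 * @brief Check whether the current action leaves anything to backtrack to
 *
 * If the current action's Node (or its parent) still has unexplored
 * alternatives (backtracking points, miscellaneous choices such as notify
 * targets, past reads-from candidates, future values, promises, or
 * release-sequence breaks), record curr as the latest backtracking point.
 */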
1498 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1499 {
1500         Node *currnode = curr->get_node();
1501         Node *parnode = currnode->get_parent();
1502
1503         if ((parnode && !parnode->backtrack_empty()) ||
1504                          !currnode->misc_empty() ||
1505                          !currnode->read_from_past_empty() ||
1506                          !currnode->future_value_empty() ||
1507                          !currnode->promise_empty() ||
1508                          !currnode->relseq_break_empty()) {
1509                 set_latest_backtrack(curr);
1510         }
1511 }
1512
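/**
 * @brief Check whether any outstanding promise has expired
 *
 * A promise expires when its expiration sequence number falls behind the
 * number of sequence numbers already used, meaning it should have been
 * resolved by now.
 *
 * @return True if any promise has expired
 */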
1513 bool ModelChecker::promises_expired() const
1514 {
1515         for (unsigned int i = 0; i < promises->size(); i++) {
1516                 Promise *promise = (*promises)[i];
1517                 if (promise->get_expiration() < priv->used_sequence_numbers)
1518                         return true;
1519         }
1520         return false;
1521 }
1522
1523 /**
1524  * This is the strongest feasibility check available.
1525  * @return whether the current trace (partial or complete) must be a prefix of
1526  * a feasible trace.
1527  */
1528 bool ModelChecker::isfeasibleprefix() const
1529 {
1530         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1531 }
1532
1533 /**
1534  * Print diagnostic information about an infeasible execution
1535  * @param prefix A string to prefix the output with; if NULL, then a default
1536  * message prefix will be provided
1537  */
1538 void ModelChecker::print_infeasibility(const char *prefix) const
1539 {
1540         char buf[100];
1541         char *ptr = buf;
1542         if (mo_graph->checkForCycles())
1543                 ptr += sprintf(ptr, "[mo cycle]");
1544         if (priv->failed_promise)
1545                 ptr += sprintf(ptr, "[failed promise]");
1546         if (priv->too_many_reads)
1547                 ptr += sprintf(ptr, "[too many reads]");
1548         if (priv->no_valid_reads)
1549                 ptr += sprintf(ptr, "[no valid reads-from]");
1550         if (priv->bad_synchronization)
1551                 ptr += sprintf(ptr, "[bad sw ordering]");
1552         if (promises_expired())
1553                 ptr += sprintf(ptr, "[promise expired]");
1554         if (promises->size() != 0)
1555                 ptr += sprintf(ptr, "[unresolved promise]");
1556         if (ptr != buf)
1557                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1558 }
1559
1560 /**
1561  * Returns whether the current completed trace is feasible, except for pending
1562  * release sequences.
1563  */
1564 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1565 {
1566         return !is_infeasible() && promises->size() == 0;
1567 }
1568
1569 /**
1570  * Check if the current partial trace is infeasible. Does not check any
1571  * end-of-execution flags, which might rule out the execution. Thus, this is
1572  * useful only for ruling an execution as infeasible.
1573  * @return whether the current partial trace is infeasible.
1574  */
1575 bool ModelChecker::is_infeasible() const
1576 {
1577         return mo_graph->checkForCycles() ||
1578                 priv->no_valid_reads ||
1579                 priv->failed_promise ||
1580                 priv->too_many_reads ||
1581                 priv->bad_synchronization ||
1582                 promises_expired();
1583 }
1584
1585 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1586 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1587         ModelAction *lastread = get_last_action(act->get_tid());
1588         lastread->process_rmw(act);
1589         if (act->is_rmw()) {
1590                 if (lastread->get_reads_from())
1591                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1592                 else
1593                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1594                 mo_graph->commitChanges();
1595         }
1596         return lastread;
1597 }
1598
1599 /**
1600  * Checks whether a thread has read from the same write too many times
1601  * without seeing the effects of a later write.
1602  *
1603  * Basic idea:
1604  * 1) there must be a different write that we could read from that would satisfy the modification order,
1605  * 2) we must have read from the same write in excess of maxreads times, and
1606  * 3) that other write must have been in the read-from set for each of those maxreads reads.
1607  *
1608  * If so, we decide that the execution is no longer feasible.
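 *
 * For example, a spin loop that keeps reading the same stale value of a flag,
 * even though a newer store to that flag is in its read-from set, will be cut
 * off after maxreads iterations.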
1609  */
1610 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1611 {
1612         if (params.maxreads != 0) {
1613                 if (curr->get_node()->get_read_from_past_size() <= 1)
1614                         return;
1615                 //Must make sure that execution is currently feasible...  We could
1616                 //accidentally clear by rolling back
1617                 if (is_infeasible())
1618                         return;
1619                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1620                 int tid = id_to_int(curr->get_tid());
1621
1622                 /* Skip checks */
1623                 if ((int)thrd_lists->size() <= tid)
1624                         return;
1625                 action_list_t *list = &(*thrd_lists)[tid];
1626
1627                 action_list_t::reverse_iterator rit = list->rbegin();
1628                 /* Skip past curr */
1629                 for (; (*rit) != curr; rit++)
1630                         ;
1631                 /* go past curr now */
1632                 rit++;
1633
1634                 action_list_t::reverse_iterator ritcopy = rit;
1635                 //See if we have enough reads from the same value
1636                 int count = 0;
1637                 for (; count < params.maxreads; rit++, count++) {
1638                         if (rit == list->rend())
1639                                 return;
1640                         ModelAction *act = *rit;
1641                         if (!act->is_read())
1642                                 return;
1643
1644                         if (act->get_reads_from() != rf)
1645                                 return;
1646                         if (act->get_node()->get_read_from_past_size() <= 1)
1647                                 return;
1648                 }
1649                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1650                         /* Get write */
1651                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1652
1653                         /* Need a different write */
1654                         if (write == rf)
1655                                 continue;
1656
1657                         /* Test to see whether this is a feasible write to read from */
1658                         /** NOTE: all members of read-from set should be
1659                          *  feasible, so we no longer check it here **/
1660
1661                         rit = ritcopy;
1662
1663                         bool feasiblewrite = true;
1664                 //now we need to see if this write works for everyone
1665
1666                         for (int loop = count; loop > 0; loop--, rit++) {
1667                                 ModelAction *act = *rit;
1668                                 bool foundvalue = false;
1669                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1670                                         if (act->get_node()->get_read_from_past(j) == write) {
1671                                                 foundvalue = true;
1672                                                 break;
1673                                         }
1674                                 }
1675                                 if (!foundvalue) {
1676                                         feasiblewrite = false;
1677                                         break;
1678                                 }
1679                         }
1680                         if (feasiblewrite) {
1681                                 priv->too_many_reads = true;
1682                                 return;
1683                         }
1684                 }
1685         }
1686 }
1687
1688 /**
1689  * Updates the mo_graph with the constraints imposed from the current
1690  * read.
1691  *
1692  * Basic idea is the following: Go through each other thread and find
1693  * the last action that happened before our read.  Two cases:
1694  *
1695  * (1) The action is a write => that write must either occur before
1696  * the write we read from or be the write we read from.
1697  *
1698  * (2) The action is a read => the write that that action read from
1699  * must occur before the write we read from or be the same write.
1700  *
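 * For example, if the last action in some other thread that happens before
 * curr is a read that took its value from a write W', then we add the edge
 * W' --mo--> rf (unless W' is rf itself).
 *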
1701  * @param curr The current action. Must be a read.
1702  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1703  * @return True if modification order edges were added; false otherwise
1704  */
1705 template <typename rf_type>
1706 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1707 {
1708         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1709         unsigned int i;
1710         bool added = false;
1711         ASSERT(curr->is_read());
1712
1713         /* Last SC fence in the current thread */
1714         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1715
1716         /* Iterate over all threads */
1717         for (i = 0; i < thrd_lists->size(); i++) {
1718                 /* Last SC fence in thread i */
1719                 ModelAction *last_sc_fence_thread_local = NULL;
1720                 if (int_to_id((int)i) != curr->get_tid())
1721                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1722
1723                 /* Last SC fence in thread i, before last SC fence in current thread */
1724                 ModelAction *last_sc_fence_thread_before = NULL;
1725                 if (last_sc_fence_local)
1726                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1727
1728                 /* Iterate over actions in thread, starting from most recent */
1729                 action_list_t *list = &(*thrd_lists)[i];
1730                 action_list_t::reverse_iterator rit;
1731                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1732                         ModelAction *act = *rit;
1733
1734                         if (act->is_write() && !act->equals(rf) && act != curr) {
1735                                 /* C++, Section 29.3 statement 5 */
1736                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1737                                                 *act < *last_sc_fence_thread_local) {
1738                                         added = mo_graph->addEdge(act, rf) || added;
1739                                         break;
1740                                 }
1741                                 /* C++, Section 29.3 statement 4 */
1742                                 else if (act->is_seqcst() && last_sc_fence_local &&
1743                                                 *act < *last_sc_fence_local) {
1744                                         added = mo_graph->addEdge(act, rf) || added;
1745                                         break;
1746                                 }
1747                                 /* C++, Section 29.3 statement 6 */
1748                                 else if (last_sc_fence_thread_before &&
1749                                                 *act < *last_sc_fence_thread_before) {
1750                                         added = mo_graph->addEdge(act, rf) || added;
1751                                         break;
1752                                 }
1753                         }
1754
1755                         /*
1756                          * Include at most one act per-thread that "happens
1757                          * before" curr. Don't consider reflexively.
1758                          */
1759                         if (act->happens_before(curr) && act != curr) {
1760                                 if (act->is_write()) {
1761                                         if (!act->equals(rf)) {
1762                                                 added = mo_graph->addEdge(act, rf) || added;
1763                                         }
1764                                 } else {
1765                                         const ModelAction *prevrf = act->get_reads_from();
1766                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1767                                         if (prevrf) {
1768                                                 if (!prevrf->equals(rf))
1769                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1770                                         } else if (!prevrf_promise->equals(rf)) {
1771                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1772                                         }
1773                                 }
1774                                 break;
1775                         }
1776                 }
1777         }
1778
1779         /*
1780          * All compatible, thread-exclusive promises must be ordered after any
1781          * concrete loads from the same thread
1782          */
1783         for (unsigned int i = 0; i < promises->size(); i++)
1784                 if ((*promises)[i]->is_compatible_exclusive(curr))
1785                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1786
1787         return added;
1788 }
1789
1790 /**
1791  * Updates the mo_graph with the constraints imposed from the current write.
1792  *
1793  * Basic idea is the following: Go through each other thread and find
1794  * the latest action that happened before our write.  Two cases:
1795  *
1796  * (1) The action is a write => that write must occur before
1797  * the current write
1798  *
1799  * (2) The action is a read => the write that that action read from
1800  * must occur before the current write.
1801  *
1802  * This method also handles two other issues:
1803  *
1804  * (I) Sequential Consistency: Making sure that if the current write is
1805  * seq_cst, that it occurs after the previous seq_cst write.
1806  *
1807  * (II) Sending the write back to non-synchronizing reads.
1808  *
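 * For example, if the last action in some other thread that happens before
 * curr is a write W', we add W' --mo--> curr; if it is a read that took its
 * value from W', we likewise add W' --mo--> curr.
 *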
1809  * @param curr The current action. Must be a write.
1810  * @return True if modification order edges were added; false otherwise
1811  */
1812 bool ModelChecker::w_modification_order(ModelAction *curr)
1813 {
1814         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1815         unsigned int i;
1816         bool added = false;
1817         ASSERT(curr->is_write());
1818
1819         if (curr->is_seqcst()) {
1820                 /* We have to at least see the last sequentially consistent write,
1821                          so we are initialized. */
1822                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1823                 if (last_seq_cst != NULL) {
1824                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1825                 }
1826         }
1827
1828         /* Last SC fence in the current thread */
1829         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1830
1831         /* Iterate over all threads */
1832         for (i = 0; i < thrd_lists->size(); i++) {
1833                 /* Last SC fence in thread i, before last SC fence in current thread */
1834                 ModelAction *last_sc_fence_thread_before = NULL;
1835                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1836                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1837
1838                 /* Iterate over actions in thread, starting from most recent */
1839                 action_list_t *list = &(*thrd_lists)[i];
1840                 action_list_t::reverse_iterator rit;
1841                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1842                         ModelAction *act = *rit;
1843                         if (act == curr) {
1844                                 /*
1845                                  * 1) If RMW and it actually read from something, then we
1846                                  * already have all relevant edges, so just skip to next
1847                                  * thread.
1848                                  *
1849                                  * 2) If RMW and it didn't read from anything, we should add
1850                                  * whatever edge we can get to speed up convergence.
1851                                  *
1852                                  * 3) If normal write, we need to look at earlier actions, so
1853                                  * continue processing list.
1854                                  */
1855                                 if (curr->is_rmw()) {
1856                                         if (curr->get_reads_from() != NULL)
1857                                                 break;
1858                                         else
1859                                                 continue;
1860                                 } else
1861                                         continue;
1862                         }
1863
1864                         /* C++, Section 29.3 statement 7 */
1865                         if (last_sc_fence_thread_before && act->is_write() &&
1866                                         *act < *last_sc_fence_thread_before) {
1867                                 added = mo_graph->addEdge(act, curr) || added;
1868                                 break;
1869                         }
1870
1871                         /*
1872                          * Include at most one act per-thread that "happens
1873                          * before" curr
1874                          */
1875                         if (act->happens_before(curr)) {
1876                                 /*
1877                                  * Note: if act is RMW, just add edge:
1878                                  *   act --mo--> curr
1879                                  * The following edge should be handled elsewhere:
1880                                  *   readfrom(act) --mo--> act
1881                                  */
1882                                 if (act->is_write())
1883                                         added = mo_graph->addEdge(act, curr) || added;
1884                                 else if (act->is_read()) {
1885                                         //if previous read accessed a null, just keep going
1886                                         if (act->get_reads_from() == NULL)
1887                                                 continue;
1888                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1889                                 }
1890                                 break;
1891                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1892                                                      !act->same_thread(curr)) {
1893                                 /* We have an action that:
1894                                    (1) did not happen before us
1895                                    (2) is a read and we are a write
1896                                    (3) cannot synchronize with us
1897                                    (4) is in a different thread
1898                                    =>
1899                                    that read could potentially read from our write.  Note that
1900                                    these checks are overly conservative at this point, we'll
1901                                    do more checks before actually removing the
1902                                    pendingfuturevalue.
1903
1904                                  */
1905                                 if (thin_air_constraint_may_allow(curr, act)) {
1906                                         if (!is_infeasible())
1907                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1908                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1909                                                 add_future_value(curr, act);
1910                                 }
1911                         }
1912                 }
1913         }
1914
1915         /*
1916          * All compatible, thread-exclusive promises must be ordered after any
1917          * concrete stores to the same thread, or else they can be merged with
1918          * this store later
1919          */
1920         for (unsigned int i = 0; i < promises->size(); i++)
1921                 if ((*promises)[i]->is_compatible_exclusive(curr))
1922                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1923
1924         return added;
1925 }
1926
1927 /** Arbitrary reads from the future are not allowed.  Section 29.3 part 9
1928  * places some constraints.  This method checks one consequence of that
1929  * constraint: no reads-from cycles among RMWs.  Others require compiler support. */
1930 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1931 {
1932         if (!writer->is_rmw())
1933                 return true;
1934
1935         if (!reader->is_rmw())
1936                 return true;
1937
1938         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1939                 if (search == reader)
1940                         return false;
1941                 if (search->get_tid() == reader->get_tid() &&
1942                                 search->happens_before(reader))
1943                         break;
1944         }
1945
1946         return true;
1947 }
1948
1949 /**
1950  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1951  * some constraints. This method checks the following constraint (others
1952  * require compiler support):
1953  *
1954  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1955  */
1956 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1957 {
1958         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1959         unsigned int i;
1960         /* Iterate over all threads */
1961         for (i = 0; i < thrd_lists->size(); i++) {
1962                 const ModelAction *write_after_read = NULL;
1963
1964                 /* Iterate over actions in thread, starting from most recent */
1965                 action_list_t *list = &(*thrd_lists)[i];
1966                 action_list_t::reverse_iterator rit;
1967                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1968                         ModelAction *act = *rit;
1969
1970                         /* Don't disallow due to act == reader */
1971                         if (!reader->happens_before(act) || reader == act)
1972                                 break;
1973                         else if (act->is_write())
1974                                 write_after_read = act;
1975                         else if (act->is_read() && act->get_reads_from() != NULL)
1976                                 write_after_read = act->get_reads_from();
1977                 }
1978
1979                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1980                         return false;
1981         }
1982         return true;
1983 }
1984
1985 /**
1986  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1987  * The ModelAction under consideration is expected to be taking part in
1988  * release/acquire synchronization as an object of the "reads from" relation.
1989  * Note that this can only provide release sequence support for RMW chains
1990  * which do not read from the future, as those actions cannot be traced until
1991  * their "promise" is fulfilled. Similarly, we may not even establish the
1992  * presence of a release sequence with certainty, as some modification order
1993  * constraints may be decided further in the future. Thus, this function
1994  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1995  * and a boolean representing certainty.
1996  *
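 *
 * For illustration: if rf is an RMW whose reads-from chain leads back to a
 * store-release, that release heads the sequence with certainty. If rf is a
 * plain relaxed write, a prior release in the same thread may still head the
 * sequence, but only if no other thread's store can fall between the two in
 * modification order, which may not be decidable yet.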
1997  * @param rf The action that might be part of a release sequence. Must be a
1998  * write.
1999  * @param release_heads A pass-by-reference style return parameter. After
2000  * execution of this function, release_heads will contain the heads of all the
2001  * relevant release sequences, if any exist with certainty
2002  * @param pending A pass-by-reference style return parameter which is only used
2003  * when returning false (i.e., uncertain). Returns most information regarding
2004  * an uncertain release sequence, including any write operations that might
2005  * break the sequence.
2006  * @return true, if the ModelChecker is certain that release_heads is complete;
2007  * false otherwise
2008  */
2009 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2010                 rel_heads_list_t *release_heads,
2011                 struct release_seq *pending) const
2012 {
2013         /* Only check for release sequences if there are no cycles */
2014         if (mo_graph->checkForCycles())
2015                 return false;
2016
2017         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2018                 ASSERT(rf->is_write());
2019
2020                 if (rf->is_release())
2021                         release_heads->push_back(rf);
2022                 else if (rf->get_last_fence_release())
2023                         release_heads->push_back(rf->get_last_fence_release());
2024                 if (!rf->is_rmw())
2025                         break; /* End of RMW chain */
2026
2027                 /** @todo Need to be smarter here...  In the linux lock
2028                  * example, this will run to the beginning of the program for
2029                  * every acquire. */
2030                 /** @todo The way to be smarter here is to keep going until 1
2031                  * thread has a release preceded by an acquire and you've seen
2032                  *       both. */
2033
2034                 /* acq_rel RMW is a sufficient stopping condition */
2035                 if (rf->is_acquire() && rf->is_release())
2036                         return true; /* complete */
2037         }
2038         if (!rf) {
2039                 /* read from future: need to settle this later */
2040                 pending->rf = NULL;
2041                 return false; /* incomplete */
2042         }
2043
2044         if (rf->is_release())
2045                 return true; /* complete */
2046
2047         /* else relaxed write
2048          * - check for fence-release in the same thread (29.8, stmt. 3)
2049          * - check modification order for contiguous subsequence
2050          *   -> rf must be same thread as release */
2051
2052         const ModelAction *fence_release = rf->get_last_fence_release();
2053         /* Synchronize with a fence-release unconditionally; we don't need to
2054          * find any more "contiguous subsequence..." for it */
2055         if (fence_release)
2056                 release_heads->push_back(fence_release);
2057
2058         int tid = id_to_int(rf->get_tid());
2059         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2060         action_list_t *list = &(*thrd_lists)[tid];
2061         action_list_t::const_reverse_iterator rit;
2062
2063         /* Find rf in the thread list */
2064         rit = std::find(list->rbegin(), list->rend(), rf);
2065         ASSERT(rit != list->rend());
2066
2067         /* Find the last {write,fence}-release */
2068         for (; rit != list->rend(); rit++) {
2069                 if (fence_release && *(*rit) < *fence_release)
2070                         break;
2071                 if ((*rit)->is_release())
2072                         break;
2073         }
2074         if (rit == list->rend()) {
2075                 /* No write-release in this thread */
2076                 return true; /* complete */
2077         } else if (fence_release && *(*rit) < *fence_release) {
2078                 /* The fence-release is more recent (and so, "stronger") than
2079                  * the most recent write-release */
2080                 return true; /* complete */
2081         } /* else, need to establish contiguous release sequence */
2082         ModelAction *release = *rit;
2083
2084         ASSERT(rf->same_thread(release));
2085
2086         pending->writes.clear();
2087
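        /* For each other thread, check whether any of its non-RMW writes could
         * fall between 'release' and 'rf' in modification order; such a write
         * would break the contiguous release sequence. Writes we cannot yet
         * rule out are recorded in 'pending'. */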
2088         bool certain = true;
2089         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2090                 if (id_to_int(rf->get_tid()) == (int)i)
2091                         continue;
2092                 list = &(*thrd_lists)[i];
2093
2094                 /* Can we ensure no future writes from this thread may break
2095                  * the release seq? */
2096                 bool future_ordered = false;
2097
2098                 ModelAction *last = get_last_action(int_to_id(i));
2099                 Thread *th = get_thread(int_to_id(i));
2100                 if ((last && rf->happens_before(last)) ||
2101                                 !is_enabled(th) ||
2102                                 th->is_complete())
2103                         future_ordered = true;
2104
2105                 ASSERT(!th->is_model_thread() || future_ordered);
2106
2107                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2108                         const ModelAction *act = *rit;
2109                         /* Reach synchronization -> this thread is complete */
2110                         if (act->happens_before(release))
2111                                 break;
2112                         if (rf->happens_before(act)) {
2113                                 future_ordered = true;
2114                                 continue;
2115                         }
2116
2117                         /* Only non-RMW writes can break release sequences */
2118                         if (!act->is_write() || act->is_rmw())
2119                                 continue;
2120
2121                         /* Check modification order */
2122                         if (mo_graph->checkReachable(rf, act)) {
2123                                 /* rf --mo--> act */
2124                                 future_ordered = true;
2125                                 continue;
2126                         }
2127                         if (mo_graph->checkReachable(act, release))
2128                                 /* act --mo--> release */
2129                                 break;
2130                         if (mo_graph->checkReachable(release, act) &&
2131                                       mo_graph->checkReachable(act, rf)) {
2132                                 /* release --mo-> act --mo--> rf */
2133                                 return true; /* complete */
2134                         }
2135                         /* act may break release sequence */
2136                         pending->writes.push_back(act);
2137                         certain = false;
2138                 }
2139                 if (!future_ordered)
2140                         certain = false; /* This thread is uncertain */
2141         }
2142
2143         if (certain) {
2144                 release_heads->push_back(release);
2145                 pending->writes.clear();
2146         } else {
2147                 pending->release = release;
2148                 pending->rf = rf;
2149         }
2150         return certain;
2151 }
2152
2153 /**
2154  * An interface for getting the release sequence head(s) with which a
2155  * given ModelAction must synchronize. This function only returns a non-empty
2156  * result when it can locate a release sequence head with certainty. Otherwise,
2157  * it may mark the internal state of the ModelChecker so that it will handle
2158  * the release sequence at a later time, causing @a acquire to update its
2159  * synchronization at some later point in execution.
2160  *
2161  * @param acquire The 'acquire' action that may synchronize with a release
2162  * sequence
2163  * @param read The read action that may read from a release sequence; this may
2164  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2165  * when 'acquire' is a fence-acquire)
2166  * @param release_heads A pass-by-reference return parameter. Will be filled
2167  * with the head(s) of the release sequence(s), if they exist with certainty.
2168  * @see ModelChecker::release_seq_heads
2169  */
2170 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2171                 ModelAction *read, rel_heads_list_t *release_heads)
2172 {
2173         const ModelAction *rf = read->get_reads_from();
2174         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2175         sequence->acquire = acquire;
2176         sequence->read = read;
2177
2178         if (!release_seq_heads(rf, release_heads, sequence)) {
2179                 /* add act to 'lazy checking' list */
2180                 pending_rel_seqs->push_back(sequence);
2181         } else {
2182                 snapshot_free(sequence);
2183         }
2184 }
2185
2186 /**
2187  * Attempt to resolve all stashed operations that might synchronize with a
2188  * release sequence for a given location. This implements the "lazy" portion of
2189  * determining whether or not a release sequence was contiguous, since not all
2190  * modification order information is present at the time an action occurs.
2191  *
2192  * @param location The location/object that should be checked for release
2193  * sequence resolutions. A NULL value means to check all locations.
2194  * @param work_queue The work queue to which to add work items as they are
2195  * generated
2196  * @return True if any updates occurred (new synchronization, new mo_graph
2197  * edges)
2198  */
2199 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2200 {
2201         bool updated = false;
2202         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2203         while (it != pending_rel_seqs->end()) {
2204                 struct release_seq *pending = *it;
2205                 ModelAction *acquire = pending->acquire;
2206                 const ModelAction *read = pending->read;
2207
2208                 /* Only resolve sequences on the given location, if provided */
2209                 if (location && read->get_location() != location) {
2210                         it++;
2211                         continue;
2212                 }
2213
2214                 const ModelAction *rf = read->get_reads_from();
2215                 rel_heads_list_t release_heads;
2216                 bool complete;
2217                 complete = release_seq_heads(rf, &release_heads, pending);
2218                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2219                         if (!acquire->has_synchronized_with(release_heads[i])) {
2220                                 if (acquire->synchronize_with(release_heads[i]))
2221                                         updated = true;
2222                                 else
2223                                         set_bad_synchronization();
2224                         }
2225                 }
2226
2227                 if (updated) {
2228                         /* Re-check all pending release sequences */
2229                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2230                         /* Re-check read-acquire for mo_graph edges */
2231                         if (acquire->is_read())
2232                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2233
2234                         /* propagate synchronization to later actions */
2235                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2236                         for (; (*rit) != acquire; rit++) {
2237                                 ModelAction *propagate = *rit;
2238                                 if (acquire->happens_before(propagate)) {
2239                                         propagate->synchronize_with(acquire);
2240                                         /* Re-check 'propagate' for mo_graph edges */
2241                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2242                                 }
2243                         }
2244                 }
2245                 if (complete) {
2246                         it = pending_rel_seqs->erase(it);
2247                         snapshot_free(pending);
2248                 } else {
2249                         it++;
2250                 }
2251         }
2252
2253         // If we established new synchronization, see if we have realized a data race.
2254         checkDataRaces();
2255
2256         return updated;
2257 }
2258
2259 /**
2260  * Performs various bookkeeping operations for the current ModelAction. For
2261  * instance, adds action to the per-object, per-thread action vector and to the
2262  * action trace list of all thread actions.
2263  *
2264  * @param act is the ModelAction to add.
2265  */
2266 void ModelChecker::add_action_to_lists(ModelAction *act)
2267 {
2268         int tid = id_to_int(act->get_tid());
2269         ModelAction *uninit = NULL;
2270         int uninit_id = -1;
2271         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
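        /* On the first access to an atomic object, seed its lists with a
         * synthetic "uninitialized" action so that later reads have an initial
         * store available to read from */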
2272         if (list->empty() && act->is_atomic_var()) {
2273                 uninit = new_uninitialized_action(act->get_location());
2274                 uninit_id = id_to_int(uninit->get_tid());
2275                 list->push_back(uninit);
2276         }
2277         list->push_back(act);
2278
2279         action_trace->push_back(act);
2280         if (uninit)
2281                 action_trace->push_front(uninit);
2282
2283         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2284         if (tid >= (int)vec->size())
2285                 vec->resize(priv->next_thread_id);
2286         (*vec)[tid].push_back(act);
2287         if (uninit)
2288                 (*vec)[uninit_id].push_front(uninit);
2289
2290         if ((int)thrd_last_action->size() <= tid)
2291                 thrd_last_action->resize(get_num_threads());
2292         (*thrd_last_action)[tid] = act;
2293         if (uninit)
2294                 (*thrd_last_action)[uninit_id] = uninit;
2295
2296         if (act->is_fence() && act->is_release()) {
2297                 if ((int)thrd_last_fence_release->size() <= tid)
2298                         thrd_last_fence_release->resize(get_num_threads());
2299                 (*thrd_last_fence_release)[tid] = act;
2300         }
2301
2302         if (act->is_wait()) {
2303                 void *mutex_loc = (void *) act->get_value();
2304                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2305
2306                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2307                 if (tid >= (int)vec->size())
2308                         vec->resize(priv->next_thread_id);
2309                 (*vec)[tid].push_back(act);
2310         }
2311 }
2312
2313 /**
2314  * @brief Get the last action performed by a particular Thread
2315  * @param tid The thread ID of the Thread in question
2316  * @return The last action in the thread
2317  */
2318 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2319 {
2320         int threadid = id_to_int(tid);
2321         if (threadid < (int)thrd_last_action->size())
2322                 return (*thrd_last_action)[id_to_int(tid)];
2323         else
2324                 return NULL;
2325 }
2326
2327 /**
2328  * @brief Get the last fence release performed by a particular Thread
2329  * @param tid The thread ID of the Thread in question
2330  * @return The last fence release in the thread, if one exists; NULL otherwise
2331  */
2332 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2333 {
2334         int threadid = id_to_int(tid);
2335         if (threadid < (int)thrd_last_fence_release->size())
2336                 return (*thrd_last_fence_release)[id_to_int(tid)];
2337         else
2338                 return NULL;
2339 }
2340
2341 /**
2342  * Gets the last memory_order_seq_cst write (in the total global sequence)
2343  * performed on a particular object (i.e., memory location), not including the
2344  * current action.
2345  * @param curr The current ModelAction; also denotes the object location to
2346  * check
2347  * @return The last seq_cst write
2348  */
2349 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2350 {
2351         void *location = curr->get_location();
2352         action_list_t *list = get_safe_ptr_action(obj_map, location);
2353         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2354         action_list_t::reverse_iterator rit;
2355         for (rit = list->rbegin(); rit != list->rend(); rit++)
2356                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2357                         return *rit;
2358         return NULL;
2359 }
2360
2361 /**
2362  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2363  * performed in a particular thread, prior to a particular fence.
2364  * @param tid The ID of the thread to check
2365  * @param before_fence The fence from which to begin the search; if NULL, then
2366  * search for the most recent fence in the thread.
2367  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2368  */
2369 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2370 {
2371         /* All fences should have NULL location */
2372         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2373         action_list_t::reverse_iterator rit = list->rbegin();
2374
2375         if (before_fence) {
2376                 for (; rit != list->rend(); rit++)
2377                         if (*rit == before_fence)
2378                                 break;
2379
2380                 ASSERT(*rit == before_fence);
2381                 rit++;
2382         }
2383
2384         for (; rit != list->rend(); rit++)
2385                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2386                         return *rit;
2387         return NULL;
2388 }
2389
2390 /**
2391  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2392  * location). This function identifies the mutex according to the current
2393  * action, which is presumed to operate on the same mutex.
2394  * @param curr The current ModelAction; also denotes the object location to
2395  * check
2396  * @return The last unlock operation
2397  */
2398 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2399 {
2400         void *location = curr->get_location();
2401         action_list_t *list = get_safe_ptr_action(obj_map, location);
2402         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
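        /* A wait() releases its mutex, so treat it as an unlock as well */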
2403         action_list_t::reverse_iterator rit;
2404         for (rit = list->rbegin(); rit != list->rend(); rit++)
2405                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2406                         return *rit;
2407         return NULL;
2408 }
2409
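/**
 * @brief Get the "parent" action for a thread
 *
 * Returns the last action performed by the given thread or, if the thread has
 * not yet acted, the action that created it. Used, for example, to obtain the
 * thread's clock vector (see get_cv()).
 *
 * @param tid The thread ID of the Thread in question
 * @return The parent action
 */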
2410 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2411 {
2412         ModelAction *parent = get_last_action(tid);
2413         if (!parent)
2414                 parent = get_thread(tid)->get_creation();
2415         return parent;
2416 }
2417
2418 /**
2419  * Returns the clock vector for a given thread.
2420  * @param tid The thread whose clock vector we want
2421  * @return Desired clock vector
2422  */
2423 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2424 {
2425         return get_parent_action(tid)->get_cv();
2426 }
2427
2428 /**
2429  * Resolve a set of Promises with a current write. The set is provided in the
2430  * Node corresponding to @a write.
2431  * @param write The ModelAction that is fulfilling Promises
2432  * @return True if promises were resolved; false otherwise
2433  */
2434 bool ModelChecker::resolve_promises(ModelAction *write)
2435 {
2436         bool haveResolved = false;
2437         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2438         promise_list_t mustResolve, resolved;
2439
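        /* 'i' indexes the Node's promise flags in their original order, while
         * 'promise_index' tracks our position in the promises vector, which
         * shrinks as resolved promises are erased */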
2440         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2441                 Promise *promise = (*promises)[promise_index];
2442                 if (write->get_node()->get_promise(i)) {
2443                         ModelAction *read = promise->get_action();
2444                         read_from(read, write);
2445                         //Make sure the promise's value matches the write's value
2446                         ASSERT(promise->is_compatible(write));
2447                         mo_graph->resolvePromise(promise, write, &mustResolve);
2448
2449                         resolved.push_back(promise);
2450                         promises->erase(promises->begin() + promise_index);
2451                         actions_to_check.push_back(read);
2452
2453                         haveResolved = true;
2454                 } else
2455                         promise_index++;
2456         }
2457
2458         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2459                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2460                                 == resolved.end())
2461                         priv->failed_promise = true;
2462         }
2463         for (unsigned int i = 0; i < resolved.size(); i++)
2464                 delete resolved[i];
2465         //Check whether reading these writes has made threads unable to
2466         //resolve promises
2467
2468         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2469                 ModelAction *read = actions_to_check[i];
2470                 mo_check_promises(read, true);
2471         }
2472
2473         return haveResolved;
2474 }
2475
2476 /**
2477  * Compute the set of promises that could potentially be satisfied by this
2478  * action. Note that the set computation actually appears in the Node, not in
2479  * ModelChecker.
2480  * @param curr The ModelAction that may satisfy promises
2481  */
2482 void ModelChecker::compute_promises(ModelAction *curr)
2483 {
2484         for (unsigned int i = 0; i < promises->size(); i++) {
2485                 Promise *promise = (*promises)[i];
2486                 const ModelAction *act = promise->get_action();
2487                 ASSERT(act->is_read());
2488                 if (!act->happens_before(curr) &&
2489                                 !act->could_synchronize_with(curr) &&
2490                                 promise->is_compatible(curr) &&
2491                                 promise->get_value() == curr->get_value()) {
2492                         curr->get_node()->set_promise(i, act->is_rmw());
2493                 }
2494         }
2495 }
2496
2497 /** Checks promises in response to a change in a thread's ClockVector. */
2498 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2499 {
2500         for (unsigned int i = 0; i < promises->size(); i++) {
2501                 Promise *promise = (*promises)[i];
2502                 const ModelAction *act = promise->get_action();
2503                 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2504                                 merge_cv->synchronized_since(act)) {
2505                         if (promise->eliminate_thread(tid)) {
2506                                 //Promise has failed
2507                                 priv->failed_promise = true;
2508                                 return;
2509                         }
2510                 }
2511         }
2512 }
2513
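/**
 * @brief Check promises after a thread has been disabled
 *
 * A promise whose candidate threads have all been eliminated can no longer be
 * satisfied; mark the execution as having a failed promise.
 */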
2514 void ModelChecker::check_promises_thread_disabled()
2515 {
2516         for (unsigned int i = 0; i < promises->size(); i++) {
2517                 Promise *promise = (*promises)[i];
2518                 if (promise->has_failed()) {
2519                         priv->failed_promise = true;
2520                         return;
2521                 }
2522         }
2523 }
2524
2525 /**
2526  * @brief Checks promises in response to addition to modification order for
2527  * threads.
2528  *
2529  * We test whether threads are still available for satisfying promises after an
2530  * addition to our modification order constraints. Those that are unavailable
2531  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2532  * that promise has failed.
2533  *
2534  * @param act The ModelAction which updated the modification order
2535  * @param is_read_check Should be true if act is a read and we must check for
2536  * updates to the store from which it read (there is a distinction here for
2537  * RMW's, which are both a load and a store)
2538  */
2539 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2540 {
2541         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2542
2543         for (unsigned int i = 0; i < promises->size(); i++) {
2544                 Promise *promise = (*promises)[i];
2545                 const ModelAction *pread = promise->get_action();
2546
2547                 // Is this promise on the same location?
2548                 if (!pread->same_var(write))
2549                         continue;
2550
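		/* Eliminate threads that can no longer satisfy this promise given
		 * the new modification-order constraints on 'write'; if no thread
		 * remains, the promise has failed */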
2551                 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2552                         priv->failed_promise = true;
2553                         return;
2554                 }
2555
2556                 // Don't do any lookups twice for the same thread
2557                 if (!promise->thread_is_available(act->get_tid()))
2558                         continue;
2559
2560                 if (mo_graph->checkReachable(promise, write)) {
2561                         if (mo_graph->checkPromise(write, promise)) {
2562                                 priv->failed_promise = true;
2563                                 return;
2564                         }
2565                 }
2566         }
2567 }
2568
2569 /**
2570  * Compute the set of writes that may break the current pending release
2571  * sequence. This information is extracted from previous release sequence
2572  * calculations.
2573  *
2574  * @param curr The current ModelAction. Must be a release sequence fixup
2575  * action.
2576  */
2577 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2578 {
2579         if (pending_rel_seqs->empty())
2580                 return;
2581
2582         struct release_seq *pending = pending_rel_seqs->back();
2583         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2584                 const ModelAction *write = pending->writes[i];
2585                 curr->get_node()->add_relseq_break(write);
2586         }
2587
2588         /* NULL means don't break the sequence; just synchronize */
2589         curr->get_node()->add_relseq_break(NULL);
2590 }
2591
2592 /**
2593  * Build up an initial set of all past writes that this 'read' action may read
2594  * from, as well as any previously-observed future values that must still be valid.
2595  *
2596  * @param curr is the current ModelAction that we are exploring; it must be a
2597  * 'read' operation.
2598  */
2599 void ModelChecker::build_may_read_from(ModelAction *curr)
2600 {
2601         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2602         unsigned int i;
2603         ASSERT(curr->is_read());
2604
2605         ModelAction *last_sc_write = NULL;
2606
2607         if (curr->is_seqcst())
2608                 last_sc_write = get_last_seq_cst_write(curr);
2609
2610         /* Iterate over all threads */
2611         for (i = 0; i < thrd_lists->size(); i++) {
2612                 /* Iterate over actions in thread, starting from most recent */
2613                 action_list_t *list = &(*thrd_lists)[i];
2614                 action_list_t::reverse_iterator rit;
2615                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2616                         ModelAction *act = *rit;
2617
2618                         /* Only consider 'write' actions */
2619                         if (!act->is_write() || act == curr)
2620                                 continue;
2621
2622                         /* Don't consider more than one seq_cst write if we are a seq_cst read; also skip writes that a sleeping (non-seq_cst) read is not allowed to read from (see sleep_can_read_from()). */
2623                         bool allow_read = true;
2624
2625                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2626                                 allow_read = false;
2627                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2628                                 allow_read = false;
2629
2630                         if (allow_read) {
2631                                 /* Only add feasible reads */
2632                                 mo_graph->startChanges();
2633                                 r_modification_order(curr, act);
2634                                 if (!is_infeasible())
2635                                         curr->get_node()->add_read_from_past(act);
2636                                 mo_graph->rollbackChanges();
2637                         }
2638
2639                         /* Include at most one act per-thread that "happens before" curr */
2640                         if (act->happens_before(curr))
2641                                 break;
2642                 }
2643         }
2644
2645         /* Inherit existing, promised future values */
2646         for (i = 0; i < promises->size(); i++) {
2647                 const Promise *promise = (*promises)[i];
2648                 const ModelAction *promise_read = promise->get_action();
2649                 if (promise_read->same_var(curr)) {
2650                         /* Only add feasible future-values */
2651                         mo_graph->startChanges();
2652                         r_modification_order(curr, promise);
2653                         if (!is_infeasible()) {
2654                                 const struct future_value fv = promise->get_fv();
2655                                 curr->get_node()->add_future_value(fv);
2656                         }
2657                         mo_graph->rollbackChanges();
2658                 }
2659         }
2660
2661         /* An empty may-read-from set (and no future values) should only occur when the execution is doomed */
2662         if (!curr->get_node()->get_read_from_past_size() && curr->get_node()->future_value_empty()) {
2663                 priv->no_valid_reads = true;
2664                 set_assert();
2665         }
2666
2667         if (DBG_ENABLED()) {
2668                 model_print("Reached read action:\n");
2669                 curr->print();
2670                 model_print("Printing read_from_past\n");
2671                 curr->get_node()->print_read_from_past();
2672                 model_print("End printing read_from_past\n");
2673         }
2674 }
2675
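/**
 * @brief Check whether a sleeping read may read from a given write
 *
 * Walks backward through the reads-from chain of the candidate write
 * (following RMWs). The read is permitted if the chain reaches an
 * uninitialized store, or a release store performed while the reading
 * thread was on the sleep set; it is rejected once a plain (non-RMW)
 * store satisfying neither condition is found.
 *
 * @param curr The read action whose thread may be sleeping
 * @param write The candidate write
 * @return True if the read may read from the write
 */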
2676 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2677 {
2678         for ( ; write != NULL; write = write->get_reads_from()) {
2679                 /* UNINIT actions don't have a Node, and they never sleep */
2680                 if (write->is_uninitialized())
2681                         return true;
2682                 Node *prevnode = write->get_node()->get_parent();
2683
2684                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2685                 if (write->is_release() && thread_sleep)
2686                         return true;
2687                 if (!write->is_rmw())
2688                         return false;
2689         }
2690         return true;
2691 }
2692
2693 /**
2694  * @brief Create a new action representing an uninitialized atomic
2695  * @param location The memory location of the atomic object
2696  * @return A pointer to a new ModelAction
2697  */
2698 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2699 {
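	/* Allocate from the snapshotting heap and construct via placement new */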
2700         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2701         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2702         act->create_cv(NULL);
2703         return act;
2704 }
2705
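/** @brief Print an action list along with a simple hash of the trace */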
2706 static void print_list(action_list_t *list)
2707 {
2708         action_list_t::iterator it;
2709
2710         model_print("---------------------------------------------------------------------\n");
2711
2712         unsigned int hash = 0;
2713
2714         for (it = list->begin(); it != list->end(); it++) {
2715                 (*it)->print();
2716                 hash = hash^(hash<<3)^((*it)->hash());
2717         }
2718         model_print("HASH %u\n", hash);
2719         model_print("---------------------------------------------------------------------\n");
2720 }
2721
2722 #if SUPPORT_MOD_ORDER_DUMP
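/**
 * @brief Dump the current execution as a Graphviz (.dot) graph
 *
 * Writes the modification order nodes plus reads-from (red) and
 * sequenced-before (blue) edges for the action trace.
 *
 * @param filename The base name for the output file (".dot" is appended)
 */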
2723 void ModelChecker::dumpGraph(char *filename) const
2724 {
2725         char buffer[200];
2726         sprintf(buffer, "%s.dot", filename);
2727         FILE *file = fopen(buffer, "w");
2728         fprintf(file, "digraph %s {\n", filename);
2729         mo_graph->dumpNodes(file);
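	/* thread_array[tid] holds the previous action of each thread, used to
	 * draw sequenced-before edges */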
2730         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2731
2732         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2733                 ModelAction *action = *it;
2734                 if (action->is_read()) {
2735                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2736                         if (action->get_reads_from() != NULL)
2737                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2738                 }
2739                 if (thread_array[action->get_tid()] != NULL) {
2740                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2741                 }
2742
2743                 thread_array[action->get_tid()] = action;
2744         }
2745         fprintf(file, "}\n");
2746         model_free(thread_array);
2747         fclose(file);
2748 }
2749 #endif
2750
2751 /** @brief Prints an execution trace summary. */
2752 void ModelChecker::print_summary() const
2753 {
2754 #if SUPPORT_MOD_ORDER_DUMP
2755         char buffername[100];
2756         sprintf(buffername, "exec%04u", stats.num_total);
2757         mo_graph->dumpGraphToFile(buffername);
2758         sprintf(buffername, "graph%04u", stats.num_total);
2759         dumpGraph(buffername);
2760 #endif
2761
2762         model_print("Execution %d:", stats.num_total);
2763         if (isfeasibleprefix())
2764                 model_print("\n");
2765         else
2766                 print_infeasibility(" INFEASIBLE");
2767         print_list(action_trace);
2768         model_print("\n");
2769 }
2770
2771 /**
2772  * Add a Thread to the system for the first time. Should only be called once
2773  * per thread.
2774  * @param t The Thread to add
2775  */
2776 void ModelChecker::add_thread(Thread *t)
2777 {
2778         thread_map->put(id_to_int(t->get_id()), t);
2779         scheduler->add_thread(t);
2780 }
2781
2782 /**
2783  * Removes a thread from the scheduler.
2784  * @param t The Thread to remove.
2785  */
2786 void ModelChecker::remove_thread(Thread *t)
2787 {
2788         scheduler->remove_thread(t);
2789 }
2790
2791 /**
2792  * @brief Get a Thread reference by its ID
2793  * @param tid The Thread's ID
2794  * @return A Thread reference
2795  */
2796 Thread * ModelChecker::get_thread(thread_id_t tid) const
2797 {
2798         return thread_map->get(id_to_int(tid));
2799 }
2800
2801 /**
2802  * @brief Get a reference to the Thread in which a ModelAction was executed
2803  * @param act The ModelAction
2804  * @return A Thread reference
2805  */
2806 Thread * ModelChecker::get_thread(const ModelAction *act) const
2807 {
2808         return get_thread(act->get_tid());
2809 }
2810
2811 /**
2812  * @brief Check if a Thread is currently enabled
2813  * @param t The Thread to check
2814  * @return True if the Thread is currently enabled
2815  */
2816 bool ModelChecker::is_enabled(Thread *t) const
2817 {
2818         return scheduler->is_enabled(t);
2819 }
2820
2821 /**
2822  * @brief Check if a Thread is currently enabled
2823  * @param tid The ID of the Thread to check
2824  * @return True if the Thread is currently enabled
2825  */
2826 bool ModelChecker::is_enabled(thread_id_t tid) const
2827 {
2828         return scheduler->is_enabled(tid);
2829 }
2830
2831 /**
2832  * Switch from a model-checker context to a user-thread context. This is the
2833  * complement of ModelChecker::switch_to_master and must be called from the
2834  * model-checker context
2835  *
2836  * @param thread The user-thread to switch to
2837  */
2838 void ModelChecker::switch_from_master(Thread *thread)
2839 {
2840         scheduler->set_current_thread(thread);
2841         Thread::swap(&system_context, thread);
2842 }
2843
2844 /**
2845  * Switch from a user-thread context to the "master thread" context (a.k.a. system
2846  * context). This switch is made with the intention of exploring a particular
2847  * model-checking action (described by a ModelAction object). Must be called
2848  * from a user-thread context.
2849  *
2850  * @param act The current action that will be explored. May be NULL only if
2851  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2852  * ModelChecker::has_asserted).
2853  * @return The value returned by the current action
2854  */
2855 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2856 {
2857         DBG();
2858         Thread *old = thread_current();
2859         ASSERT(!old->get_pending());
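	/* Stash the action; the model-checker context will consume it via
	 * Thread::get_pending() in ModelChecker::run() */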
2860         old->set_pending(act);
2861         if (Thread::swap(old, &system_context) < 0) {
2862                 perror("swap threads");
2863                 exit(EXIT_FAILURE);
2864         }
2865         return old->get_return_value();
2866 }
2867
2868 /**
2869  * Takes the next step in the execution, if possible.
2870  * @param curr The current step to take
2871  * @return The next Thread to run, if any; NULL if this execution
2872  * should terminate
2873  */
2874 Thread * ModelChecker::take_step(ModelAction *curr)
2875 {
2876         Thread *curr_thrd = get_thread(curr);
2877         ASSERT(curr_thrd->get_state() == THREAD_READY);
2878
2879         curr = check_current_action(curr);
2880
2881         /* Infeasible -> don't take any more steps */
2882         if (is_infeasible())
2883                 return NULL;
2884         else if (isfeasibleprefix() && have_bug_reports()) {
2885                 set_assert();
2886                 return NULL;
2887         }
2888
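	/* Stop exploring this execution once it exceeds the configured step bound */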
2889         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2890                 return NULL;
2891
2892         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2893                 scheduler->remove_thread(curr_thrd);
2894
2895         Thread *next_thrd = get_next_thread(curr);
2896
2897         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2898                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2899
2900         return next_thrd;
2901 }
2902
2903 /** Wrapper to run the user's main function, with appropriate arguments */
2904 void user_main_wrapper(void *)
2905 {
2906         user_main(model->params.argc, model->params.argv);
2907 }
2908
2909 /** @brief Run ModelChecker for the user program */
2910 void ModelChecker::run()
2911 {
2912         do {
2913                 thrd_t user_thread;
2914                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2915                 add_thread(t);
2916
2917                 do {
2918                         /*
2919                          * Stash next pending action(s) for thread(s). There
2920                          * should only need to stash one thread's action--the
2921                          * thread which just took a step--plus the first step
2922                          * for any newly-created thread
2923                          */
2924                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2925                                 thread_id_t tid = int_to_id(i);
2926                                 Thread *thr = get_thread(tid);
2927                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2928                                         switch_from_master(thr);
2929                                 }
2930                         }
2931
2932                         /* Catch assertions from prior take_step or from
2933                          * between-ModelAction bugs (e.g., data races) */
2934                         if (has_asserted())
2935                                 break;
2936
2937                         /* Consume the next action for a Thread */
2938                         ModelAction *curr = t->get_pending();
2939                         t->set_pending(NULL);
2940                         t = take_step(curr);
2941                 } while (t && !t->is_model_thread());
2942
2943                 /*
2944                  * Launch end-of-execution release sequence fixups only when
2945                  * the execution is otherwise feasible AND there are:
2946                  *
2947                  * (1) pending release sequences
2948                  * (2) pending assertions that could be invalidated by a change
2949                  * in clock vectors (i.e., data races)
2950                  * (3) no pending promises
2951                  */
2952                 while (!pending_rel_seqs->empty() &&
2953                                 is_feasible_prefix_ignore_relseq() &&
2954                                 !unrealizedraces.empty()) {
2955                         model_print("*** WARNING: release sequence fixup action "
2956                                         "(%zu pending release sequence(s)) ***\n",
2957                                         pending_rel_seqs->size());
2958                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2959                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
2960                                         model_thread);
2961                         take_step(fixup);
2962                 }
2963         } while (next_execution());
2964
2965         model_print("******* Model-checking complete: *******\n");
2966         print_stats();
2967 }