[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
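/** @brief Global pointer to the ModelChecker instance */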
21 ModelChecker *model;
22
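/** @brief A single bug-report message, formatted and allocated in snapshotting memory */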
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
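/**
 * Look up the action list for a memory location, lazily allocating an empty
 * list and inserting it into the table if none exists yet.
 */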
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
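/**
 * Same as get_safe_ptr_action(), but for tables whose values are vectors of
 * action lists (e.g., obj_thrd_map).
 */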
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
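/** @return The Node at the head of the NodeStack */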
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Select the next thread to execute based on the current action
209  *
210  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
211  * actions should be followed by the execution of their child thread. In either
212  * case, the current action should determine the next thread schedule.
213  *
214  * @param curr The current action
215  * @return The next thread to run, if the current action will determine this
216  * selection; otherwise NULL
217  */
218 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
219 {
220         /* Do not split atomic RMW */
221         if (curr->is_rmwr())
222                 return get_thread(curr);
223         /* Follow CREATE with the created thread */
224         if (curr->get_type() == THREAD_CREATE)
225                 return curr->get_thread_operand();
226         return NULL;
227 }
228
229 /**
230  * @brief Choose the next thread to execute.
231  *
232  * This function chooses the next thread that should execute. It can enforce
233  * execution replay/backtracking or, if the model-checker has no preference
234  * regarding the next thread (i.e., when exploring a new execution ordering),
235  * we defer to the scheduler.
236  *
237  * @return The next chosen thread to run, if any exists; otherwise NULL, if
238  * the current execution should terminate.
239  */
240 Thread * ModelChecker::get_next_thread()
241 {
242         thread_id_t tid;
243
244         /*
245          * Have we completed exploring the preselected path? Then let the
246          * scheduler decide
247          */
248         if (diverge == NULL)
249                 return scheduler->select_next_thread();
250
251         /* Else, we are trying to replay an execution */
252         ModelAction *next = node_stack->get_next()->get_action();
253
254         if (next == diverge) {
255                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
256                         earliest_diverge = diverge;
257
258                 Node *nextnode = next->get_node();
259                 Node *prevnode = nextnode->get_parent();
260                 scheduler->update_sleep_set(prevnode);
261
262                 /* Reached divergence point */
263                 if (nextnode->increment_misc()) {
264                         /* The next node will try to satisfy a different misc_index value. */
265                         tid = next->get_tid();
266                         node_stack->pop_restofstack(2);
267                 } else if (nextnode->increment_promise()) {
268                         /* The next node will try to satisfy a different set of promises. */
269                         tid = next->get_tid();
270                         node_stack->pop_restofstack(2);
271                 } else if (nextnode->increment_read_from()) {
272                         /* The next node will read from a different value. */
273                         tid = next->get_tid();
274                         node_stack->pop_restofstack(2);
275                 } else if (nextnode->increment_relseq_break()) {
276                         /* The next node will try to resolve a release sequence differently */
277                         tid = next->get_tid();
278                         node_stack->pop_restofstack(2);
279                 } else {
280                         ASSERT(prevnode);
281                         /* Make a different thread execute for next step */
282                         scheduler->add_sleep(get_thread(next->get_tid()));
283                         tid = prevnode->get_next_backtrack();
284                         /* Make sure the backtracked thread isn't sleeping. */
285                         node_stack->pop_restofstack(1);
286                         if (diverge == earliest_diverge) {
287                                 earliest_diverge = prevnode->get_action();
288                         }
289                 }
290                 /* Start the round robin scheduler from this thread id */
291                 scheduler->set_scheduler_thread(tid);
292                 /* The correct sleep set is in the parent node. */
293                 execute_sleep_set();
294
295                 DEBUG("*** Divergence point ***\n");
296
297                 diverge = NULL;
298         } else {
299                 tid = next->get_tid();
300         }
301         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
302         ASSERT(tid != THREAD_ID_T_NONE);
303         return thread_map->get(id_to_int(tid));
304 }
305
306 /**
307  * We need to know what the next actions of all threads in the sleep
308  * set will be.  This method flags those next steps (each thread's pending
309  * action) as being executed from within the sleep set.
310  */
311
312 void ModelChecker::execute_sleep_set()
313 {
314         for (unsigned int i = 0; i < get_num_threads(); i++) {
315                 thread_id_t tid = int_to_id(i);
316                 Thread *thr = get_thread(tid);
317                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
318                         thr->get_pending()->set_sleep_flag();
319                 }
320         }
321 }
322
323 /**
324  * @brief Should the current action wake up a given thread?
325  *
326  * @param curr The current action
327  * @param thread The thread that we might wake up
328  * @return True, if we should wake up the sleeping thread; false otherwise
329  */
330 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
331 {
332         const ModelAction *asleep = thread->get_pending();
333         /* Don't allow partial RMW to wake anyone up */
334         if (curr->is_rmwr())
335                 return false;
336         /* Synchronizing actions may have been backtracked */
337         if (asleep->could_synchronize_with(curr))
338                 return true;
339         /* A pending acquire fence may be awoken by any release operation */
340         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
341                 return true;
342         /* Fence-release + store can awake load-acquire on the same location */
343         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
344                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
345                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
346                         return true;
347         }
348         return false;
349 }
350
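/**
 * @brief Remove from the sleep set any threads that the current action
 * should wake up
 * @param curr The current action
 */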
351 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
352 {
353         for (unsigned int i = 0; i < get_num_threads(); i++) {
354                 Thread *thr = get_thread(int_to_id(i));
355                 if (scheduler->is_sleep_set(thr)) {
356                         if (should_wake_up(curr, thr))
357                                 /* Remove this thread from sleep set */
358                                 scheduler->remove_sleep(thr);
359                 }
360         }
361 }
362
363 /** @brief Alert the model-checker that an incorrectly-ordered
364  * synchronization was made */
365 void ModelChecker::set_bad_synchronization()
366 {
367         priv->bad_synchronization = true;
368 }
369
370 /**
371  * Check whether the current trace has triggered an assertion which should halt
372  * its execution.
373  *
374  * @return True, if the execution should be aborted; false otherwise
375  */
376 bool ModelChecker::has_asserted() const
377 {
378         return priv->asserted;
379 }
380
381 /**
382  * Trigger a trace assertion which should cause this execution to be halted.
383  * This can be due to a detected bug or due to an infeasibility that should
384  * halt ASAP.
385  */
386 void ModelChecker::set_assert()
387 {
388         priv->asserted = true;
389 }
390
391 /**
392  * Check if we are in a deadlock. Should only be called at the end of an
393  * execution, although it should not give false positives in the middle of an
394  * execution (there should be some ENABLED thread).
395  *
396  * @return True if program is in a deadlock; false otherwise
397  */
398 bool ModelChecker::is_deadlocked() const
399 {
400         bool blocking_threads = false;
401         for (unsigned int i = 0; i < get_num_threads(); i++) {
402                 thread_id_t tid = int_to_id(i);
403                 if (is_enabled(tid))
404                         return false;
405                 Thread *t = get_thread(tid);
406                 if (!t->is_model_thread() && t->get_pending())
407                         blocking_threads = true;
408         }
409         return blocking_threads;
410 }
411
412 /**
413  * Check if a Thread has entered a circular wait deadlock situation. This will
414  * not check other threads for potential deadlock situations, and may miss
415  * deadlocks involving WAIT.
416  *
417  * @param t The thread which may have entered a deadlock
418  * @return True if this Thread entered a deadlock; false otherwise
419  */
420 bool ModelChecker::is_circular_wait(const Thread *t) const
421 {
422         for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on())
423                 if (waiting == t)
424                         return true;
425         return false;
426 }
427
428 /**
429  * Check if this is a complete execution. That is, have all threads completed
430  * execution (rather than exiting because sleep sets have forced a redundant
431  * execution).
432  *
433  * @return True if the execution is complete.
434  */
435 bool ModelChecker::is_complete_execution() const
436 {
437         for (unsigned int i = 0; i < get_num_threads(); i++)
438                 if (is_enabled(int_to_id(i)))
439                         return false;
440         return true;
441 }
442
443 /**
444  * @brief Assert a bug in the executing program.
445  *
446  * Use this function to assert any sort of bug in the user program. If the
447  * current trace is feasible (actually, a prefix of some feasible execution),
448  * then this execution will be aborted, printing the appropriate message. If
449  * the current trace is not yet feasible, the error message will be stashed and
450  * printed if the execution ever becomes feasible.
451  *
452  * @param msg Descriptive message for the bug (do not include newline char)
453  * @return True if bug is immediately-feasible
454  */
455 bool ModelChecker::assert_bug(const char *msg)
456 {
457         priv->bugs.push_back(new bug_message(msg));
458
459         if (isfeasibleprefix()) {
460                 set_assert();
461                 return true;
462         }
463         return false;
464 }
465
466 /**
467  * @brief Assert a bug in the executing program, asserted by a user thread
468  * @see ModelChecker::assert_bug
469  * @param msg Descriptive message for the bug (do not include newline char)
470  */
471 void ModelChecker::assert_user_bug(const char *msg)
472 {
473         /* If feasible bug, bail out now */
474         if (assert_bug(msg))
475                 switch_to_master(NULL);
476 }
477
478 /** @return True, if any bugs have been reported for this execution */
479 bool ModelChecker::have_bug_reports() const
480 {
481         return priv->bugs.size() != 0;
482 }
483
484 /** @brief Print bug report listing for this execution (if any bugs exist) */
485 void ModelChecker::print_bugs() const
486 {
487         if (have_bug_reports()) {
488                 model_print("Bug report: %zu bug%s detected\n",
489                                 priv->bugs.size(),
490                                 priv->bugs.size() > 1 ? "s" : "");
491                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
492                         priv->bugs[i]->print();
493         }
494 }
495
496 /**
497  * @brief Record end-of-execution stats
498  *
499  * Must be run when exiting an execution. Records various stats.
500  * @see struct execution_stats
501  */
502 void ModelChecker::record_stats()
503 {
504         stats.num_total++;
505         if (!isfeasibleprefix())
506                 stats.num_infeasible++;
507         else if (have_bug_reports())
508                 stats.num_buggy_executions++;
509         else if (is_complete_execution())
510                 stats.num_complete++;
511         else {
512                 stats.num_redundant++;
513
514                 /**
515                  * @todo We can violate this ASSERT() when fairness/sleep sets
516                  * conflict to cause an execution to terminate, e.g. with:
517                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
518                  */
519                 //ASSERT(scheduler->all_threads_sleeping());
520         }
521 }
522
523 /** @brief Print execution stats */
524 void ModelChecker::print_stats() const
525 {
526         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
527         model_print("Number of redundant executions: %d\n", stats.num_redundant);
528         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
529         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
530         model_print("Total executions: %d\n", stats.num_total);
531         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
532 }
533
534 /**
535  * @brief End-of-execution print
536  * @param printbugs Should any existing bugs be printed?
537  */
538 void ModelChecker::print_execution(bool printbugs) const
539 {
540         print_program_output();
541
542         if (params.verbose) {
543                 model_print("Earliest divergence point since last feasible execution:\n");
544                 if (earliest_diverge)
545                         earliest_diverge->print();
546                 else
547                         model_print("(Not set)\n");
548
549                 model_print("\n");
550                 print_stats();
551         }
552
553         /* Don't print invalid bugs */
554         if (printbugs)
555                 print_bugs();
556
557         model_print("\n");
558         print_summary();
559 }
560
561 /**
562  * Queries the model-checker for more executions to explore and, if one
563  * exists, resets the model-checker state to execute a new execution.
564  *
565  * @return If there are more executions to explore, return true. Otherwise,
566  * return false.
567  */
568 bool ModelChecker::next_execution()
569 {
570         DBG();
571         /* Is this execution a feasible execution that's worth bug-checking? */
572         bool complete = isfeasibleprefix() && (is_complete_execution() ||
573                         have_bug_reports());
574
575         /* End-of-execution bug checks */
576         if (complete) {
577                 if (is_deadlocked())
578                         assert_bug("Deadlock detected");
579
580                 checkDataRaces();
581         }
582
583         record_stats();
584
585         /* Output */
586         if (params.verbose || (complete && have_bug_reports()))
587                 print_execution(complete);
588         else
589                 clear_program_output();
590
591         if (complete)
592                 earliest_diverge = NULL;
593
594         if ((diverge = get_next_backtrack()) == NULL)
595                 return false;
596
597         if (DBG_ENABLED()) {
598                 model_print("Next execution will diverge at:\n");
599                 diverge->print();
600         }
601
602         reset_to_initial_state();
603         return true;
604 }
605
606 /**
607  * @brief Find the last fence-related backtracking conflict for a ModelAction
608  *
609  * This function performs the search for the most recent conflicting action
610  * against which we should perform backtracking, as affected by fence
611  * operations. This includes pairs of potentially-synchronizing actions which
612  * occur due to fence-acquire or fence-release, and hence should be explored in
613  * the opposite execution order.
614  *
615  * @param act The current action
616  * @return The most recent action which conflicts with act due to fences
617  */
618 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
619 {
620         /* Only perform release/acquire fence backtracking for stores */
621         if (!act->is_write())
622                 return NULL;
623
624         /* Find a fence-release (or, act is a release) */
625         ModelAction *last_release;
626         if (act->is_release())
627                 last_release = act;
628         else
629                 last_release = get_last_fence_release(act->get_tid());
630         if (!last_release)
631                 return NULL;
632
633         /* Skip past the release */
634         action_list_t *list = action_trace;
635         action_list_t::reverse_iterator rit;
636         for (rit = list->rbegin(); rit != list->rend(); rit++)
637                 if (*rit == last_release)
638                         break;
639         ASSERT(rit != list->rend());
640
641         /* Find a prior:
642          *   load-acquire
643          * or
644          *   load --sb-> fence-acquire */
645         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
646         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
647         bool found_acquire_fences = false;
648         for ( ; rit != list->rend(); rit++) {
649                 ModelAction *prev = *rit;
650                 if (act->same_thread(prev))
651                         continue;
652
653                 int tid = id_to_int(prev->get_tid());
654
655                 if (prev->is_read() && act->same_var(prev)) {
656                         if (prev->is_acquire()) {
657                                 /* Found most recent load-acquire, don't need
658                                  * to search for more fences */
659                                 if (!found_acquire_fences)
660                                         return NULL;
661                         } else {
662                                 prior_loads[tid] = prev;
663                         }
664                 }
665                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
666                         found_acquire_fences = true;
667                         acquire_fences[tid] = prev;
668                 }
669         }
670
671         ModelAction *latest_backtrack = NULL;
672         for (unsigned int i = 0; i < acquire_fences.size(); i++)
673                 if (acquire_fences[i] && prior_loads[i])
674                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
675                                 latest_backtrack = acquire_fences[i];
676         return latest_backtrack;
677 }
678
679 /**
680  * @brief Find the last backtracking conflict for a ModelAction
681  *
682  * This function performs the search for the most recent conflicting action
683  * against which we should perform backtracking. This primarily includes pairs of
684  * synchronizing actions which should be explored in the opposite execution
685  * order.
686  *
687  * @param act The current action
688  * @return The most recent action which conflicts with act
689  */
690 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
691 {
692         switch (act->get_type()) {
693         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
694         case ATOMIC_READ:
695         case ATOMIC_WRITE:
696         case ATOMIC_RMW: {
697                 ModelAction *ret = NULL;
698
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (prev->could_synchronize_with(act)) {
705                                 ret = prev;
706                                 break;
707                         }
708                 }
709
710                 ModelAction *ret2 = get_last_fence_conflict(act);
711                 if (!ret2)
712                         return ret;
713                 if (!ret)
714                         return ret2;
715                 if (*ret < *ret2)
716                         return ret2;
717                 return ret;
718         }
719         case ATOMIC_LOCK:
720         case ATOMIC_TRYLOCK: {
721                 /* linear search: from most recent to oldest */
722                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
723                 action_list_t::reverse_iterator rit;
724                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
725                         ModelAction *prev = *rit;
726                         if (act->is_conflicting_lock(prev))
727                                 return prev;
728                 }
729                 break;
730         }
731         case ATOMIC_UNLOCK: {
732                 /* linear search: from most recent to oldest */
733                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
734                 action_list_t::reverse_iterator rit;
735                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
736                         ModelAction *prev = *rit;
737                         if (!act->same_thread(prev) && prev->is_failed_trylock())
738                                 return prev;
739                 }
740                 break;
741         }
742         case ATOMIC_WAIT: {
743                 /* linear search: from most recent to oldest */
744                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
745                 action_list_t::reverse_iterator rit;
746                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
747                         ModelAction *prev = *rit;
748                         if (!act->same_thread(prev) && prev->is_failed_trylock())
749                                 return prev;
750                         if (!act->same_thread(prev) && prev->is_notify())
751                                 return prev;
752                 }
753                 break;
754         }
755
756         case ATOMIC_NOTIFY_ALL:
757         case ATOMIC_NOTIFY_ONE: {
758                 /* linear search: from most recent to oldest */
759                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
760                 action_list_t::reverse_iterator rit;
761                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
762                         ModelAction *prev = *rit;
763                         if (!act->same_thread(prev) && prev->is_wait())
764                                 return prev;
765                 }
766                 break;
767         }
768         default:
769                 break;
770         }
771         return NULL;
772 }
773
774 /** This method finds backtracking points against which we should try to
775  * reorder the given ModelAction.
776  *
777  * @param act The ModelAction to find backtracking points for.
778  */
779 void ModelChecker::set_backtracking(ModelAction *act)
780 {
781         Thread *t = get_thread(act);
782         ModelAction *prev = get_last_conflict(act);
783         if (prev == NULL)
784                 return;
785
786         Node *node = prev->get_node()->get_parent();
787
788         int low_tid, high_tid;
789         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
790                 low_tid = id_to_int(act->get_tid());
791                 high_tid = low_tid + 1;
792         } else {
793                 low_tid = 0;
794                 high_tid = get_num_threads();
795         }
796
797         for (int i = low_tid; i < high_tid; i++) {
798                 thread_id_t tid = int_to_id(i);
799
800                 /* Make sure this thread can be enabled here. */
801                 if (i >= node->get_num_threads())
802                         break;
803
804                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
805                 if (node->enabled_status(tid) != THREAD_ENABLED)
806                         continue;
807
808                 /* Check if this has been explored already */
809                 if (node->has_been_explored(tid))
810                         continue;
811
812                 /* See if fairness allows */
813                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
814                         bool unfair = false;
815                         for (int t = 0; t < node->get_num_threads(); t++) {
816                                 thread_id_t tother = int_to_id(t);
817                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
818                                         unfair = true;
819                                         break;
820                                 }
821                         }
822                         if (unfair)
823                                 continue;
824                 }
825                 /* Cache the latest backtracking point */
826                 set_latest_backtrack(prev);
827
828                 /* If this is a new backtracking point, mark the tree */
829                 if (!node->set_backtrack(tid))
830                         continue;
831                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
832                                         id_to_int(prev->get_tid()),
833                                         id_to_int(t->get_id()));
834                 if (DBG_ENABLED()) {
835                         prev->print();
836                         act->print();
837                 }
838         }
839 }
840
841 /**
842  * @brief Cache a backtracking point as the "most recent", if eligible
843  *
844  * Note that this does not prepare the NodeStack for this backtracking
845  * operation; it only caches the action on a per-execution basis.
846  *
847  * @param act The operation at which we should explore a different next action
848  * (i.e., backtracking point)
849  * @return True, if this action is now the most recent backtracking point;
850  * false otherwise
851  */
852 bool ModelChecker::set_latest_backtrack(ModelAction *act)
853 {
854         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
855                 priv->next_backtrack = act;
856                 return true;
857         }
858         return false;
859 }
860
861 /**
862  * Returns last backtracking point. The model checker will explore a different
863  * path for this point in the next execution.
864  * @return The ModelAction at which the next execution should diverge.
865  */
866 ModelAction * ModelChecker::get_next_backtrack()
867 {
868         ModelAction *next = priv->next_backtrack;
869         priv->next_backtrack = NULL;
870         return next;
871 }
872
873 /**
874  * Processes a read model action.
875  * @param curr is the read model action to process.
876  * @return True if processing this read updates the mo_graph.
877  */
878 bool ModelChecker::process_read(ModelAction *curr)
879 {
880         Node *node = curr->get_node();
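        /* Loop until a read-from source is settled; a candidate rejected by
         * the recency check may be retried with a different read-from choice */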
881         while (true) {
882                 bool updated = false;
883                 switch (node->get_read_from_status()) {
884                 case READ_FROM_PAST: {
885                         const ModelAction *rf = node->get_read_from_past();
886                         ASSERT(rf);
887
888                         mo_graph->startChanges();
889
890                         ASSERT(!is_infeasible());
891                         if (!check_recency(curr, rf)) {
892                                 if (node->increment_read_from()) {
893                                         mo_graph->rollbackChanges();
894                                         continue;
895                                 } else {
896                                         priv->too_many_reads = true;
897                                 }
898                         }
899
900                         updated = r_modification_order(curr, rf);
901                         read_from(curr, rf);
902                         mo_graph->commitChanges();
903                         mo_check_promises(curr, true);
904                         break;
905                 }
906                 case READ_FROM_PROMISE: {
907                         Promise *promise = curr->get_node()->get_read_from_promise();
908                         if (promise->add_reader(curr))
909                                 priv->failed_promise = true;
910                         curr->set_read_from_promise(promise);
911                         mo_graph->startChanges();
912                         if (!check_recency(curr, promise))
913                                 priv->too_many_reads = true;
914                         updated = r_modification_order(curr, promise);
915                         mo_graph->commitChanges();
916                         break;
917                 }
918                 case READ_FROM_FUTURE: {
919                         /* Read from future value */
920                         struct future_value fv = node->get_future_value();
921                         Promise *promise = new Promise(curr, fv);
922                         curr->set_read_from_promise(promise);
923                         promises->push_back(promise);
924                         mo_graph->startChanges();
925                         updated = r_modification_order(curr, promise);
926                         mo_graph->commitChanges();
927                         break;
928                 }
929                 default:
930                         ASSERT(false);
931                 }
932                 get_thread(curr)->set_return_value(curr->get_return_value());
933                 return updated;
934         }
935 }
936
937 /**
938  * Processes a lock, trylock, unlock, wait, or notify model action.
939  * @param curr The mutex-related model action to process.
940  *
941  * The trylock operation checks whether the lock is already held.  If not,
942  * it falls through to the normal lock case.  If so, it fails and
943  * returns 0.
944  *
945  * The lock operation has already been checked to be enabled, so
946  * it just grabs the lock and synchronizes with the previous unlock.
947  *
948  * The unlock operation has to re-enable all of the threads that are
949  * waiting on the lock.
950  *
951  * @return True if synchronization was updated; false otherwise
952  */
953 bool ModelChecker::process_mutex(ModelAction *curr)
954 {
955         std::mutex *mutex = curr->get_mutex();
956         struct std::mutex_state *state = NULL;
957
958         if (mutex)
959                 state = mutex->get_state();
960
961         switch (curr->get_type()) {
962         case ATOMIC_TRYLOCK: {
963                 bool success = !state->locked;
964                 curr->set_try_lock(success);
965                 if (!success) {
966                         get_thread(curr)->set_return_value(0);
967                         break;
968                 }
969                 get_thread(curr)->set_return_value(1);
970         }
971                 //otherwise fall into the lock case
972         case ATOMIC_LOCK: {
973                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
974                         assert_bug("Lock access before initialization");
975                 state->locked = get_thread(curr);
976                 ModelAction *unlock = get_last_unlock(curr);
977                 //synchronize with the previous unlock statement
978                 if (unlock != NULL) {
979                         curr->synchronize_with(unlock);
980                         return true;
981                 }
982                 break;
983         }
984         case ATOMIC_UNLOCK: {
985                 //unlock the lock
986                 state->locked = NULL;
987                 //wake up the other threads
988                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
989                 //activate all the waiting threads
990                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
991                         scheduler->wake(get_thread(*rit));
992                 }
993                 waiters->clear();
994                 break;
995         }
996         case ATOMIC_WAIT: {
997                 //unlock the lock
998                 state->locked = NULL;
999                 //wake up the other threads
1000                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
1001                 //activate all the waiting threads
1002                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1003                         scheduler->wake(get_thread(*rit));
1004                 }
1005                 waiters->clear();
1006                 //check whether we should go to sleep or not...simulate spurious failures
1007                 if (curr->get_node()->get_misc() == 0) {
1008                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1009                         //disable us
1010                         scheduler->sleep(get_thread(curr));
1011                 }
1012                 break;
1013         }
1014         case ATOMIC_NOTIFY_ALL: {
1015                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1016                 //activate all the waiting threads
1017                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1018                         scheduler->wake(get_thread(*rit));
1019                 }
1020                 waiters->clear();
1021                 break;
1022         }
1023         case ATOMIC_NOTIFY_ONE: {
1024                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1025                 int wakeupthread = curr->get_node()->get_misc();
1026                 action_list_t::iterator it = waiters->begin();
1027                 advance(it, wakeupthread);
1028                 scheduler->wake(get_thread(*it));
1029                 waiters->erase(it);
1030                 break;
1031         }
1032
1033         default:
1034                 ASSERT(0);
1035         }
1036         return false;
1037 }
1038
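/**
 * Buffer a future value from a writer for a potential reader, if the
 * modification order may allow the reader to observe it. May also set a new
 * backtracking point at the reader.
 */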
1039 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1040 {
1041         /* Do more ambitious checks now that mo is more complete */
1042         if (mo_may_allow(writer, reader)) {
1043                 Node *node = reader->get_node();
1044
1045                 /* Find an ancestor thread which exists at the time of the reader */
1046                 Thread *write_thread = get_thread(writer);
1047                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1048                         write_thread = write_thread->get_parent();
1049
1050                 struct future_value fv = {
1051                         writer->get_write_value(),
1052                         writer->get_seq_number() + params.maxfuturedelay,
1053                         write_thread->get_id(),
1054                 };
1055                 if (node->add_future_value(fv))
1056                         set_latest_backtrack(reader);
1057         }
1058 }
1059
1060 /**
1061  * Process a write ModelAction
1062  * @param curr The ModelAction to process
1063  * @return True if the mo_graph was updated or promises were resolved
1064  */
1065 bool ModelChecker::process_write(ModelAction *curr)
1066 {
1067         /* Readers to which we may send our future value */
1068         std::vector< ModelAction *, ModelAlloc<ModelAction *> > send_fv;
1069
1070         bool updated_mod_order = w_modification_order(curr, &send_fv);
1071         int promise_idx = get_promise_to_resolve(curr);
1072         const ModelAction *earliest_promise_reader;
1073         bool updated_promises = false;
1074
1075         if (promise_idx >= 0) {
1076                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1077                 updated_promises = resolve_promise(curr, promise_idx);
1078         } else
1079                 earliest_promise_reader = NULL;
1080
1081         /* Don't send future values to reads after the Promise we resolve */
1082         for (unsigned int i = 0; i < send_fv.size(); i++) {
1083                 ModelAction *read = send_fv[i];
1084                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1085                         futurevalues->push_back(PendingFutureValue(curr, read));
1086         }
1087
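        /* Once no promises remain outstanding, send out all buffered future values */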
1088         if (promises->size() == 0) {
1089                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1090                         struct PendingFutureValue pfv = (*futurevalues)[i];
1091                         add_future_value(pfv.writer, pfv.act);
1092                 }
1093                 futurevalues->clear();
1094         }
1095
1096         mo_graph->commitChanges();
1097         mo_check_promises(curr, false);
1098
1099         get_thread(curr)->set_return_value(VALUE_NONE);
1100         return updated_mod_order || updated_promises;
1101 }
1102
1103 /**
1104  * Process a fence ModelAction
1105  * @param curr The ModelAction to process
1106  * @return True if synchronization was updated
1107  */
1108 bool ModelChecker::process_fence(ModelAction *curr)
1109 {
1110         /*
1111          * fence-relaxed: no-op
1112          * fence-release: only log the occurrence (not in this function), for
1113          *   use in later synchronization
1114          * fence-acquire (this function): search for hypothetical release
1115          *   sequences
1116          */
1117         bool updated = false;
1118         if (curr->is_acquire()) {
1119                 action_list_t *list = action_trace;
1120                 action_list_t::reverse_iterator rit;
1121                 /* Find X : is_read(X) && X --sb-> curr */
1122                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1123                         ModelAction *act = *rit;
1124                         if (act == curr)
1125                                 continue;
1126                         if (act->get_tid() != curr->get_tid())
1127                                 continue;
1128                         /* Stop at the beginning of the thread */
1129                         if (act->is_thread_start())
1130                                 break;
1131                         /* Stop once we reach a prior fence-acquire */
1132                         if (act->is_fence() && act->is_acquire())
1133                                 break;
1134                         if (!act->is_read())
1135                                 continue;
1136                         /* read-acquire will find its own release sequences */
1137                         if (act->is_acquire())
1138                                 continue;
1139
1140                         /* Establish hypothetical release sequences */
1141                         rel_heads_list_t release_heads;
1142                         get_release_seq_heads(curr, act, &release_heads);
1143                         for (unsigned int i = 0; i < release_heads.size(); i++)
1144                                 if (!curr->synchronize_with(release_heads[i]))
1145                                         set_bad_synchronization();
1146                         if (release_heads.size() != 0)
1147                                 updated = true;
1148                 }
1149         }
1150         return updated;
1151 }
1152
1153 /**
1154  * @brief Process the current action for thread-related activity
1155  *
1156  * Performs current-action processing for a THREAD_* ModelAction. Processing
1157  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1158  * synchronization, etc.  This function is a no-op for non-THREAD actions
1159  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1160  *
1161  * @param curr The current action
1162  * @return True if synchronization was updated or a thread completed
1163  */
1164 bool ModelChecker::process_thread_action(ModelAction *curr)
1165 {
1166         bool updated = false;
1167
1168         switch (curr->get_type()) {
1169         case THREAD_CREATE: {
1170                 thrd_t *thrd = (thrd_t *)curr->get_location();
1171                 struct thread_params *params = (struct thread_params *)curr->get_value();
1172                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1173                 add_thread(th);
1174                 th->set_creation(curr);
1175                 /* Promises can be satisfied by children */
1176                 for (unsigned int i = 0; i < promises->size(); i++) {
1177                         Promise *promise = (*promises)[i];
1178                         if (promise->thread_is_available(curr->get_tid()))
1179                                 promise->add_thread(th->get_id());
1180                 }
1181                 break;
1182         }
1183         case THREAD_JOIN: {
1184                 Thread *blocking = curr->get_thread_operand();
1185                 ModelAction *act = get_last_action(blocking->get_id());
1186                 curr->synchronize_with(act);
1187                 updated = true; /* trigger rel-seq checks */
1188                 break;
1189         }
1190         case THREAD_FINISH: {
1191                 Thread *th = get_thread(curr);
1192                 while (!th->wait_list_empty()) {
1193                         ModelAction *act = th->pop_wait_list();
1194                         scheduler->wake(get_thread(act));
1195                 }
1196                 th->complete();
1197                 /* Completed thread can't satisfy promises */
1198                 for (unsigned int i = 0; i < promises->size(); i++) {
1199                         Promise *promise = (*promises)[i];
1200                         if (promise->thread_is_available(th->get_id()))
1201                                 if (promise->eliminate_thread(th->get_id()))
1202                                         priv->failed_promise = true;
1203                 }
1204                 updated = true; /* trigger rel-seq checks */
1205                 break;
1206         }
1207         case THREAD_START: {
1208                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1209                 break;
1210         }
1211         default:
1212                 break;
1213         }
1214
1215         return updated;
1216 }
1217
1218 /**
1219  * @brief Process the current action for release sequence fixup activity
1220  *
1221  * Performs model-checker release sequence fixups for the current action,
1222  * forcing a single pending release sequence to break (with a given, potential
1223  * "loose" write) or to complete (i.e., synchronize). If a pending release
1224  * sequence forms a complete release sequence, then we must perform the fixup
1225  * synchronization, mo_graph additions, etc.
1226  *
1227  * @param curr The current action; must be a release sequence fixup action
1228  * @param work_queue The work queue to which to add work items as they are
1229  * generated
1230  */
1231 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1232 {
1233         const ModelAction *write = curr->get_node()->get_relseq_break();
1234         struct release_seq *sequence = pending_rel_seqs->back();
1235         pending_rel_seqs->pop_back();
1236         ASSERT(sequence);
1237         ModelAction *acquire = sequence->acquire;
1238         const ModelAction *rf = sequence->rf;
1239         const ModelAction *release = sequence->release;
1240         ASSERT(acquire);
1241         ASSERT(release);
1242         ASSERT(rf);
1243         ASSERT(release->same_thread(rf));
1244
1245         if (write == NULL) {
1246                 /**
1247                  * @todo Forcing a synchronization requires that we set
1248                  * modification order constraints. For instance, we can't allow
1249                  * a fixup sequence in which two separate read-acquire
1250                  * operations read from the same sequence, where the first one
1251                  * synchronizes and the other doesn't. Essentially, we can't
1252                  * allow any writes to insert themselves between 'release' and
1253                  * 'rf'
1254                  */
1255
1256                 /* Must synchronize */
1257                 if (!acquire->synchronize_with(release)) {
1258                         set_bad_synchronization();
1259                         return;
1260                 }
1261                 /* Re-check all pending release sequences */
1262                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1263                 /* Re-check act for mo_graph edges */
1264                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1265
1266                 /* propagate synchronization to later actions */
1267                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1268                 for (; (*rit) != acquire; rit++) {
1269                         ModelAction *propagate = *rit;
1270                         if (acquire->happens_before(propagate)) {
1271                                 propagate->synchronize_with(acquire);
1272                                 /* Re-check 'propagate' for mo_graph edges */
1273                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1274                         }
1275                 }
1276         } else {
1277                 /* Break release sequence with new edges:
1278                  *   release --mo--> write --mo--> rf */
1279                 mo_graph->addEdge(release, write);
1280                 mo_graph->addEdge(write, rf);
1281         }
1282
1283         /* See if we have realized a data race */
1284         checkDataRaces();
1285 }
1286
1287 /**
1288  * Initialize the current action by performing one or more of the following
1289  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1290  * in the NodeStack, manipulating backtracking sets, allocating and
1291  * initializing clock vectors, and computing the promises to fulfill.
1292  *
1293  * @param curr The current action, as passed from the user context; may be
1294  * freed/invalidated after the execution of this function, with a different
1295  * action "returned" its place (pass-by-reference)
1296  * @return True if curr is a newly-explored action; false otherwise
1297  */
1298 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1299 {
1300         ModelAction *newcurr;
1301
1302         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1303                 newcurr = process_rmw(*curr);
1304                 delete *curr;
1305
1306                 if (newcurr->is_rmw())
1307                         compute_promises(newcurr);
1308
1309                 *curr = newcurr;
1310                 return false;
1311         }
1312
1313         (*curr)->set_seq_number(get_next_seq_num());
1314
1315         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1316         if (newcurr) {
1317                 /* First restore type and order in case of RMW operation */
1318                 if ((*curr)->is_rmwr())
1319                         newcurr->copy_typeandorder(*curr);
1320
1321                 ASSERT((*curr)->get_location() == newcurr->get_location());
1322                 newcurr->copy_from_new(*curr);
1323
1324                 /* Discard duplicate ModelAction; use action from NodeStack */
1325                 delete *curr;
1326
1327                 /* Always compute new clock vector */
1328                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1329
1330                 *curr = newcurr;
1331                 return false; /* Action was explored previously */
1332         } else {
1333                 newcurr = *curr;
1334
1335                 /* Always compute new clock vector */
1336                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1337
1338                 /* Assign most recent release fence */
1339                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1340
1341                 /*
1342                  * Perform one-time actions when pushing new ModelAction onto
1343                  * NodeStack
1344                  */
1345                 if (newcurr->is_write())
1346                         compute_promises(newcurr);
1347                 else if (newcurr->is_relseq_fixup())
1348                         compute_relseq_breakwrites(newcurr);
1349                 else if (newcurr->is_wait())
1350                         newcurr->get_node()->set_misc_max(2);
1351                 else if (newcurr->is_notify_one()) {
1352                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1353                 }
1354                 return true; /* This was a new ModelAction */
1355         }
1356 }
1357
1358 /**
1359  * @brief Establish reads-from relation between two actions
1360  *
1361  * Perform basic operations involved with establishing a concrete rf relation,
1362  * including setting the ModelAction data and checking for release sequences.
1363  *
1364  * @param act The action that is reading (must be a read)
1365  * @param rf The action from which we are reading (must be a write)
1366  *
1367  * @return True if this read established synchronization
1368  */
1369 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1370 {
1371         ASSERT(rf);
1372         act->set_read_from(rf);
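        /* Only an acquire read can synchronize with the release sequence(s)
         * headed by 'rf' */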
1373         if (act->is_acquire()) {
1374                 rel_heads_list_t release_heads;
1375                 get_release_seq_heads(act, act, &release_heads);
1376                 int num_heads = release_heads.size();
1377                 for (unsigned int i = 0; i < release_heads.size(); i++)
1378                         if (!act->synchronize_with(release_heads[i])) {
1379                                 set_bad_synchronization();
1380                                 num_heads--;
1381                         }
1382                 return num_heads > 0;
1383         }
1384         return false;
1385 }
1386
1387 /**
1388  * Check promises and eliminate potentially-satisfying threads when a thread is
1389  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1390  * no longer satisfy a promise generated from that thread.
1391  *
1392  * @param blocker The thread on which a thread is waiting
1393  * @param waiting The waiting thread
1394  */
1395 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1396 {
1397         for (unsigned int i = 0; i < promises->size(); i++) {
1398                 Promise *promise = (*promises)[i];
1399                 if (!promise->thread_is_available(waiting->get_id()))
1400                         continue;
1401                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1402                         ModelAction *reader = promise->get_reader(j);
1403                         if (reader->get_tid() != blocker->get_id())
1404                                 continue;
1405                         if (promise->eliminate_thread(waiting->get_id())) {
1406                                 /* Promise has failed */
1407                                 priv->failed_promise = true;
1408                         } else {
1409                                 /* Only eliminate the 'waiting' thread once */
1410                                 return;
1411                         }
1412                 }
1413         }
1414 }
1415
1416 /**
1417  * @brief Check whether a model action is enabled.
1418  *
1419  * Checks whether a lock or join operation would succeed (i.e., the lock is not
1420  * already held, or the joined thread has already completed). If it would not
1421  * succeed, the action is placed on a waiter list.
1422  *
1423  * @param curr The ModelAction to check.
1424  * @return True if the action is enabled; false if it was placed on a waiter list.
1425  */
1426 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1427         if (curr->is_lock()) {
1428                 std::mutex *lock = (std::mutex *)curr->get_location();
1429                 struct std::mutex_state *state = lock->get_state();
1430                 if (state->locked) {
1431                         //Stick the action in the appropriate waiting queue
1432                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1433                         return false;
1434                 }
1435         } else if (curr->get_type() == THREAD_JOIN) {
1436                 Thread *blocking = (Thread *)curr->get_location();
1437                 if (!blocking->is_complete()) {
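                        /* The joined thread has not finished: queue this join
                         * on its wait list and prune any promises that the
                         * waiting thread can no longer satisfy */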
1438                         blocking->push_wait_list(curr);
1439                         thread_blocking_check_promises(blocking, get_thread(curr));
1440                         return false;
1441                 }
1442         }
1443
1444         return true;
1445 }
1446
1447 /**
1448  * This is the heart of the model checker routine. It performs model-checking
1449  * actions corresponding to a given "current action." Among other tasks, it
1450  * calculates reads-from relationships, updates synchronization clock vectors,
1451  * forms a memory_order constraints graph, and handles replay/backtrack
1452  * execution when running permutations of previously-observed executions.
1453  *
1454  * @param curr The current action to process
1455  * @return The ModelAction that is actually executed; may be different than
1456  * curr; may be NULL, if the current action is not enabled to run
1457  */
1458 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1459 {
1460         ASSERT(curr);
1461         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1462
1463         if (!check_action_enabled(curr)) {
1464                 /* Make the execution look like we chose to run this action
1465                  * much later, when a lock/join can succeed */
1466                 get_thread(curr)->set_pending(curr);
1467                 scheduler->sleep(get_thread(curr));
1468                 return NULL;
1469         }
1470
1471         bool newly_explored = initialize_curr_action(&curr);
1472
1473         DBG();
1474         if (DBG_ENABLED())
1475                 curr->print();
1476
1477         wake_up_sleeping_actions(curr);
1478
1479         /* Add the action to lists before any other model-checking tasks */
1480         if (!second_part_of_rmw)
1481                 add_action_to_lists(curr);
1482
1483         /* Build may_read_from set for newly-created actions */
1484         if (newly_explored && curr->is_read())
1485                 build_may_read_from(curr);
1486
1487         /* Initialize work_queue with the "current action" work */
1488         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
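        /* Drain the work queue: processing 'curr' may enqueue follow-up items
         * (release-sequence checks, mo-edge re-checks) until nothing remains or
         * a bug assertion halts this execution */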
1489         while (!work_queue.empty() && !has_asserted()) {
1490                 WorkQueueEntry work = work_queue.front();
1491                 work_queue.pop_front();
1492
1493                 switch (work.type) {
1494                 case WORK_CHECK_CURR_ACTION: {
1495                         ModelAction *act = work.action;
1496                         bool update = false; /* update this location's release seq's */
1497                         bool update_all = false; /* update all release seq's */
1498
1499                         if (process_thread_action(curr))
1500                                 update_all = true;
1501
1502                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1503                                 update = true;
1504
1505                         if (act->is_write() && process_write(act))
1506                                 update = true;
1507
1508                         if (act->is_fence() && process_fence(act))
1509                                 update_all = true;
1510
1511                         if (act->is_mutex_op() && process_mutex(act))
1512                                 update_all = true;
1513
1514                         if (act->is_relseq_fixup())
1515                                 process_relseq_fixup(curr, &work_queue);
1516
1517                         if (update_all)
1518                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1519                         else if (update)
1520                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1521                         break;
1522                 }
1523                 case WORK_CHECK_RELEASE_SEQ:
1524                         resolve_release_sequences(work.location, &work_queue);
1525                         break;
1526                 case WORK_CHECK_MO_EDGES: {
1527                         /** @todo Complete verification of work_queue */
1528                         ModelAction *act = work.action;
1529                         bool updated = false;
1530
1531                         if (act->is_read()) {
1532                                 const ModelAction *rf = act->get_reads_from();
1533                                 const Promise *promise = act->get_reads_from_promise();
1534                                 if (rf) {
1535                                         if (r_modification_order(act, rf))
1536                                                 updated = true;
1537                                 } else if (promise) {
1538                                         if (r_modification_order(act, promise))
1539                                                 updated = true;
1540                                 }
1541                         }
1542                         if (act->is_write()) {
1543                                 if (w_modification_order(act, NULL))
1544                                         updated = true;
1545                         }
1546                         mo_graph->commitChanges();
1547
1548                         if (updated)
1549                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1550                         break;
1551                 }
1552                 default:
1553                         ASSERT(false);
1554                         break;
1555                 }
1556         }
1557
1558         check_curr_backtracking(curr);
1559         set_backtracking(curr);
1560         return curr;
1561 }
1562
1563 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1564 {
1565         Node *currnode = curr->get_node();
1566         Node *parnode = currnode->get_parent();
1567
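        /* Record 'curr' as the latest backtracking point if its node (or its
         * parent) still has unexplored alternatives: backtracking threads,
         * reads-from candidates, promises, misc choices, or relseq breaks */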
1568         if ((parnode && !parnode->backtrack_empty()) ||
1569                          !currnode->misc_empty() ||
1570                          !currnode->read_from_empty() ||
1571                          !currnode->promise_empty() ||
1572                          !currnode->relseq_break_empty()) {
1573                 set_latest_backtrack(curr);
1574         }
1575 }
1576
1577 bool ModelChecker::promises_expired() const
1578 {
1579         for (unsigned int i = 0; i < promises->size(); i++) {
1580                 Promise *promise = (*promises)[i];
1581                 if (promise->get_expiration() < priv->used_sequence_numbers)
1582                         return true;
1583         }
1584         return false;
1585 }
1586
1587 /**
1588  * This is the strongest feasibility check available.
1589  * @return whether the current trace (partial or complete) must be a prefix of
1590  * a feasible trace.
1591  */
1592 bool ModelChecker::isfeasibleprefix() const
1593 {
1594         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1595 }
1596
1597 /**
1598  * Print diagnostic information about an infeasible execution
1599  * @param prefix A string to prefix the output with; if NULL, then a default
1600  * message prefix will be provided
1601  */
1602 void ModelChecker::print_infeasibility(const char *prefix) const
1603 {
1604         char buf[100];
1605         char *ptr = buf;
1606         if (mo_graph->checkForCycles())
1607                 ptr += sprintf(ptr, "[mo cycle]");
1608         if (priv->failed_promise)
1609                 ptr += sprintf(ptr, "[failed promise]");
1610         if (priv->too_many_reads)
1611                 ptr += sprintf(ptr, "[too many reads]");
1612         if (priv->no_valid_reads)
1613                 ptr += sprintf(ptr, "[no valid reads-from]");
1614         if (priv->bad_synchronization)
1615                 ptr += sprintf(ptr, "[bad sw ordering]");
1616         if (promises_expired())
1617                 ptr += sprintf(ptr, "[promise expired]");
1618         if (promises->size() != 0)
1619                 ptr += sprintf(ptr, "[unresolved promise]");
1620         if (ptr != buf)
1621                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1622 }
1623
1624 /**
1625  * Returns whether the current completed trace is feasible, except for pending
1626  * release sequences.
1627  */
1628 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1629 {
1630         return !is_infeasible() && promises->size() == 0;
1631 }
1632
1633 /**
1634  * Check if the current partial trace is infeasible. Does not check any
1635  * end-of-execution flags, which might rule out the execution. Thus, this is
1636  * useful only for ruling an execution as infeasible.
1637  * @return whether the current partial trace is infeasible.
1638  */
1639 bool ModelChecker::is_infeasible() const
1640 {
1641         return mo_graph->checkForCycles() ||
1642                 priv->no_valid_reads ||
1643                 priv->failed_promise ||
1644                 priv->too_many_reads ||
1645                 priv->bad_synchronization ||
1646                 promises_expired();
1647 }
1648
1649 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1650 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1651         ModelAction *lastread = get_last_action(act->get_tid());
1652         lastread->process_rmw(act);
1653         if (act->is_rmw()) {
1654                 if (lastread->get_reads_from())
1655                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1656                 else
1657                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1658                 mo_graph->commitChanges();
1659         }
1660         return lastread;
1661 }
1662
1663 /**
1664  * A helper function for ModelChecker::check_recency, to check whether the current
1665  * thread could have read from a different write/promise at each of its last
1666  * 'params.maxreads' reads and whether that write/promise should become visible
1667  * (i.e., is ordered later in the modification order). This helps model memory liveness.
1668  *
1669  * @param curr The current action. Must be a read.
1670  * @param rf The write/promise from which we plan to read
1671  * @param other_rf The alternative write/promise from which we might instead read
1672  * @return True if the thread could have read from other_rf for the last params.maxreads steps
1673  */
1674 template <typename T, typename U>
1675 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1676 {
1677         /* Need a different write/promise */
1678         if (other_rf->equals(rf))
1679                 return false;
1680
1681         /* Only look for "newer" writes/promises */
1682         if (!mo_graph->checkReachable(rf, other_rf))
1683                 return false;
1684
1685         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1686         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1687         action_list_t::reverse_iterator rit = list->rbegin();
1688         ASSERT((*rit) == curr);
1689         /* Skip past curr */
1690         rit++;
1691
1692         /* Does this write/promise work for everyone? */
1693         for (int i = 0; i < params.maxreads; i++, rit++) {
1694                 ModelAction *act = *rit;
1695                 if (!act->may_read_from(other_rf))
1696                         return false;
1697         }
1698         return true;
1699 }
1700
1701 /**
1702  * Checks whether a thread has read from the same write or Promise too many
1703  * times without seeing the effects of a later write/Promise.
1704  *
1705  * Basic idea:
1706  * 1) there must be a different write/promise that we could read from,
1707  * 2) we must have read from the same write/promise in excess of maxreads times,
1708  * 3) that other write/promise must have been in the reads_from set at each of those maxreads reads, and
1709  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1710  *
1711  * If so, we decide that the execution is no longer feasible.
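 *
 * Example (informal sketch, not code from this project): a reader spinning on
 *     while (!flag.load(std::memory_order_relaxed)) {}
 * while another thread performs flag.store(true, ...) should not read the old
 * value forever; bounding repeated reads from the same write/Promise by
 * 'maxreads' approximates this liveness requirement.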
1712  *
1713  * @param curr The current action. Must be a read.
1714  * @param rf The ModelAction/Promise from which we might read.
1715  * @return True if the read should succeed; false otherwise
1716  */
1717 template <typename T>
1718 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1719 {
1720         if (!params.maxreads)
1721                 return true;
1722
1723         // NOTE: The next check is just an optimization; it is not strictly necessary.
1724         if (curr->get_node()->get_read_from_past_size() +
1725                         curr->get_node()->get_read_from_promise_size() <= 1)
1726                 return true;
1727
1728         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1729         int tid = id_to_int(curr->get_tid());
1730         ASSERT(tid < (int)thrd_lists->size());
1731         action_list_t *list = &(*thrd_lists)[tid];
1732         action_list_t::reverse_iterator rit = list->rbegin();
1733         ASSERT((*rit) == curr);
1734         /* Skip past curr */
1735         rit++;
1736
1737         action_list_t::reverse_iterator ritcopy = rit;
1738         /* See if we have enough reads from the same value */
1739         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1740                 if (ritcopy == list->rend())
1741                         return true;
1742                 ModelAction *act = *ritcopy;
1743                 if (!act->is_read())
1744                         return true;
1745                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1746                         return true;
1747                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1748                         return true;
1749                 if (act->get_node()->get_read_from_past_size() +
1750                                 act->get_node()->get_read_from_promise_size() <= 1)
1751                         return true;
1752         }
1753         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1754                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1755                 if (should_read_instead(curr, rf, write))
1756                         return false; /* liveness failure */
1757         }
1758         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1759                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1760                 if (should_read_instead(curr, rf, promise))
1761                         return false; /* liveness failure */
1762         }
1763         return true;
1764 }
1765
1766 /**
1767  * Updates the mo_graph with the constraints imposed from the current
1768  * read.
1769  *
1770  * Basic idea is the following: Go through each other thread and find
1771  * the last action that happened before our read.  Two cases:
1772  *
1773  * (1) The action is a write => that write must either occur before
1774  * the write we read from or be the write we read from.
1775  *
1776  * (2) The action is a read => the write that that action read from
1777  * must occur before the write we read from or be the same write.
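 *
 * Example (informal): if the most recent action in another thread that
 * happens before our read is a store W to the same location, we add
 * W --mo--> rf (unless W is rf itself); if it is instead a load that read
 * from W', we add W' --mo--> rf.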
1778  *
1779  * @param curr The current action. Must be a read.
1780  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1781  * @return True if modification order edges were added; false otherwise
1782  */
1783 template <typename rf_type>
1784 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1785 {
1786         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1787         unsigned int i;
1788         bool added = false;
1789         ASSERT(curr->is_read());
1790
1791         /* Last SC fence in the current thread */
1792         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1793
1794         /* Iterate over all threads */
1795         for (i = 0; i < thrd_lists->size(); i++) {
1796                 /* Last SC fence in thread i */
1797                 ModelAction *last_sc_fence_thread_local = NULL;
1798                 if (int_to_id((int)i) != curr->get_tid())
1799                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1800
1801                 /* Last SC fence in thread i, before last SC fence in current thread */
1802                 ModelAction *last_sc_fence_thread_before = NULL;
1803                 if (last_sc_fence_local)
1804                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1805
1806                 /* Iterate over actions in thread, starting from most recent */
1807                 action_list_t *list = &(*thrd_lists)[i];
1808                 action_list_t::reverse_iterator rit;
1809                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1810                         ModelAction *act = *rit;
1811
1812                         if (act->is_write() && !act->equals(rf) && act != curr) {
1813                                 /* C++, Section 29.3 statement 5 */
1814                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1815                                                 *act < *last_sc_fence_thread_local) {
1816                                         added = mo_graph->addEdge(act, rf) || added;
1817                                         break;
1818                                 }
1819                                 /* C++, Section 29.3 statement 4 */
1820                                 else if (act->is_seqcst() && last_sc_fence_local &&
1821                                                 *act < *last_sc_fence_local) {
1822                                         added = mo_graph->addEdge(act, rf) || added;
1823                                         break;
1824                                 }
1825                                 /* C++, Section 29.3 statement 6 */
1826                                 else if (last_sc_fence_thread_before &&
1827                                                 *act < *last_sc_fence_thread_before) {
1828                                         added = mo_graph->addEdge(act, rf) || added;
1829                                         break;
1830                                 }
1831                         }
1832
1833                         /*
1834                          * Include at most one act per-thread that "happens
1835                          * before" curr. Don't consider reflexively.
1836                          */
1837                         if (act->happens_before(curr) && act != curr) {
1838                                 if (act->is_write()) {
1839                                         if (!act->equals(rf)) {
1840                                                 added = mo_graph->addEdge(act, rf) || added;
1841                                         }
1842                                 } else {
1843                                         const ModelAction *prevrf = act->get_reads_from();
1844                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1845                                         if (prevrf) {
1846                                                 if (!prevrf->equals(rf))
1847                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1848                                         } else if (!prevrf_promise->equals(rf)) {
1849                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1850                                         }
1851                                 }
1852                                 break;
1853                         }
1854                 }
1855         }
1856
1857         /*
1858          * All compatible, thread-exclusive promises must be ordered after any
1859          * concrete loads from the same thread
1860          */
1861         for (unsigned int i = 0; i < promises->size(); i++)
1862                 if ((*promises)[i]->is_compatible_exclusive(curr))
1863                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1864
1865         return added;
1866 }
1867
1868 /**
1869  * Updates the mo_graph with the constraints imposed from the current write.
1870  *
1871  * Basic idea is the following: Go through each other thread and find
1872  * the latest action that happened before our write.  Two cases:
1873  *
1874  * (1) The action is a write => that write must occur before
1875  * the current write
1876  *
1877  * (2) The action is a read => the write that that action read from
1878  * must occur before the current write.
1879  *
1880  * This method also handles two other issues:
1881  *
1882  * (I) Sequential Consistency: Making sure that if the current write is
1883  * seq_cst, that it occurs after the previous seq_cst write.
1884  *
1885  * (II) Sending the write back to non-synchronizing reads.
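 * That is, the store is recorded as a potential future value that such reads
 * may later be re-explored to read from.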
1886  *
1887  * @param curr The current action. Must be a write.
1888  * @param send_fv A vector for stashing reads to which we may pass our future
1889  * value. If NULL, then don't record any future values.
1890  * @return True if modification order edges were added; false otherwise
1891  */
1892 bool ModelChecker::w_modification_order(ModelAction *curr, std::vector< ModelAction *, ModelAlloc<ModelAction *> > *send_fv)
1893 {
1894         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1895         unsigned int i;
1896         bool added = false;
1897         ASSERT(curr->is_write());
1898
1899         if (curr->is_seqcst()) {
1900                 /* We have to at least see the last sequentially consistent write,
1901                          so we are initialized. */
1902                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1903                 if (last_seq_cst != NULL) {
1904                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1905                 }
1906         }
1907
1908         /* Last SC fence in the current thread */
1909         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1910
1911         /* Iterate over all threads */
1912         for (i = 0; i < thrd_lists->size(); i++) {
1913                 /* Last SC fence in thread i, before last SC fence in current thread */
1914                 ModelAction *last_sc_fence_thread_before = NULL;
1915                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1916                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1917
1918                 /* Iterate over actions in thread, starting from most recent */
1919                 action_list_t *list = &(*thrd_lists)[i];
1920                 action_list_t::reverse_iterator rit;
1921                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1922                         ModelAction *act = *rit;
1923                         if (act == curr) {
1924                                 /*
1925                                  * 1) If RMW and it actually read from something, then we
1926                                  * already have all relevant edges, so just skip to next
1927                                  * thread.
1928                                  *
1929                                  * 2) If RMW and it didn't read from anything, we should take
1930                                  * whatever edge we can get to speed up convergence.
1931                                  *
1932                                  * 3) If normal write, we need to look at earlier actions, so
1933                                  * continue processing list.
1934                                  */
1935                                 if (curr->is_rmw()) {
1936                                         if (curr->get_reads_from() != NULL)
1937                                                 break;
1938                                         else
1939                                                 continue;
1940                                 } else
1941                                         continue;
1942                         }
1943
1944                         /* C++, Section 29.3 statement 7 */
1945                         if (last_sc_fence_thread_before && act->is_write() &&
1946                                         *act < *last_sc_fence_thread_before) {
1947                                 added = mo_graph->addEdge(act, curr) || added;
1948                                 break;
1949                         }
1950
1951                         /*
1952                          * Include at most one act per-thread that "happens
1953                          * before" curr
1954                          */
1955                         if (act->happens_before(curr)) {
1956                                 /*
1957                                  * Note: if act is RMW, just add edge:
1958                                  *   act --mo--> curr
1959                                  * The following edge should be handled elsewhere:
1960                                  *   readfrom(act) --mo--> act
1961                                  */
1962                                 if (act->is_write())
1963                                         added = mo_graph->addEdge(act, curr) || added;
1964                                 else if (act->is_read()) {
1965                                         // if the previous read has no concrete reads-from (it reads from a promise), just keep going
1966                                         if (act->get_reads_from() == NULL)
1967                                                 continue;
1968                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1969                                 }
1970                                 break;
1971                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1972                                                      !act->same_thread(curr)) {
1973                                 /* We have an action that:
1974                                    (1) did not happen before us
1975                                    (2) is a read and we are a write
1976                                    (3) cannot synchronize with us
1977                                    (4) is in a different thread
1978                                    =>
1979                                    that read could potentially read from our write.  Note that
1980                                    these checks are overly conservative at this point; we'll
1981                                    do more checks before actually removing the
1982                                    pendingfuturevalue.
1983
1984                                  */
1985                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1986                                         if (!is_infeasible())
1987                                                 send_fv->push_back(act);
1988                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1989                                                 add_future_value(curr, act);
1990                                 }
1991                         }
1992                 }
1993         }
1994
1995         /*
1996          * All compatible, thread-exclusive promises must be ordered after any
1997          * concrete stores from the same thread, or else they can be merged with
1998          * this store later
1999          */
2000         for (unsigned int i = 0; i < promises->size(); i++)
2001                 if ((*promises)[i]->is_compatible_exclusive(curr))
2002                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2003
2004         return added;
2005 }
2006
2007 /** Arbitrary reads from the future are not allowed.  Section 29.3
2008  * part 9 places some constraints.  This method checks one consequence of that
2009  * constraint.  Others require compiler support. */
2010 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
2011 {
2012         if (!writer->is_rmw())
2013                 return true;
2014
2015         if (!reader->is_rmw())
2016                 return true;
2017
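        /* Walk back along the chain of RMWs that produced 'writer'; if that
         * chain passes through 'reader', then letting 'reader' read from
         * 'writer' would make the value circularly depend on itself */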
2018         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2019                 if (search == reader)
2020                         return false;
2021                 if (search->get_tid() == reader->get_tid() &&
2022                                 search->happens_before(reader))
2023                         break;
2024         }
2025
2026         return true;
2027 }
2028
2029 /**
2030  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2031  * some constraints. This method checks the following constraint (others
2032  * require compiler support):
2033  *
2034  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2035  */
2036 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2037 {
2038         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2039         unsigned int i;
2040         /* Iterate over all threads */
2041         for (i = 0; i < thrd_lists->size(); i++) {
2042                 const ModelAction *write_after_read = NULL;
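                /* Track a write W (or the write read by a load) in this thread
                 * with reader --hb--> W; if also W --mo--> writer, then the
                 * read from 'writer' must be disallowed */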
2043
2044                 /* Iterate over actions in thread, starting from most recent */
2045                 action_list_t *list = &(*thrd_lists)[i];
2046                 action_list_t::reverse_iterator rit;
2047                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2048                         ModelAction *act = *rit;
2049
2050                         /* Don't disallow due to act == reader */
2051                         if (!reader->happens_before(act) || reader == act)
2052                                 break;
2053                         else if (act->is_write())
2054                                 write_after_read = act;
2055                         else if (act->is_read() && act->get_reads_from() != NULL)
2056                                 write_after_read = act->get_reads_from();
2057                 }
2058
2059                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2060                         return false;
2061         }
2062         return true;
2063 }
2064
2065 /**
2066  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2067  * The ModelAction under consideration is expected to be taking part in
2068  * release/acquire synchronization as an object of the "reads from" relation.
2069  * Note that this can only provide release sequence support for RMW chains
2070  * which do not read from the future, as those actions cannot be traced until
2071  * their "promise" is fulfilled. Similarly, we may not even establish the
2072  * presence of a release sequence with certainty, as some modification order
2073  * constraints may be decided further in the future. Thus, this function
2074  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2075  * and a boolean representing certainty.
2076  *
2077  * @param rf The action that might be part of a release sequence. Must be a
2078  * write.
2079  * @param release_heads A pass-by-reference style return parameter. After
2080  * execution of this function, release_heads will contain the heads of all the
2081  * relevant release sequences, if any exist with certainty
2082  * @param pending A pass-by-reference style return parameter which is only used
2083  * when returning false (i.e., uncertain). Returns most information regarding
2084  * an uncertain release sequence, including any write operations that might
2085  * break the sequence.
2086  * @return true, if the ModelChecker is certain that release_heads is complete;
2087  * false otherwise
2088  */
2089 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2090                 rel_heads_list_t *release_heads,
2091                 struct release_seq *pending) const
2092 {
2093         /* Only check for release sequences if there are no cycles */
2094         if (mo_graph->checkForCycles())
2095                 return false;
2096
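        /* Walk backwards through the RMW chain that produced 'rf', collecting
         * each release store (or its prior fence-release) as a potential head */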
2097         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2098                 ASSERT(rf->is_write());
2099
2100                 if (rf->is_release())
2101                         release_heads->push_back(rf);
2102                 else if (rf->get_last_fence_release())
2103                         release_heads->push_back(rf->get_last_fence_release());
2104                 if (!rf->is_rmw())
2105                         break; /* End of RMW chain */
2106
2107                 /** @todo Need to be smarter here...  In the linux lock
2108                  * example, this will run to the beginning of the program for
2109                  * every acquire. */
2110                 /** @todo The way to be smarter here is to keep going until 1
2111                  * thread has a release preceded by an acquire and you've seen
2112                  *       both. */
2113
2114                 /* acq_rel RMW is a sufficient stopping condition */
2115                 if (rf->is_acquire() && rf->is_release())
2116                         return true; /* complete */
2117         }
2118         if (!rf) {
2119                 /* read from future: need to settle this later */
2120                 pending->rf = NULL;
2121                 return false; /* incomplete */
2122         }
2123
2124         if (rf->is_release())
2125                 return true; /* complete */
2126
2127         /* else relaxed write
2128          * - check for fence-release in the same thread (29.8, stmt. 3)
2129          * - check modification order for contiguous subsequence
2130          *   -> rf must be same thread as release */
2131
2132         const ModelAction *fence_release = rf->get_last_fence_release();
2133         /* Synchronize with a fence-release unconditionally; we don't need to
2134          * find any more "contiguous subsequence..." for it */
2135         if (fence_release)
2136                 release_heads->push_back(fence_release);
2137
2138         int tid = id_to_int(rf->get_tid());
2139         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2140         action_list_t *list = &(*thrd_lists)[tid];
2141         action_list_t::const_reverse_iterator rit;
2142
2143         /* Find rf in the thread list */
2144         rit = std::find(list->rbegin(), list->rend(), rf);
2145         ASSERT(rit != list->rend());
2146
2147         /* Find the last {write,fence}-release */
2148         for (; rit != list->rend(); rit++) {
2149                 if (fence_release && *(*rit) < *fence_release)
2150                         break;
2151                 if ((*rit)->is_release())
2152                         break;
2153         }
2154         if (rit == list->rend()) {
2155                 /* No write-release in this thread */
2156                 return true; /* complete */
2157         } else if (fence_release && *(*rit) < *fence_release) {
2158                 /* The fence-release is more recent (and so, "stronger") than
2159                  * the most recent write-release */
2160                 return true; /* complete */
2161         } /* else, need to establish contiguous release sequence */
2162         ModelAction *release = *rit;
2163
2164         ASSERT(rf->same_thread(release));
2165
2166         pending->writes.clear();
2167
2168         bool certain = true;
2169         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2170                 if (id_to_int(rf->get_tid()) == (int)i)
2171                         continue;
2172                 list = &(*thrd_lists)[i];
2173
2174                 /* Can we ensure no future writes from this thread may break
2175                  * the release seq? */
2176                 bool future_ordered = false;
2177
2178                 ModelAction *last = get_last_action(int_to_id(i));
2179                 Thread *th = get_thread(int_to_id(i));
2180                 if ((last && rf->happens_before(last)) ||
2181                                 !is_enabled(th) ||
2182                                 th->is_complete())
2183                         future_ordered = true;
2184
2185                 ASSERT(!th->is_model_thread() || future_ordered);
2186
2187                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2188                         const ModelAction *act = *rit;
2189                         /* Reach synchronization -> this thread is complete */
2190                         if (act->happens_before(release))
2191                                 break;
2192                         if (rf->happens_before(act)) {
2193                                 future_ordered = true;
2194                                 continue;
2195                         }
2196
2197                         /* Only non-RMW writes can break release sequences */
2198                         if (!act->is_write() || act->is_rmw())
2199                                 continue;
2200
2201                         /* Check modification order */
2202                         if (mo_graph->checkReachable(rf, act)) {
2203                                 /* rf --mo--> act */
2204                                 future_ordered = true;
2205                                 continue;
2206                         }
2207                         if (mo_graph->checkReachable(act, release))
2208                                 /* act --mo--> release */
2209                                 break;
2210                         if (mo_graph->checkReachable(release, act) &&
2211                                       mo_graph->checkReachable(act, rf)) {
2212                                 /* release --mo-> act --mo--> rf */
2213                                 return true; /* complete */
2214                         }
2215                         /* act may break release sequence */
2216                         pending->writes.push_back(act);
2217                         certain = false;
2218                 }
2219                 if (!future_ordered)
2220                         certain = false; /* This thread is uncertain */
2221         }
2222
2223         if (certain) {
2224                 release_heads->push_back(release);
2225                 pending->writes.clear();
2226         } else {
2227                 pending->release = release;
2228                 pending->rf = rf;
2229         }
2230         return certain;
2231 }
2232
2233 /**
2234  * An interface for getting the release sequence head(s) with which a
2235  * given ModelAction must synchronize. This function only returns a non-empty
2236  * result when it can locate a release sequence head with certainty. Otherwise,
2237  * it may mark the internal state of the ModelChecker so that it will handle
2238  * the release sequence at a later time, causing @a acquire to update its
2239  * synchronization at some later point in execution.
2240  *
2241  * @param acquire The 'acquire' action that may synchronize with a release
2242  * sequence
2243  * @param read The read action that may read from a release sequence; this may
2244  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2245  * when 'acquire' is a fence-acquire)
2246  * @param release_heads A pass-by-reference return parameter. Will be filled
2247  * with the head(s) of the release sequence(s), if they exist with certainty.
2248  * @see ModelChecker::release_seq_heads
2249  */
2250 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2251                 ModelAction *read, rel_heads_list_t *release_heads)
2252 {
2253         const ModelAction *rf = read->get_reads_from();
2254         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2255         sequence->acquire = acquire;
2256         sequence->read = read;
2257
2258         if (!release_seq_heads(rf, release_heads, sequence)) {
2259                 /* add act to 'lazy checking' list */
2260                 pending_rel_seqs->push_back(sequence);
2261         } else {
2262                 snapshot_free(sequence);
2263         }
2264 }
2265
2266 /**
2267  * Attempt to resolve all stashed operations that might synchronize with a
2268  * release sequence for a given location. This implements the "lazy" portion of
2269  * determining whether or not a release sequence was contiguous, since not all
2270  * modification order information is present at the time an action occurs.
2271  *
2272  * @param location The location/object that should be checked for release
2273  * sequence resolutions. A NULL value means to check all locations.
2274  * @param work_queue The work queue to which to add work items as they are
2275  * generated
2276  * @return True if any updates occurred (new synchronization, new mo_graph
2277  * edges)
2278  */
2279 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2280 {
2281         bool updated = false;
2282         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2283         while (it != pending_rel_seqs->end()) {
2284                 struct release_seq *pending = *it;
2285                 ModelAction *acquire = pending->acquire;
2286                 const ModelAction *read = pending->read;
2287
2288                 /* Only resolve sequences on the given location, if provided */
2289                 if (location && read->get_location() != location) {
2290                         it++;
2291                         continue;
2292                 }
2293
2294                 const ModelAction *rf = read->get_reads_from();
2295                 rel_heads_list_t release_heads;
2296                 bool complete;
2297                 complete = release_seq_heads(rf, &release_heads, pending);
2298                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2299                         if (!acquire->has_synchronized_with(release_heads[i])) {
2300                                 if (acquire->synchronize_with(release_heads[i]))
2301                                         updated = true;
2302                                 else
2303                                         set_bad_synchronization();
2304                         }
2305                 }
2306
2307                 if (updated) {
2308                         /* Re-check all pending release sequences */
2309                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2310                         /* Re-check read-acquire for mo_graph edges */
2311                         if (acquire->is_read())
2312                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2313
2314                         /* propagate synchronization to later actions */
2315                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2316                         for (; (*rit) != acquire; rit++) {
2317                                 ModelAction *propagate = *rit;
2318                                 if (acquire->happens_before(propagate)) {
2319                                         propagate->synchronize_with(acquire);
2320                                         /* Re-check 'propagate' for mo_graph edges */
2321                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2322                                 }
2323                         }
2324                 }
2325                 if (complete) {
2326                         it = pending_rel_seqs->erase(it);
2327                         snapshot_free(pending);
2328                 } else {
2329                         it++;
2330                 }
2331         }
2332
2333         // If we resolved any release sequences (establishing new synchronization), see if we have realized a data race.
2334         checkDataRaces();
2335
2336         return updated;
2337 }
2338
2339 /**
2340  * Performs various bookkeeping operations for the current ModelAction. For
2341  * instance, it adds the action to the per-object, per-thread action vector and to the
2342  * action trace list of all thread actions.
2343  *
2344  * @param act is the ModelAction to add.
2345  */
2346 void ModelChecker::add_action_to_lists(ModelAction *act)
2347 {
2348         int tid = id_to_int(act->get_tid());
2349         ModelAction *uninit = NULL;
2350         int uninit_id = -1;
2351         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
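        /* First access to an atomic object: seed its lists with a synthetic
         * "uninitialized" write so that early reads have a candidate to read
         * from */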
2352         if (list->empty() && act->is_atomic_var()) {
2353                 uninit = new_uninitialized_action(act->get_location());
2354                 uninit_id = id_to_int(uninit->get_tid());
2355                 list->push_back(uninit);
2356         }
2357         list->push_back(act);
2358
2359         action_trace->push_back(act);
2360         if (uninit)
2361                 action_trace->push_front(uninit);
2362
2363         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2364         if (tid >= (int)vec->size())
2365                 vec->resize(priv->next_thread_id);
2366         (*vec)[tid].push_back(act);
2367         if (uninit)
2368                 (*vec)[uninit_id].push_front(uninit);
2369
2370         if ((int)thrd_last_action->size() <= tid)
2371                 thrd_last_action->resize(get_num_threads());
2372         (*thrd_last_action)[tid] = act;
2373         if (uninit)
2374                 (*thrd_last_action)[uninit_id] = uninit;
2375
2376         if (act->is_fence() && act->is_release()) {
2377                 if ((int)thrd_last_fence_release->size() <= tid)
2378                         thrd_last_fence_release->resize(get_num_threads());
2379                 (*thrd_last_fence_release)[tid] = act;
2380         }
2381
2382         if (act->is_wait()) {
2383                 void *mutex_loc = (void *) act->get_value();
2384                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2385
2386                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2387                 if (tid >= (int)vec->size())
2388                         vec->resize(priv->next_thread_id);
2389                 (*vec)[tid].push_back(act);
2390         }
2391 }
2392
2393 /**
2394  * @brief Get the last action performed by a particular Thread
2395  * @param tid The thread ID of the Thread in question
2396  * @return The last action in the thread
2397  */
2398 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2399 {
2400         int threadid = id_to_int(tid);
2401         if (threadid < (int)thrd_last_action->size())
2402                 return (*thrd_last_action)[id_to_int(tid)];
2403         else
2404                 return NULL;
2405 }
2406
2407 /**
2408  * @brief Get the last fence release performed by a particular Thread
2409  * @param tid The thread ID of the Thread in question
2410  * @return The last fence release in the thread, if one exists; NULL otherwise
2411  */
2412 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2413 {
2414         int threadid = id_to_int(tid);
2415         if (threadid < (int)thrd_last_fence_release->size())
2416                 return (*thrd_last_fence_release)[id_to_int(tid)];
2417         else
2418                 return NULL;
2419 }
2420
2421 /**
2422  * Gets the last memory_order_seq_cst write (in the total global sequence)
2423  * performed on a particular object (i.e., memory location), not including the
2424  * current action.
2425  * @param curr The current ModelAction; also denotes the object location to
2426  * check
2427  * @return The last seq_cst write
2428  */
2429 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2430 {
2431         void *location = curr->get_location();
2432         action_list_t *list = get_safe_ptr_action(obj_map, location);
2433         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2434         action_list_t::reverse_iterator rit;
2435         for (rit = list->rbegin(); rit != list->rend(); rit++)
2436                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2437                         return *rit;
2438         return NULL;
2439 }
2440
2441 /**
2442  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2443  * performed in a particular thread, prior to a particular fence.
2444  * @param tid The ID of the thread to check
2445  * @param before_fence The fence from which to begin the search; if NULL, then
2446  * search for the most recent fence in the thread.
2447  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2448  */
2449 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2450 {
2451         /* All fences should have NULL location */
2452         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2453         action_list_t::reverse_iterator rit = list->rbegin();
2454
2455         if (before_fence) {
2456                 for (; rit != list->rend(); rit++)
2457                         if (*rit == before_fence)
2458                                 break;
2459
2460                 ASSERT(*rit == before_fence);
2461                 rit++;
2462         }
2463
2464         for (; rit != list->rend(); rit++)
2465                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2466                         return *rit;
2467         return NULL;
2468 }
2469
2470 /**
2471  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2472  * location). This function identifies the mutex according to the current
2473  * action, which is presumed to operate on the same mutex.
2474  * @param curr The current ModelAction; also denotes the object location to
2475  * check
2476  * @return The last unlock operation
2477  */
2478 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2479 {
2480         void *location = curr->get_location();
2481         action_list_t *list = get_safe_ptr_action(obj_map, location);
2482         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2483         action_list_t::reverse_iterator rit;
2484         for (rit = list->rbegin(); rit != list->rend(); rit++)
2485                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2486                         return *rit;
2487         return NULL;
2488 }
2489
2490 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2491 {
2492         ModelAction *parent = get_last_action(tid);
2493         if (!parent)
2494                 parent = get_thread(tid)->get_creation();
2495         return parent;
2496 }
2497
2498 /**
2499  * Returns the clock vector for a given thread.
2500  * @param tid The thread whose clock vector we want
2501  * @return Desired clock vector
2502  */
2503 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2504 {
2505         return get_parent_action(tid)->get_cv();
2506 }
2507
2508 /**
2509  * @brief Find the promise, if any, to resolve for the current action
2510  * @param curr The current ModelAction. Should be a write.
2511  * @return The (non-negative) index for the Promise to resolve, if any;
2512  * otherwise -1
2513  */
2514 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2515 {
2516         for (unsigned int i = 0; i < promises->size(); i++)
2517                 if (curr->get_node()->get_promise(i))
2518                         return i;
2519         return -1;
2520 }
2521
2522 /**
2523  * Resolve a Promise with a current write.
2524  * @param write The ModelAction that is fulfilling Promises
2525  * @param promise_idx The index corresponding to the promise
2526  * @return True if the Promise was successfully resolved; false otherwise
2527  */
2528 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2529 {
2530         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2531         Promise *promise = (*promises)[promise_idx];
2532
2533         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2534                 ModelAction *read = promise->get_reader(i);
2535                 read_from(read, write);
2536                 actions_to_check.push_back(read);
2537         }
2538         /* Make sure the promise's value matches the write's value */
2539         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2540         if (!mo_graph->resolvePromise(promise, write))
2541                 priv->failed_promise = true;
2542
2543         promises->erase(promises->begin() + promise_idx);
2544         /**
2545          * @todo  It is possible to end up in an inconsistent state, where a
2546          * "resolved" promise may still be referenced if
2547          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2548          *
2549          * Note that the inconsistency only matters when dumping mo_graph to
2550          * file.
2551          *
2552          * delete promise;
2553          */
2554
2555         // Check whether reading these writes has made threads unable to
2556         // resolve promises
2557         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2558                 ModelAction *read = actions_to_check[i];
2559                 mo_check_promises(read, true);
2560         }
2561
2562         return true;
2563 }
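
/*
 * Illustrative pairing of the two promise helpers above (a sketch only;
 * the local names 'curr' and 'idx' are hypothetical, not the actual call
 * site):
 *
 *   int idx = get_promise_to_resolve(curr);  // curr is the write being processed
 *   if (idx >= 0)
 *           resolve_promise(curr, idx);
 */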
2564
2565 /**
2566  * Compute the set of promises that could potentially be satisfied by this
2567  * action. Note that the set computation actually appears in the Node, not in
2568  * ModelChecker.
2569  * @param curr The ModelAction that may satisfy promises
2570  */
2571 void ModelChecker::compute_promises(ModelAction *curr)
2572 {
2573         for (unsigned int i = 0; i < promises->size(); i++) {
2574                 Promise *promise = (*promises)[i];
2575                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2576                         continue;
2577
2578                 bool satisfy = true;
2579                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2580                         const ModelAction *act = promise->get_reader(j);
2581                         if (act->happens_before(curr) ||
2582                                         act->could_synchronize_with(curr)) {
2583                                 satisfy = false;
2584                                 break;
2585                         }
2586                 }
2587                 if (satisfy)
2588                         curr->get_node()->set_promise(i);
2589         }
2590 }
2591
2592 /** Checks promises in response to a change in a thread's ClockVector. */
2593 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2594 {
2595         for (unsigned int i = 0; i < promises->size(); i++) {
2596                 Promise *promise = (*promises)[i];
2597                 if (!promise->thread_is_available(tid))
2598                         continue;
2599                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2600                         const ModelAction *act = promise->get_reader(j);
2601                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2602                                         merge_cv->synchronized_since(act)) {
2603                                 if (promise->eliminate_thread(tid)) {
2604                                         /* Promise has failed */
2605                                         priv->failed_promise = true;
2606                                         return;
2607                                 }
2608                         }
2609                 }
2610         }
2611 }
2612
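/**
 * @brief Re-check all pending promises after a thread has been disabled
 *
 * If any promise has failed (i.e., it can no longer be satisfied by any
 * thread), mark the execution as having a failed promise.
 */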
2613 void ModelChecker::check_promises_thread_disabled()
2614 {
2615         for (unsigned int i = 0; i < promises->size(); i++) {
2616                 Promise *promise = (*promises)[i];
2617                 if (promise->has_failed()) {
2618                         priv->failed_promise = true;
2619                         return;
2620                 }
2621         }
2622 }
2623
2624 /**
2625  * @brief Checks promises in response to addition to modification order for
2626  * threads.
2627  *
2628  * We test whether threads are still available for satisfying promises after an
2629  * addition to our modification order constraints. Those that are unavailable
2630  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2631  * that promise has failed.
2632  *
2633  * @param act The ModelAction which updated the modification order
2634  * @param is_read_check Should be true if act is a read and we must check for
2635  * updates to the store from which it read (there is a distinction here for
2636  * RMW's, which are both a load and a store)
2637  * RMWs, which are both a load and a store)
2638 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2639 {
2640         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2641
2642         for (unsigned int i = 0; i < promises->size(); i++) {
2643                 Promise *promise = (*promises)[i];
2644
2645                 // Is this promise on the same location?
2646                 if (!promise->same_location(write))
2647                         continue;
2648
2649                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2650                         const ModelAction *pread = promise->get_reader(j);
2651                         if (!pread->happens_before(act))
2652                                continue;
2653                         if (mo_graph->checkPromise(write, promise)) {
2654                                 priv->failed_promise = true;
2655                                 return;
2656                         }
2657                         break;
2658                 }
2659
2660                 // Don't do any lookups twice for the same thread
2661                 if (!promise->thread_is_available(act->get_tid()))
2662                         continue;
2663
2664                 if (mo_graph->checkReachable(promise, write)) {
2665                         if (mo_graph->checkPromise(write, promise)) {
2666                                 priv->failed_promise = true;
2667                                 return;
2668                         }
2669                 }
2670         }
2671 }
2672
2673 /**
2674  * Compute the set of writes that may break the current pending release
2675  * sequence. This information is extracted from previous release sequence
2676  * calculations.
2677  *
2678  * @param curr The current ModelAction. Must be a release sequence fixup
2679  * action.
2680  */
2681 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2682 {
2683         if (pending_rel_seqs->empty())
2684                 return;
2685
2686         struct release_seq *pending = pending_rel_seqs->back();
2687         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2688                 const ModelAction *write = pending->writes[i];
2689                 curr->get_node()->add_relseq_break(write);
2690         }
2691
2692         /* NULL means don't break the sequence; just synchronize */
2693         curr->get_node()->add_relseq_break(NULL);
2694 }
2695
2696 /**
2697  * Build up an initial set of all past writes that this 'read' action may read
2698  * from, as well as any previously-observed future values that must still be valid.
2699  *
2700  * @param curr is the current ModelAction that we are exploring; it must be a
2701  * 'read' operation.
2702  */
2703 void ModelChecker::build_may_read_from(ModelAction *curr)
2704 {
2705         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2706         unsigned int i;
2707         ASSERT(curr->is_read());
2708
2709         ModelAction *last_sc_write = NULL;
2710
2711         if (curr->is_seqcst())
2712                 last_sc_write = get_last_seq_cst_write(curr);
2713
2714         /* Iterate over all threads */
2715         for (i = 0; i < thrd_lists->size(); i++) {
2716                 /* Iterate over actions in thread, starting from most recent */
2717                 action_list_t *list = &(*thrd_lists)[i];
2718                 action_list_t::reverse_iterator rit;
2719                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2720                         ModelAction *act = *rit;
2721
2722                         /* Only consider 'write' actions */
2723                         if (!act->is_write() || act == curr)
2724                                 continue;
2725
2726                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2727                         bool allow_read = true;
2728
2729                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2730                                 allow_read = false;
2731                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2732                                 allow_read = false;
2733
2734                         if (allow_read) {
2735                                 /* Only add feasible reads */
2736                                 mo_graph->startChanges();
2737                                 r_modification_order(curr, act);
2738                                 if (!is_infeasible())
2739                                         curr->get_node()->add_read_from_past(act);
2740                                 mo_graph->rollbackChanges();
2741                         }
2742
2743                         /* Include at most one act per-thread that "happens before" curr */
2744                         if (act->happens_before(curr))
2745                                 break;
2746                 }
2747         }
2748
2749         /* Inherit existing, promised future values */
2750         for (i = 0; i < promises->size(); i++) {
2751                 const Promise *promise = (*promises)[i];
2752                 const ModelAction *promise_read = promise->get_reader(0);
2753                 if (promise_read->same_var(curr)) {
2754                         /* Only add feasible future-values */
2755                         mo_graph->startChanges();
2756                         r_modification_order(curr, promise);
2757                         if (!is_infeasible())
2758                                 curr->get_node()->add_read_from_promise(promise_read);
2759                         mo_graph->rollbackChanges();
2760                 }
2761         }
2762
2763         /* Finding no valid may-read-from set can only happen if the execution is doomed */
2764         if (!curr->get_node()->read_from_size()) {
2765                 priv->no_valid_reads = true;
2766                 set_assert();
2767         }
2768
2769         if (DBG_ENABLED()) {
2770                 model_print("Reached read action:\n");
2771                 curr->print();
2772                 model_print("Printing read_from_past\n");
2773                 curr->get_node()->print_read_from_past();
2774                 model_print("End printing read_from_past\n");
2775         }
2776 }
2777
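/**
 * @brief Check whether a read from a sleeping thread may read from a given write
 *
 * Walks backward through the chain of RMW writes (via their reads-from
 * links), allowing the read if it reaches an uninitialized write or finds a
 * release write that was explored while the reading thread was in the sleep
 * set; a non-RMW write that does not satisfy that condition disallows it.
 * @param curr The read being considered
 * @param write The candidate write
 * @return True if the read may read from the write
 */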
2778 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2779 {
2780         for ( ; write != NULL; write = write->get_reads_from()) {
2781                 /* UNINIT actions don't have a Node, and they never sleep */
2782                 if (write->is_uninitialized())
2783                         return true;
2784                 Node *prevnode = write->get_node()->get_parent();
2785
2786                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2787                 if (write->is_release() && thread_sleep)
2788                         return true;
2789                 if (!write->is_rmw())
2790                         return false;
2791         }
2792         return true;
2793 }
2794
2795 /**
2796  * @brief Create a new action representing an uninitialized atomic
2797  * @param location The memory location of the atomic object
2798  * @return A pointer to a new ModelAction
2799  */
2800 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2801 {
2802         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2803         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2804         act->create_cv(NULL);
2805         return act;
2806 }
2807
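/** @brief Print every action in a list, followed by a simple hash of the trace */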
2808 static void print_list(action_list_t *list)
2809 {
2810         action_list_t::iterator it;
2811
2812         model_print("---------------------------------------------------------------------\n");
2813
2814         unsigned int hash = 0;
2815
2816         for (it = list->begin(); it != list->end(); it++) {
2817                 (*it)->print();
2818                 hash = hash^(hash<<3)^((*it)->hash());
2819         }
2820         model_print("HASH %u\n", hash);
2821         model_print("---------------------------------------------------------------------\n");
2822 }
2823
2824 #if SUPPORT_MOD_ORDER_DUMP
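/**
 * @brief Dump the modification-order graph for this execution in Graphviz
 * "dot" format, annotated with reads-from (rf) and sequenced-before (sb) edges
 * @param filename The output name; a ".dot" suffix is appended
 */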
2825 void ModelChecker::dumpGraph(char *filename) const
2826 {
2827         char buffer[200];
2828         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2829         FILE *file = fopen(buffer, "w");
2830         fprintf(file, "digraph %s {\n", filename);
2831         mo_graph->dumpNodes(file);
2832         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2833
2834         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2835                 ModelAction *act = *it;
2836                 if (act->is_read()) {
2837                         mo_graph->dot_print_node(file, act);
2838                         if (act->get_reads_from())
2839                                 mo_graph->dot_print_edge(file,
2840                                                 act->get_reads_from(),
2841                                                 act,
2842                                                 "label=\"rf\", color=red, weight=2");
2843                         else
2844                                 mo_graph->dot_print_edge(file,
2845                                                 act->get_reads_from_promise(),
2846                                                 act,
2847                                                 "label=\"rf\", color=red");
2848                 }
2849                 if (thread_array[id_to_int(act->get_tid())]) {
2850                         mo_graph->dot_print_edge(file,
2851                                         thread_array[id_to_int(act->get_tid())],
2852                                         act,
2853                                         "label=\"sb\", color=blue, weight=400");
2854                 }
2855
2856                 thread_array[id_to_int(act->get_tid())] = act;
2857         }
2858         fprintf(file, "}\n");
2859         model_free(thread_array);
2860         fclose(file);
2861 }
2862 #endif
2863
2864 /** @brief Prints an execution trace summary. */
2865 void ModelChecker::print_summary() const
2866 {
2867 #if SUPPORT_MOD_ORDER_DUMP
2868         char buffername[100];
2869         sprintf(buffername, "exec%04u", stats.num_total);
2870         mo_graph->dumpGraphToFile(buffername);
2871         sprintf(buffername, "graph%04u", stats.num_total);
2872         dumpGraph(buffername);
2873 #endif
2874
2875         model_print("Execution %u:", stats.num_total);
2876         if (isfeasibleprefix()) {
2877                 if (scheduler->all_threads_sleeping())
2878                         model_print(" SLEEP-SET REDUNDANT");
2879                 model_print("\n");
2880         } else
2881                 print_infeasibility(" INFEASIBLE");
2882         print_list(action_trace);
2883         model_print("\n");
2884         if (!promises->empty()) {
2885                 model_print("Pending promises:\n");
2886                 for (unsigned int i = 0; i < promises->size(); i++) {
2887                         model_print(" [P%u] ", i);
2888                         (*promises)[i]->print();
2889                 }
2890                 model_print("\n");
2891         }
2892 }
2893
2894 /**
2895  * Add a Thread to the system for the first time. Should only be called once
2896  * per thread.
2897  * @param t The Thread to add
2898  */
2899 void ModelChecker::add_thread(Thread *t)
2900 {
2901         thread_map->put(id_to_int(t->get_id()), t);
2902         scheduler->add_thread(t);
2903 }
2904
2905 /**
2906  * Removes a thread from the scheduler.
2907  * @param t The Thread to remove
2908  */
2909 void ModelChecker::remove_thread(Thread *t)
2910 {
2911         scheduler->remove_thread(t);
2912 }
2913
2914 /**
2915  * @brief Get a Thread reference by its ID
2916  * @param tid The Thread's ID
2917  * @return A Thread reference
2918  */
2919 Thread * ModelChecker::get_thread(thread_id_t tid) const
2920 {
2921         return thread_map->get(id_to_int(tid));
2922 }
2923
2924 /**
2925  * @brief Get a reference to the Thread in which a ModelAction was executed
2926  * @param act The ModelAction
2927  * @return A Thread reference
2928  */
2929 Thread * ModelChecker::get_thread(const ModelAction *act) const
2930 {
2931         return get_thread(act->get_tid());
2932 }
2933
2934 /**
2935  * @brief Get a Promise's "promise number"
2936  *
2937  * A "promise number" is an index number that is unique to a promise, valid
2938  * only for a specific snapshot of an execution trace. Promises may come and go
2939  * as they are generated and resolved, so an index only retains meaning for the
2940  * current snapshot.
2941  *
2942  * @param promise The Promise to check
2943  * @return The promise index, if the promise is still valid; otherwise -1
2944  */
2945 int ModelChecker::get_promise_number(const Promise *promise) const
2946 {
2947         for (unsigned int i = 0; i < promises->size(); i++)
2948                 if ((*promises)[i] == promise)
2949                         return i;
2950         /* Not found */
2951         return -1;
2952 }
2953
2954 /**
2955  * @brief Check if a Thread is currently enabled
2956  * @param t The Thread to check
2957  * @return True if the Thread is currently enabled
2958  */
2959 bool ModelChecker::is_enabled(Thread *t) const
2960 {
2961         return scheduler->is_enabled(t);
2962 }
2963
2964 /**
2965  * @brief Check if a Thread is currently enabled
2966  * @param tid The ID of the Thread to check
2967  * @return True if the Thread is currently enabled
2968  */
2969 bool ModelChecker::is_enabled(thread_id_t tid) const
2970 {
2971         return scheduler->is_enabled(tid);
2972 }
2973
2974 /**
2975  * Switch from a model-checker context to a user-thread context. This is the
2976  * complement of ModelChecker::switch_to_master and must be called from the
2977  * model-checker context.
2978  *
2979  * @param thread The user-thread to switch to
2980  */
2981 void ModelChecker::switch_from_master(Thread *thread)
2982 {
2983         scheduler->set_current_thread(thread);
2984         Thread::swap(&system_context, thread);
2985 }
2986
2987 /**
2988  * Switch from a user-context to the "master thread" context (a.k.a. system
2989  * context). This switch is made with the intention of exploring a particular
2990  * model-checking action (described by a ModelAction object). Must be called
2991  * from a user-thread context.
2992  *
2993  * @param act The current action that will be explored. May be NULL only if
2994  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2995  * ModelChecker::has_asserted).
2996  * @return The value returned by the current action
2997  */
2998 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2999 {
3000         DBG();
3001         Thread *old = thread_current();
3002         ASSERT(!old->get_pending());
3003         old->set_pending(act);
3004         if (Thread::swap(old, &system_context) < 0) {
3005                 perror("swap threads");
3006                 exit(EXIT_FAILURE);
3007         }
3008         return old->get_return_value();
3009 }
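
/*
 * The two switch_* functions above form the handoff between user-thread
 * contexts and the model-checker ("master") context. A rough sketch of one
 * round trip (illustrative only; see run() below for the real master-side
 * loop):
 *
 *   user thread:  switch_to_master(act);         // stash act as pending, swap out
 *   master:       ModelAction *curr = thr->get_pending();
 *                 thr->set_pending(NULL);
 *                 ... process curr, choose the next thread ...
 *                 switch_from_master(next_thr);  // resume a user thread
 */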
3010
3011 /**
3012  * Takes the next step in the execution, if possible.
3013  * @param curr The current step to take
3014  * @return The next Thread to run, if any; NULL if this execution
3015  * should terminate
3016  */
3017 Thread * ModelChecker::take_step(ModelAction *curr)
3018 {
3019         Thread *curr_thrd = get_thread(curr);
3020         ASSERT(curr_thrd->get_state() == THREAD_READY);
3021
3022         curr = check_current_action(curr);
3023
3024         /* Infeasible -> don't take any more steps */
3025         if (is_infeasible())
3026                 return NULL;
3027         else if (isfeasibleprefix() && have_bug_reports()) {
3028                 set_assert();
3029                 return NULL;
3030         }
3031
3032         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3033                 return NULL;
3034
3035         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3036                 scheduler->remove_thread(curr_thrd);
3037
3038         Thread *next_thrd = NULL;
3039         if (curr)
3040                 next_thrd = action_select_next_thread(curr);
3041         if (!next_thrd)
3042                 next_thrd = get_next_thread();
3043
3044         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3045                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3046
3047         return next_thrd;
3048 }
3049
3050 /** Wrapper to run the user's main function, with appropriate arguments */
3051 void user_main_wrapper(void *)
3052 {
3053         user_main(model->params.argc, model->params.argv);
3054 }
3055
3056 /** @brief Run ModelChecker for the user program */
3057 void ModelChecker::run()
3058 {
3059         do {
3060                 thrd_t user_thread;
3061                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3062                 add_thread(t);
3063
3064                 do {
3065                         /*
3066                          * Stash the next pending action(s) for the thread(s). We
3067                          * should only need to stash one thread's action--the
3068                          * thread which just took a step--plus the first step
3069                          * for any newly-created thread
3070                          */
3071                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3072                                 thread_id_t tid = int_to_id(i);
3073                                 Thread *thr = get_thread(tid);
3074                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3075                                         switch_from_master(thr);
3076                                         if (is_circular_wait(thr))
3077                                                 assert_bug("Deadlock detected");
3078                                 }
3079                         }
3080
3081                         /* Catch assertions from prior take_step or from
3082                          * between-ModelAction bugs (e.g., data races) */
3083                         if (has_asserted())
3084                                 break;
3085
3086                         /* Consume the next action for a Thread */
3087                         ModelAction *curr = t->get_pending();
3088                         t->set_pending(NULL);
3089                         t = take_step(curr);
3090                 } while (t && !t->is_model_thread());
3091
3092                 /*
3093                  * Launch end-of-execution release sequence fixups only when
3094                  * the execution is otherwise feasible AND there are:
3095                  *
3096                  * (1) pending release sequences
3097                  * (2) pending assertions that could be invalidated by a change
3098                  * in clock vectors (i.e., data races)
3099                  * (3) no pending promises
3100                  */
3101                 while (!pending_rel_seqs->empty() &&
3102                                 is_feasible_prefix_ignore_relseq() &&
3103                                 !unrealizedraces.empty()) {
3104                         model_print("*** WARNING: release sequence fixup action "
3105                                         "(%zu pending release sequence(s)) ***\n",
3106                                         pending_rel_seqs->size());
3107                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3108                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3109                                         model_thread);
3110                         take_step(fixup);
3111                 }
3112         } while (next_execution());
3113
3114         model_print("******* Model-checking complete: *******\n");
3115         print_stats();
3116 }
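
/*
 * Sketch of how this checker might be driven from the program's entry point
 * (hypothetical driver code; the real startup lives outside this file):
 *
 *   model = new ModelChecker(params);  // 'model' is the global checker instance
 *   model->run();
 */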