c11tester.git / model.cc (commit: model: use get_write_value())
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
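                /* Note: fmt is 11 bytes plus NUL; its "%s" is replaced by str,
                 * so strlen(fmt) + strlen(str) bytes comfortably hold the
                 * formatted message and its terminating NUL. */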
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
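/* Usage sketch: callers fetch-or-create the per-location action list, e.g.
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 * so a NULL list never has to be handled at the call site. */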
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218  * might guide the choice of next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise NULL, if
221  * no threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
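                /* Try the remaining alternatives at this node in a fixed
                 * order (misc index, promise set, read-from choice, release-
                 * sequence break); only when all are exhausted do we put this
                 * thread to sleep and switch to the backtracking thread
                 * recorded in the parent node. */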
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different write. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be.  This method marks each sleeping thread's pending
300  * action with the sleep flag.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* All acquire/release fences and fence-acquire/store-release */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can awake load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
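/* Typical use (as elsewhere in this file): assert_bug("Deadlock detected")
 * records the message and, when the current trace is a feasible prefix, also
 * halts the execution via set_assert(). */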
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else {
487                 stats.num_redundant++;
488
489                 /**
490                  * @todo We can violate this ASSERT() when fairness/sleep sets
491                  * conflict to cause an execution to terminate, e.g. with:
492                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
493                  */
494                 //ASSERT(scheduler->all_threads_sleeping());
495         }
496 }
497
498 /** @brief Print execution stats */
499 void ModelChecker::print_stats() const
500 {
501         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
502         model_print("Number of redundant executions: %d\n", stats.num_redundant);
503         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
504         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
505         model_print("Total executions: %d\n", stats.num_total);
506         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
507 }
508
509 /**
510  * @brief End-of-execution print
511  * @param printbugs Should any existing bugs be printed?
512  */
513 void ModelChecker::print_execution(bool printbugs) const
514 {
515         print_program_output();
516
517         if (DBG_ENABLED() || params.verbose) {
518                 model_print("Earliest divergence point since last feasible execution:\n");
519                 if (earliest_diverge)
520                         earliest_diverge->print();
521                 else
522                         model_print("(Not set)\n");
523
524                 model_print("\n");
525                 print_stats();
526         }
527
528         /* Don't print invalid bugs */
529         if (printbugs)
530                 print_bugs();
531
532         model_print("\n");
533         print_summary();
534 }
535
536 /**
537  * Queries the model-checker for more executions to explore and, if one
538  * exists, resets the model-checker state to execute a new execution.
539  *
540  * @return If there are more executions to explore, return true. Otherwise,
541  * return false.
542  */
543 bool ModelChecker::next_execution()
544 {
545         DBG();
546         /* Is this execution a feasible execution that's worth bug-checking? */
547         bool complete = isfeasibleprefix() && (is_complete_execution() ||
548                         have_bug_reports());
549
550         /* End-of-execution bug checks */
551         if (complete) {
552                 if (is_deadlocked())
553                         assert_bug("Deadlock detected");
554
555                 checkDataRaces();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
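/*
 * Rough usage sketch (hypothetical driver, not part of this file): the
 * top-level harness repeatedly runs the user program and then asks for
 * another execution, e.g.
 *
 *   do {
 *           run_one_execution();   // hypothetical: runs one interleaving
 *   } while (model->next_execution());
 */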
580
581 /**
582  * @brief Find the last fence-related backtracking conflict for a ModelAction
583  *
584  * This function performs the search for the most recent conflicting action
585  * against which we should perform backtracking, as affected by fence
586  * operations. This includes pairs of potentially-synchronizing actions which
587  * occur due to fence-acquire or fence-release, and hence should be explored in
588  * the opposite execution order.
589  *
590  * @param act The current action
591  * @return The most recent action which conflicts with act due to fences
592  */
593 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
594 {
595         /* Only perform release/acquire fence backtracking for stores */
596         if (!act->is_write())
597                 return NULL;
598
599         /* Find a fence-release (or, act is a release) */
600         ModelAction *last_release;
601         if (act->is_release())
602                 last_release = act;
603         else
604                 last_release = get_last_fence_release(act->get_tid());
605         if (!last_release)
606                 return NULL;
607
608         /* Skip past the release */
609         action_list_t *list = action_trace;
610         action_list_t::reverse_iterator rit;
611         for (rit = list->rbegin(); rit != list->rend(); rit++)
612                 if (*rit == last_release)
613                         break;
614         ASSERT(rit != list->rend());
615
616         /* Find a prior:
617          *   load-acquire
618          * or
619          *   load --sb-> fence-acquire */
620         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
621         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
622         bool found_acquire_fences = false;
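        /* Both vectors are indexed by thread id: for every other thread we
         * record its most recent acquire fence and a plain (non-acquire)
         * load of act's location seen while scanning backwards. */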
623         for ( ; rit != list->rend(); rit++) {
624                 ModelAction *prev = *rit;
625                 if (act->same_thread(prev))
626                         continue;
627
628                 int tid = id_to_int(prev->get_tid());
629
630                 if (prev->is_read() && act->same_var(prev)) {
631                         if (prev->is_acquire()) {
632                                 /* Found most recent load-acquire, don't need
633                                  * to search for more fences */
634                                 if (!found_acquire_fences)
635                                         return NULL;
636                         } else {
637                                 prior_loads[tid] = prev;
638                         }
639                 }
640                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
641                         found_acquire_fences = true;
642                         acquire_fences[tid] = prev;
643                 }
644         }
645
646         ModelAction *latest_backtrack = NULL;
647         for (unsigned int i = 0; i < acquire_fences.size(); i++)
648                 if (acquire_fences[i] && prior_loads[i])
649                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
650                                 latest_backtrack = acquire_fences[i];
651         return latest_backtrack;
652 }
653
654 /**
655  * @brief Find the last backtracking conflict for a ModelAction
656  *
657  * This function performs the search for the most recent conflicting action
658  * against which we should perform backtracking. This primarily includes pairs of
659  * synchronizing actions which should be explored in the opposite execution
660  * order.
661  *
662  * @param act The current action
663  * @return The most recent action which conflicts with act
664  */
665 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
666 {
667         switch (act->get_type()) {
668         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
669         case ATOMIC_READ:
670         case ATOMIC_WRITE:
671         case ATOMIC_RMW: {
672                 ModelAction *ret = NULL;
673
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (prev->could_synchronize_with(act)) {
680                                 ret = prev;
681                                 break;
682                         }
683                 }
684
685                 ModelAction *ret2 = get_last_fence_conflict(act);
686                 if (!ret2)
687                         return ret;
688                 if (!ret)
689                         return ret2;
690                 if (*ret < *ret2)
691                         return ret2;
692                 return ret;
693         }
694         case ATOMIC_LOCK:
695         case ATOMIC_TRYLOCK: {
696                 /* linear search: from most recent to oldest */
697                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
698                 action_list_t::reverse_iterator rit;
699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
700                         ModelAction *prev = *rit;
701                         if (act->is_conflicting_lock(prev))
702                                 return prev;
703                 }
704                 break;
705         }
706         case ATOMIC_UNLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (!act->same_thread(prev) && prev->is_failed_trylock())
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_WAIT: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                         if (!act->same_thread(prev) && prev->is_notify())
726                                 return prev;
727                 }
728                 break;
729         }
730
731         case ATOMIC_NOTIFY_ALL:
732         case ATOMIC_NOTIFY_ONE: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_wait())
739                                 return prev;
740                 }
741                 break;
742         }
743         default:
744                 break;
745         }
746         return NULL;
747 }
748
749 /** This method finds backtracking points against which we should try to
750  * reorder the given ModelAction.
751  *
752  * @param act The ModelAction to find backtracking points for.
753  */
754 void ModelChecker::set_backtracking(ModelAction *act)
755 {
756         Thread *t = get_thread(act);
757         ModelAction *prev = get_last_conflict(act);
758         if (prev == NULL)
759                 return;
760
761         Node *node = prev->get_node()->get_parent();
762
763         int low_tid, high_tid;
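        /* If the conflicting thread was enabled at prev's parent node we only
         * need to consider backtracking into that one thread; otherwise every
         * thread is a candidate. */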
764         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
765                 low_tid = id_to_int(act->get_tid());
766                 high_tid = low_tid + 1;
767         } else {
768                 low_tid = 0;
769                 high_tid = get_num_threads();
770         }
771
772         for (int i = low_tid; i < high_tid; i++) {
773                 thread_id_t tid = int_to_id(i);
774
775                 /* Make sure this thread can be enabled here. */
776                 if (i >= node->get_num_threads())
777                         break;
778
779                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
780                 if (node->enabled_status(tid) != THREAD_ENABLED)
781                         continue;
782
783                 /* Check if this has been explored already */
784                 if (node->has_been_explored(tid))
785                         continue;
786
787                 /* See if fairness allows */
788                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
789                         bool unfair = false;
790                         for (int t = 0; t < node->get_num_threads(); t++) {
791                                 thread_id_t tother = int_to_id(t);
792                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
793                                         unfair = true;
794                                         break;
795                                 }
796                         }
797                         if (unfair)
798                                 continue;
799                 }
800                 /* Cache the latest backtracking point */
801                 set_latest_backtrack(prev);
802
803                 /* If this is a new backtracking point, mark the tree */
804                 if (!node->set_backtrack(tid))
805                         continue;
806                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
807                                         id_to_int(prev->get_tid()),
808                                         id_to_int(t->get_id()));
809                 if (DBG_ENABLED()) {
810                         prev->print();
811                         act->print();
812                 }
813         }
814 }
815
816 /**
817  * @brief Cache a backtracking point as the "most recent", if eligible
818  *
819  * Note that this does not prepare the NodeStack for this backtracking
820  * operation; it only caches the action on a per-execution basis
821  *
822  * @param act The operation at which we should explore a different next action
823  * (i.e., backtracking point)
824  * @return True, if this action is now the most recent backtracking point;
825  * false otherwise
826  */
827 bool ModelChecker::set_latest_backtrack(ModelAction *act)
828 {
829         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
830                 priv->next_backtrack = act;
831                 return true;
832         }
833         return false;
834 }
835
836 /**
837  * Returns the last backtracking point. The model checker will explore a different
838  * path for this point in the next execution.
839  * @return The ModelAction at which the next execution should diverge.
840  */
841 ModelAction * ModelChecker::get_next_backtrack()
842 {
843         ModelAction *next = priv->next_backtrack;
844         priv->next_backtrack = NULL;
845         return next;
846 }
847
848 /**
849  * Processes a read model action.
850  * @param curr is the read model action to process.
851  * @return True if processing this read updates the mo_graph.
852  */
853 bool ModelChecker::process_read(ModelAction *curr)
854 {
855         Node *node = curr->get_node();
856         uint64_t value = VALUE_NONE;
857         while (true) {
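                /* The loop repeats only when a read-from candidate is
                 * rejected by check_recency() and the Node can still supply
                 * another one; otherwise each case falls through to the
                 * return at the bottom. */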
858                 bool updated = false;
859                 switch (node->get_read_from_status()) {
860                 case READ_FROM_PAST: {
861                         const ModelAction *rf = node->get_read_from_past();
862                         ASSERT(rf);
863
864                         mo_graph->startChanges();
865
866                         ASSERT(!is_infeasible());
867                         if (!check_recency(curr, rf)) {
868                                 if (node->increment_read_from()) {
869                                         mo_graph->rollbackChanges();
870                                         continue;
871                                 } else {
872                                         priv->too_many_reads = true;
873                                 }
874                         }
875
876                         updated = r_modification_order(curr, rf);
877                         value = rf->get_write_value();
878                         read_from(curr, rf);
879                         mo_graph->commitChanges();
880                         mo_check_promises(curr, true);
881                         break;
882                 }
883                 case READ_FROM_PROMISE: {
884                         Promise *promise = curr->get_node()->get_read_from_promise();
885                         if (promise->add_reader(curr))
886                                 priv->failed_promise = true;
887                         value = promise->get_value();
888                         curr->set_read_from_promise(promise);
889                         mo_graph->startChanges();
890                         if (!check_recency(curr, promise))
891                                 priv->too_many_reads = true;
892                         updated = r_modification_order(curr, promise);
893                         mo_graph->commitChanges();
894                         break;
895                 }
896                 case READ_FROM_FUTURE: {
897                         /* Read from future value */
898                         struct future_value fv = node->get_future_value();
899                         Promise *promise = new Promise(curr, fv);
900                         value = fv.value;
901                         curr->set_read_from_promise(promise);
902                         promises->push_back(promise);
903                         mo_graph->startChanges();
904                         updated = r_modification_order(curr, promise);
905                         mo_graph->commitChanges();
906                         break;
907                 }
908                 default:
909                         ASSERT(false);
910                 }
911                 get_thread(curr)->set_return_value(value);
912                 return updated;
913         }
914 }
915
916 /**
917  * Processes a lock, trylock, unlock, wait, or notify model action.  @param
918  * curr is the mutex model action to process.
919  *
920  * The trylock operation checks whether the lock is already taken.  If not,
921  * it falls through to the normal lock case.  If so, it returns
922  * failure.
923  *
924  * The lock operation has already been checked to be enabled, so
925  * it just grabs the lock and synchronizes with the previous unlock.
926  *
927  * The unlock operation has to re-enable all of the threads that are
928  * waiting on the lock.
929  *
930  * @return True if synchronization was updated; false otherwise
931  */
932 bool ModelChecker::process_mutex(ModelAction *curr)
933 {
934         std::mutex *mutex = NULL;
935         struct std::mutex_state *state = NULL;
936
937         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
938                 mutex = (std::mutex *)curr->get_location();
939                 state = mutex->get_state();
940         } else if (curr->is_wait()) {
941                 mutex = (std::mutex *)curr->get_value();
942                 state = mutex->get_state();
943         }
944
945         switch (curr->get_type()) {
946         case ATOMIC_TRYLOCK: {
947                 bool success = !state->islocked;
948                 curr->set_try_lock(success);
949                 if (!success) {
950                         get_thread(curr)->set_return_value(0);
951                         break;
952                 }
953                 get_thread(curr)->set_return_value(1);
954         }
955                 //otherwise fall into the lock case
956         case ATOMIC_LOCK: {
957                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
958                         assert_bug("Lock access before initialization");
959                 state->islocked = true;
960                 ModelAction *unlock = get_last_unlock(curr);
961                 //synchronize with the previous unlock statement
962                 if (unlock != NULL) {
963                         curr->synchronize_with(unlock);
964                         return true;
965                 }
966                 break;
967         }
968         case ATOMIC_UNLOCK: {
969                 //unlock the lock
970                 state->islocked = false;
971                 //wake up the other threads
972                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
973                 //activate all the waiting threads
974                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
975                         scheduler->wake(get_thread(*rit));
976                 }
977                 waiters->clear();
978                 break;
979         }
980         case ATOMIC_WAIT: {
981                 //unlock the lock
982                 state->islocked = false;
983                 //wake up the other threads
984                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
985                 //activate all the waiting threads
986                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
987                         scheduler->wake(get_thread(*rit));
988                 }
989                 waiters->clear();
990                 //check whether we should go to sleep or not...simulate spurious failures
991                 if (curr->get_node()->get_misc() == 0) {
992                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
993                         //disable us
994                         scheduler->sleep(get_thread(curr));
995                 }
996                 break;
997         }
998         case ATOMIC_NOTIFY_ALL: {
999                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1000                 //activate all the waiting threads
1001                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1002                         scheduler->wake(get_thread(*rit));
1003                 }
1004                 waiters->clear();
1005                 break;
1006         }
1007         case ATOMIC_NOTIFY_ONE: {
1008                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1009                 int wakeupthread = curr->get_node()->get_misc();
1010                 action_list_t::iterator it = waiters->begin();
1011                 advance(it, wakeupthread);
1012                 scheduler->wake(get_thread(*it));
1013                 waiters->erase(it);
1014                 break;
1015         }
1016
1017         default:
1018                 ASSERT(0);
1019         }
1020         return false;
1021 }
1022
1023 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1024 {
1025         /* Do more ambitious checks now that mo is more complete */
1026         if (mo_may_allow(writer, reader)) {
1027                 Node *node = reader->get_node();
1028
1029                 /* Find an ancestor thread which exists at the time of the reader */
1030                 Thread *write_thread = get_thread(writer);
1031                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1032                         write_thread = write_thread->get_parent();
1033
1034                 struct future_value fv = {
1035                         writer->get_write_value(),
1036                         writer->get_seq_number() + params.maxfuturedelay,
1037                         write_thread->get_id(),
1038                 };
1039                 if (node->add_future_value(fv))
1040                         set_latest_backtrack(reader);
1041         }
1042 }
1043
1044 /**
1045  * Process a write ModelAction
1046  * @param curr The ModelAction to process
1047  * @return True if the mo_graph was updated or promises were resolved
1048  */
1049 bool ModelChecker::process_write(ModelAction *curr)
1050 {
1051         /* Readers to which we may send our future value */
1052         std::vector< ModelAction *, ModelAlloc<ModelAction *> > send_fv;
1053
1054         bool updated_mod_order = w_modification_order(curr, &send_fv);
1055         int promise_idx = get_promise_to_resolve(curr);
1056         const ModelAction *earliest_promise_reader;
1057         bool updated_promises = false;
1058
1059         if (promise_idx >= 0) {
1060                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1061                 updated_promises = resolve_promise(curr, promise_idx);
1062         } else
1063                 earliest_promise_reader = NULL;
1064
1065         /* Don't send future values to reads after the Promise we resolve */
1066         for (unsigned int i = 0; i < send_fv.size(); i++) {
1067                 ModelAction *read = send_fv[i];
1068                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1069                         futurevalues->push_back(PendingFutureValue(curr, read));
1070         }
1071
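        /* Flush pending future values only once no promises remain
         * outstanding; add_future_value() can then check them against a more
         * complete modification order. */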
1072         if (promises->size() == 0) {
1073                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1074                         struct PendingFutureValue pfv = (*futurevalues)[i];
1075                         add_future_value(pfv.writer, pfv.act);
1076                 }
1077                 futurevalues->clear();
1078         }
1079
1080         mo_graph->commitChanges();
1081         mo_check_promises(curr, false);
1082
1083         get_thread(curr)->set_return_value(VALUE_NONE);
1084         return updated_mod_order || updated_promises;
1085 }
1086
1087 /**
1088  * Process a fence ModelAction
1089  * @param curr The ModelAction to process
1090  * @return True if synchronization was updated
1091  */
1092 bool ModelChecker::process_fence(ModelAction *curr)
1093 {
1094         /*
1095          * fence-relaxed: no-op
1096          * fence-release: only log the occurrence (not in this function), for
1097          *   use in later synchronization
1098          * fence-acquire (this function): search for hypothetical release
1099          *   sequences
1100          */
1101         bool updated = false;
1102         if (curr->is_acquire()) {
1103                 action_list_t *list = action_trace;
1104                 action_list_t::reverse_iterator rit;
1105                 /* Find X : is_read(X) && X --sb-> curr */
1106                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1107                         ModelAction *act = *rit;
1108                         if (act == curr)
1109                                 continue;
1110                         if (act->get_tid() != curr->get_tid())
1111                                 continue;
1112                         /* Stop at the beginning of the thread */
1113                         if (act->is_thread_start())
1114                                 break;
1115                         /* Stop once we reach a prior fence-acquire */
1116                         if (act->is_fence() && act->is_acquire())
1117                                 break;
1118                         if (!act->is_read())
1119                                 continue;
1120                         /* read-acquire will find its own release sequences */
1121                         if (act->is_acquire())
1122                                 continue;
1123
1124                         /* Establish hypothetical release sequences */
1125                         rel_heads_list_t release_heads;
1126                         get_release_seq_heads(curr, act, &release_heads);
1127                         for (unsigned int i = 0; i < release_heads.size(); i++)
1128                                 if (!curr->synchronize_with(release_heads[i]))
1129                                         set_bad_synchronization();
1130                         if (release_heads.size() != 0)
1131                                 updated = true;
1132                 }
1133         }
1134         return updated;
1135 }
1136
1137 /**
1138  * @brief Process the current action for thread-related activity
1139  *
1140  * Performs current-action processing for a THREAD_* ModelAction. Processing
1141  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1142  * synchronization, etc.  This function is a no-op for non-THREAD actions
1143  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1144  *
1145  * @param curr The current action
1146  * @return True if synchronization was updated or a thread completed
1147  */
1148 bool ModelChecker::process_thread_action(ModelAction *curr)
1149 {
1150         bool updated = false;
1151
1152         switch (curr->get_type()) {
1153         case THREAD_CREATE: {
1154                 thrd_t *thrd = (thrd_t *)curr->get_location();
1155                 struct thread_params *params = (struct thread_params *)curr->get_value();
1156                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1157                 add_thread(th);
1158                 th->set_creation(curr);
1159                 /* Promises can be satisfied by children */
1160                 for (unsigned int i = 0; i < promises->size(); i++) {
1161                         Promise *promise = (*promises)[i];
1162                         if (promise->thread_is_available(curr->get_tid()))
1163                                 promise->add_thread(th->get_id());
1164                 }
1165                 break;
1166         }
1167         case THREAD_JOIN: {
1168                 Thread *blocking = curr->get_thread_operand();
1169                 ModelAction *act = get_last_action(blocking->get_id());
1170                 curr->synchronize_with(act);
1171                 updated = true; /* trigger rel-seq checks */
1172                 break;
1173         }
1174         case THREAD_FINISH: {
1175                 Thread *th = get_thread(curr);
1176                 while (!th->wait_list_empty()) {
1177                         ModelAction *act = th->pop_wait_list();
1178                         scheduler->wake(get_thread(act));
1179                 }
1180                 th->complete();
1181                 /* Completed thread can't satisfy promises */
1182                 for (unsigned int i = 0; i < promises->size(); i++) {
1183                         Promise *promise = (*promises)[i];
1184                         if (promise->thread_is_available(th->get_id()))
1185                                 if (promise->eliminate_thread(th->get_id()))
1186                                         priv->failed_promise = true;
1187                 }
1188                 updated = true; /* trigger rel-seq checks */
1189                 break;
1190         }
1191         case THREAD_START: {
1192                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1193                 break;
1194         }
1195         default:
1196                 break;
1197         }
1198
1199         return updated;
1200 }
1201
1202 /**
1203  * @brief Process the current action for release sequence fixup activity
1204  *
1205  * Performs model-checker release sequence fixups for the current action,
1206  * forcing a single pending release sequence to break (with a given, potential
1207  * "loose" write) or to complete (i.e., synchronize). If a pending release
1208  * sequence forms a complete release sequence, then we must perform the fixup
1209  * synchronization, mo_graph additions, etc.
1210  *
1211  * @param curr The current action; must be a release sequence fixup action
1212  * @param work_queue The work queue to which to add work items as they are
1213  * generated
1214  */
1215 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1216 {
1217         const ModelAction *write = curr->get_node()->get_relseq_break();
1218         struct release_seq *sequence = pending_rel_seqs->back();
1219         pending_rel_seqs->pop_back();
1220         ASSERT(sequence);
1221         ModelAction *acquire = sequence->acquire;
1222         const ModelAction *rf = sequence->rf;
1223         const ModelAction *release = sequence->release;
1224         ASSERT(acquire);
1225         ASSERT(release);
1226         ASSERT(rf);
1227         ASSERT(release->same_thread(rf));
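        /* A NULL 'write' means the Node chose to let this release sequence
         * complete, so we force the synchronization; a non-NULL 'write' is
         * the chosen interfering store used to break the sequence. */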
1228
1229         if (write == NULL) {
1230                 /**
1231                  * @todo Forcing a synchronization requires that we set
1232                  * modification order constraints. For instance, we can't allow
1233                  * a fixup sequence in which two separate read-acquire
1234                  * operations read from the same sequence, where the first one
1235                  * synchronizes and the other doesn't. Essentially, we can't
1236                  * allow any writes to insert themselves between 'release' and
1237                  * 'rf'
1238                  */
1239
1240                 /* Must synchronize */
1241                 if (!acquire->synchronize_with(release)) {
1242                         set_bad_synchronization();
1243                         return;
1244                 }
1245                 /* Re-check all pending release sequences */
1246                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1247                 /* Re-check act for mo_graph edges */
1248                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1249
1250                 /* propagate synchronization to later actions */
1251                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1252                 for (; (*rit) != acquire; rit++) {
1253                         ModelAction *propagate = *rit;
1254                         if (acquire->happens_before(propagate)) {
1255                                 propagate->synchronize_with(acquire);
1256                                 /* Re-check 'propagate' for mo_graph edges */
1257                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1258                         }
1259                 }
1260         } else {
1261                 /* Break release sequence with new edges:
1262                  *   release --mo--> write --mo--> rf */
1263                 mo_graph->addEdge(release, write);
1264                 mo_graph->addEdge(write, rf);
1265         }
1266
1267         /* See if we have realized a data race */
1268         checkDataRaces();
1269 }
1270
1271 /**
1272  * Initialize the current action by performing one or more of the following
1273  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1274  * in the NodeStack, manipulating backtracking sets, allocating and
1275  * initializing clock vectors, and computing the promises to fulfill.
1276  *
1277  * @param curr The current action, as passed from the user context; may be
1278  * freed/invalidated after the execution of this function, with a different
1279  * action "returned" in its place (pass-by-reference)
1280  * @return True if curr is a newly-explored action; false otherwise
1281  */
1282 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1283 {
1284         ModelAction *newcurr;
1285
1286         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1287                 newcurr = process_rmw(*curr);
1288                 delete *curr;
1289
1290                 if (newcurr->is_rmw())
1291                         compute_promises(newcurr);
1292
1293                 *curr = newcurr;
1294                 return false;
1295         }
1296
1297         (*curr)->set_seq_number(get_next_seq_num());
1298
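        /* explore_action() returns the ModelAction recorded in the NodeStack
         * when we are replaying an already-explored step, or NULL when this
         * step is new. */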
1299         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1300         if (newcurr) {
1301                 /* First restore type and order in case of RMW operation */
1302                 if ((*curr)->is_rmwr())
1303                         newcurr->copy_typeandorder(*curr);
1304
1305                 ASSERT((*curr)->get_location() == newcurr->get_location());
1306                 newcurr->copy_from_new(*curr);
1307
1308                 /* Discard duplicate ModelAction; use action from NodeStack */
1309                 delete *curr;
1310
1311                 /* Always compute new clock vector */
1312                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1313
1314                 *curr = newcurr;
1315                 return false; /* Action was explored previously */
1316         } else {
1317                 newcurr = *curr;
1318
1319                 /* Always compute new clock vector */
1320                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1321
1322                 /* Assign most recent release fence */
1323                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1324
1325                 /*
1326                  * Perform one-time actions when pushing new ModelAction onto
1327                  * NodeStack
1328                  */
1329                 if (newcurr->is_write())
1330                         compute_promises(newcurr);
1331                 else if (newcurr->is_relseq_fixup())
1332                         compute_relseq_breakwrites(newcurr);
1333                 else if (newcurr->is_wait())
1334                         newcurr->get_node()->set_misc_max(2);
1335                 else if (newcurr->is_notify_one()) {
1336                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1337                 }
1338                 return true; /* This was a new ModelAction */
1339         }
1340 }
1341
1342 /**
1343  * @brief Establish reads-from relation between two actions
1344  *
1345  * Perform basic operations involved with establishing a concrete rf relation,
1346  * including setting the ModelAction data and checking for release sequences.
1347  *
1348  * @param act The action that is reading (must be a read)
1349  * @param rf The action from which we are reading (must be a write)
1350  *
1351  * @return True if this read established synchronization
1352  */
1353 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1354 {
1355         ASSERT(rf);
1356         act->set_read_from(rf);
1357         if (act->is_acquire()) {
1358                 rel_heads_list_t release_heads;
1359                 get_release_seq_heads(act, act, &release_heads);
1360                 int num_heads = release_heads.size();
1361                 for (unsigned int i = 0; i < release_heads.size(); i++)
1362                         if (!act->synchronize_with(release_heads[i])) {
1363                                 set_bad_synchronization();
1364                                 num_heads--;
1365                         }
1366                 return num_heads > 0;
1367         }
1368         return false;
1369 }
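
/*
 * A minimal sketch (not part of the checker) of client code whose
 * synchronization read_from() establishes; names are hypothetical:
 *
 *   std::atomic<int> flag(0);
 *   int data = 0;
 *   // Thread 1:
 *   data = 42;
 *   flag.store(1, std::memory_order_release);
 *   // Thread 2:
 *   if (flag.load(std::memory_order_acquire) == 1) {
 *           // The load reads-from the store; the store is a release head,
 *           // so synchronize_with() gives Thread 2 the release/acquire
 *           // happens-before edge and the plain access below is race-free:
 *           assert(data == 42);
 *   }
 */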
1370
1371 /**
1372  * Check promises and eliminate potentially-satisfying threads when a thread is
1373  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1374  * no longer satisfy a promise generated from that thread.
1375  *
1376  * @param blocker The thread on which a thread is waiting
1377  * @param waiting The waiting thread
1378  */
1379 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1380 {
1381         for (unsigned int i = 0; i < promises->size(); i++) {
1382                 Promise *promise = (*promises)[i];
1383                 if (!promise->thread_is_available(waiting->get_id()))
1384                         continue;
1385                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1386                         ModelAction *reader = promise->get_reader(j);
1387                         if (reader->get_tid() != blocker->get_id())
1388                                 continue;
1389                         if (promise->eliminate_thread(waiting->get_id())) {
1390                                 /* Promise has failed */
1391                                 priv->failed_promise = true;
1392                         } else {
1393                                 /* Only eliminate the 'waiting' thread once */
1394                                 return;
1395                         }
1396                 }
1397         }
1398 }
1399
1400 /**
1401  * @brief Check whether a model action is enabled.
1402  *
1403  * Checks whether a lock or join operation would be successful (i.e., is the
1404  * lock currently free, and has the joined thread completed). If the operation
1405  * would not succeed, put the action in the appropriate waiter list.
1406  *
1407  * @param curr The ModelAction to check
1408  * @return True if the action is enabled; false otherwise
1409  */
1410 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1411         if (curr->is_lock()) {
1412                 std::mutex *lock = (std::mutex *)curr->get_location();
1413                 struct std::mutex_state *state = lock->get_state();
1414                 if (state->islocked) {
1415                         //Stick the action in the appropriate waiting queue
1416                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1417                         return false;
1418                 }
1419         } else if (curr->get_type() == THREAD_JOIN) {
1420                 Thread *blocking = (Thread *)curr->get_location();
1421                 if (!blocking->is_complete()) {
1422                         blocking->push_wait_list(curr);
1423                         thread_blocking_check_promises(blocking, get_thread(curr));
1424                         return false;
1425                 }
1426         }
1427
1428         return true;
1429 }
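
/*
 * A minimal sketch (not part of the checker) of operations this check can
 * report as disabled; names are hypothetical:
 *
 *   std::mutex m;
 *   // Thread 1:            // Thread 2:
 *   m.lock();               m.lock();      // disabled while Thread 1 holds m
 *   // ...                  // ...
 *   m.unlock();
 *
 *   // Likewise, joining a thread is disabled until that thread completes;
 *   // in both cases the blocked action is parked on a waiter list above.
 */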
1430
1431 /**
1432  * This is the heart of the model checker routine. It performs model-checking
1433  * actions corresponding to a given "current action." Among other processes, it
1434  * calculates reads-from relationships, updates synchronization clock vectors,
1435  * forms a memory_order constraints graph, and handles replay/backtrack
1436  * execution when running permutations of previously-observed executions.
1437  *
1438  * @param curr The current action to process
1439  * @return The ModelAction that is actually executed; may be different than
1440  * curr; may be NULL, if the current action is not enabled to run
1441  */
1442 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1443 {
1444         ASSERT(curr);
1445         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1446
1447         if (!check_action_enabled(curr)) {
1448                 /* Make the execution look like we chose to run this action
1449                  * much later, when a lock/join can succeed */
1450                 get_thread(curr)->set_pending(curr);
1451                 scheduler->sleep(get_thread(curr));
1452                 return NULL;
1453         }
1454
1455         bool newly_explored = initialize_curr_action(&curr);
1456
1457         DBG();
1458         if (DBG_ENABLED())
1459                 curr->print();
1460
1461         wake_up_sleeping_actions(curr);
1462
1463         /* Add the action to lists before any other model-checking tasks */
1464         if (!second_part_of_rmw)
1465                 add_action_to_lists(curr);
1466
1467         /* Build may_read_from set for newly-created actions */
1468         if (newly_explored && curr->is_read())
1469                 build_may_read_from(curr);
1470
1471         /* Initialize work_queue with the "current action" work */
1472         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1473         while (!work_queue.empty() && !has_asserted()) {
1474                 WorkQueueEntry work = work_queue.front();
1475                 work_queue.pop_front();
1476
1477                 switch (work.type) {
1478                 case WORK_CHECK_CURR_ACTION: {
1479                         ModelAction *act = work.action;
1480                         bool update = false; /* update this location's release seq's */
1481                         bool update_all = false; /* update all release seq's */
1482
1483                         if (process_thread_action(curr))
1484                                 update_all = true;
1485
1486                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1487                                 update = true;
1488
1489                         if (act->is_write() && process_write(act))
1490                                 update = true;
1491
1492                         if (act->is_fence() && process_fence(act))
1493                                 update_all = true;
1494
1495                         if (act->is_mutex_op() && process_mutex(act))
1496                                 update_all = true;
1497
1498                         if (act->is_relseq_fixup())
1499                                 process_relseq_fixup(curr, &work_queue);
1500
1501                         if (update_all)
1502                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1503                         else if (update)
1504                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1505                         break;
1506                 }
1507                 case WORK_CHECK_RELEASE_SEQ:
1508                         resolve_release_sequences(work.location, &work_queue);
1509                         break;
1510                 case WORK_CHECK_MO_EDGES: {
1511                         /** @todo Complete verification of work_queue */
1512                         ModelAction *act = work.action;
1513                         bool updated = false;
1514
1515                         if (act->is_read()) {
1516                                 const ModelAction *rf = act->get_reads_from();
1517                                 const Promise *promise = act->get_reads_from_promise();
1518                                 if (rf) {
1519                                         if (r_modification_order(act, rf))
1520                                                 updated = true;
1521                                 } else if (promise) {
1522                                         if (r_modification_order(act, promise))
1523                                                 updated = true;
1524                                 }
1525                         }
1526                         if (act->is_write()) {
1527                                 if (w_modification_order(act, NULL))
1528                                         updated = true;
1529                         }
1530                         mo_graph->commitChanges();
1531
1532                         if (updated)
1533                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1534                         break;
1535                 }
1536                 default:
1537                         ASSERT(false);
1538                         break;
1539                 }
1540         }
1541
1542         check_curr_backtracking(curr);
1543         set_backtracking(curr);
1544         return curr;
1545 }
1546
1547 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1548 {
1549         Node *currnode = curr->get_node();
1550         Node *parnode = currnode->get_parent();
1551
1552         if ((parnode && !parnode->backtrack_empty()) ||
1553                          !currnode->misc_empty() ||
1554                          !currnode->read_from_empty() ||
1555                          !currnode->promise_empty() ||
1556                          !currnode->relseq_break_empty()) {
1557                 set_latest_backtrack(curr);
1558         }
1559 }
1560
1561 bool ModelChecker::promises_expired() const
1562 {
1563         for (unsigned int i = 0; i < promises->size(); i++) {
1564                 Promise *promise = (*promises)[i];
1565                 if (promise->get_expiration() < priv->used_sequence_numbers)
1566                         return true;
1567         }
1568         return false;
1569 }
1570
1571 /**
1572  * This is the strongest feasibility check available.
1573  * @return whether the current trace (partial or complete) must be a prefix of
1574  * a feasible trace.
1575  */
1576 bool ModelChecker::isfeasibleprefix() const
1577 {
1578         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1579 }
1580
1581 /**
1582  * Print diagnostic information about an infeasible execution
1583  * @param prefix A string to prefix the output with; if NULL, then a default
1584  * message prefix will be provided
1585  */
1586 void ModelChecker::print_infeasibility(const char *prefix) const
1587 {
1588         char buf[100];
1589         char *ptr = buf;
1590         if (mo_graph->checkForCycles())
1591                 ptr += sprintf(ptr, "[mo cycle]");
1592         if (priv->failed_promise)
1593                 ptr += sprintf(ptr, "[failed promise]");
1594         if (priv->too_many_reads)
1595                 ptr += sprintf(ptr, "[too many reads]");
1596         if (priv->no_valid_reads)
1597                 ptr += sprintf(ptr, "[no valid reads-from]");
1598         if (priv->bad_synchronization)
1599                 ptr += sprintf(ptr, "[bad sw ordering]");
1600         if (promises_expired())
1601                 ptr += sprintf(ptr, "[promise expired]");
1602         if (promises->size() != 0)
1603                 ptr += sprintf(ptr, "[unresolved promise]");
1604         if (ptr != buf)
1605                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1606 }
1607
1608 /**
1609  * Returns whether the current completed trace is feasible, except for pending
1610  * release sequences.
1611  */
1612 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1613 {
1614         return !is_infeasible() && promises->size() == 0;
1615 }
1616
1617 /**
1618  * Check if the current partial trace is infeasible. Does not check any
1619  * end-of-execution flags, which might rule out the execution. Thus, this is
1620  * useful only for ruling an execution as infeasible.
1621  * @return whether the current partial trace is infeasible.
1622  */
1623 bool ModelChecker::is_infeasible() const
1624 {
1625         return mo_graph->checkForCycles() ||
1626                 priv->no_valid_reads ||
1627                 priv->failed_promise ||
1628                 priv->too_many_reads ||
1629                 priv->bad_synchronization ||
1630                 promises_expired();
1631 }
1632
1633 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1634 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1635         ModelAction *lastread = get_last_action(act->get_tid());
1636         lastread->process_rmw(act);
1637         if (act->is_rmw()) {
1638                 if (lastread->get_reads_from())
1639                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1640                 else
1641                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1642                 mo_graph->commitChanges();
1643         }
1644         return lastread;
1645 }
1646
1647 /**
1648  * A helper function for ModelChecker::check_recency, to check if the current
1649  * thread is able to read from a different write/promise for 'params.maxreads'
1650  * number of steps and if that write/promise should become visible (i.e., is
1651  * ordered later in the modification order). This helps model memory liveness.
1652  *
1653  * @param curr The current action. Must be a read.
1654  * @param rf The write/promise from which we plan to read
1655  * @param other_rf The write/promise from which we may read
1656  * @return True if we were able to read from other_rf for params.maxreads steps
1657  */
1658 template <typename T, typename U>
1659 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1660 {
1661         /* Need a different write/promise */
1662         if (other_rf->equals(rf))
1663                 return false;
1664
1665         /* Only look for "newer" writes/promises */
1666         if (!mo_graph->checkReachable(rf, other_rf))
1667                 return false;
1668
1669         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1670         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1671         action_list_t::reverse_iterator rit = list->rbegin();
1672         ASSERT((*rit) == curr);
1673         /* Skip past curr */
1674         rit++;
1675
1676         /* Does this write/promise work for everyone? */
1677         for (int i = 0; i < params.maxreads; i++, rit++) {
1678                 ModelAction *act = *rit;
1679                 if (!act->may_read_from(other_rf))
1680                         return false;
1681         }
1682         return true;
1683 }
1684
1685 /**
1686  * Checks whether a thread has read from the same write or Promise too many
1687  * times without seeing the effects of a later write/Promise.
1688  *
1689  * Basic idea:
1690  * 1) there must be a different write/promise that we could read from,
1691  * 2) we must have read from the same write/promise in excess of maxreads times,
1692  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1693  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1694  *
1695  * If so, we decide that the execution is no longer feasible.
1696  *
1697  * @param curr The current action. Must be a read.
1698  * @param rf The ModelAction/Promise from which we might read.
1699  * @return True if the read should succeed; false otherwise
1700  */
1701 template <typename T>
1702 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1703 {
1704         if (!params.maxreads)
1705                 return true;
1706
1707         //NOTE: The next check is just an optimization, not strictly necessary...
1708         if (curr->get_node()->get_read_from_past_size() +
1709                         curr->get_node()->get_read_from_promise_size() <= 1)
1710                 return true;
1711
1712         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1713         int tid = id_to_int(curr->get_tid());
1714         ASSERT(tid < (int)thrd_lists->size());
1715         action_list_t *list = &(*thrd_lists)[tid];
1716         action_list_t::reverse_iterator rit = list->rbegin();
1717         ASSERT((*rit) == curr);
1718         /* Skip past curr */
1719         rit++;
1720
1721         action_list_t::reverse_iterator ritcopy = rit;
1722         /* See if we have enough reads from the same write/promise */
1723         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1724                 if (ritcopy == list->rend())
1725                         return true;
1726                 ModelAction *act = *ritcopy;
1727                 if (!act->is_read())
1728                         return true;
1729                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1730                         return true;
1731                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1732                         return true;
1733                 if (act->get_node()->get_read_from_past_size() +
1734                                 act->get_node()->get_read_from_promise_size() <= 1)
1735                         return true;
1736         }
1737         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1738                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1739                 if (should_read_instead(curr, rf, write))
1740                         return false; /* liveness failure */
1741         }
1742         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1743                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1744                 if (should_read_instead(curr, rf, promise))
1745                         return false; /* liveness failure */
1746         }
1747         return true;
1748 }
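
/*
 * A minimal sketch (not part of the checker) of the liveness pattern this
 * check targets; names are hypothetical. With params.maxreads set, a reader
 * that keeps re-reading the stale initial value below, even though a
 * mod-ordered-later store is available, is eventually ruled infeasible:
 *
 *   std::atomic<bool> done(false);
 *   // Thread 1:
 *   done.store(true, std::memory_order_relaxed);
 *   // Thread 2:
 *   while (!done.load(std::memory_order_relaxed))
 *           ;   // must not read 'false' forever once the store is available
 */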
1749
1750 /**
1751  * Updates the mo_graph with the constraints imposed from the current
1752  * read.
1753  *
1754  * Basic idea is the following: Go through each other thread and find
1755  * the last action that happened before our read.  Two cases:
1756  *
1757  * (1) The action is a write => that write must either occur before
1758  * the write we read from or be the write we read from.
1759  *
1760  * (2) The action is a read => the write that that action read from
1761  * must occur before the write we read from or be the same write.
1762  *
1763  * @param curr The current action. Must be a read.
1764  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1765  * @return True if modification order edges were added; false otherwise
1766  */
1767 template <typename rf_type>
1768 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1769 {
1770         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1771         unsigned int i;
1772         bool added = false;
1773         ASSERT(curr->is_read());
1774
1775         /* Last SC fence in the current thread */
1776         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1777
1778         /* Iterate over all threads */
1779         for (i = 0; i < thrd_lists->size(); i++) {
1780                 /* Last SC fence in thread i */
1781                 ModelAction *last_sc_fence_thread_local = NULL;
1782                 if (int_to_id((int)i) != curr->get_tid())
1783                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1784
1785                 /* Last SC fence in thread i, before last SC fence in current thread */
1786                 ModelAction *last_sc_fence_thread_before = NULL;
1787                 if (last_sc_fence_local)
1788                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1789
1790                 /* Iterate over actions in thread, starting from most recent */
1791                 action_list_t *list = &(*thrd_lists)[i];
1792                 action_list_t::reverse_iterator rit;
1793                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1794                         ModelAction *act = *rit;
1795
1796                         if (act->is_write() && !act->equals(rf) && act != curr) {
1797                                 /* C++, Section 29.3 statement 5 */
1798                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1799                                                 *act < *last_sc_fence_thread_local) {
1800                                         added = mo_graph->addEdge(act, rf) || added;
1801                                         break;
1802                                 }
1803                                 /* C++, Section 29.3 statement 4 */
1804                                 else if (act->is_seqcst() && last_sc_fence_local &&
1805                                                 *act < *last_sc_fence_local) {
1806                                         added = mo_graph->addEdge(act, rf) || added;
1807                                         break;
1808                                 }
1809                                 /* C++, Section 29.3 statement 6 */
1810                                 else if (last_sc_fence_thread_before &&
1811                                                 *act < *last_sc_fence_thread_before) {
1812                                         added = mo_graph->addEdge(act, rf) || added;
1813                                         break;
1814                                 }
1815                         }
1816
1817                         /*
1818                          * Include at most one act per-thread that "happens
1819                          * before" curr. Don't consider curr itself (the reflexive case).
1820                          */
1821                         if (act->happens_before(curr) && act != curr) {
1822                                 if (act->is_write()) {
1823                                         if (!act->equals(rf)) {
1824                                                 added = mo_graph->addEdge(act, rf) || added;
1825                                         }
1826                                 } else {
1827                                         const ModelAction *prevrf = act->get_reads_from();
1828                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1829                                         if (prevrf) {
1830                                                 if (!prevrf->equals(rf))
1831                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1832                                         } else if (!prevrf_promise->equals(rf)) {
1833                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1834                                         }
1835                                 }
1836                                 break;
1837                         }
1838                 }
1839         }
1840
1841         /*
1842          * All compatible, thread-exclusive promises must be ordered after any
1843          * concrete loads from the same thread
1844          */
1845         for (unsigned int i = 0; i < promises->size(); i++)
1846                 if ((*promises)[i]->is_compatible_exclusive(curr))
1847                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1848
1849         return added;
1850 }
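
/*
 * A minimal sketch (not part of the checker) of one shape the SC-fence rules
 * above (29.3 stmts. 4-6) constrain; names are hypothetical:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);                  // act
 *   std::atomic_thread_fence(std::memory_order_seq_cst);
 *   // Thread 2:
 *   std::atomic_thread_fence(std::memory_order_seq_cst);
 *   int r = x.load(std::memory_order_relaxed);              // curr
 *
 * If Thread 1's fence is SC-ordered before Thread 2's fence, the loop above
 * adds act --mo--> rf whenever curr reads from some other write rf, so
 * reading the stale initial value yields an mo cycle and the execution is
 * later flagged infeasible.
 */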
1851
1852 /**
1853  * Updates the mo_graph with the constraints imposed from the current write.
1854  *
1855  * Basic idea is the following: Go through each other thread and find
1856  * the latest action that happened before our write.  Two cases:
1857  *
1858  * (1) The action is a write => that write must occur before
1859  * the current write
1860  *
1861  * (2) The action is a read => the write that that action read from
1862  * must occur before the current write.
1863  *
1864  * This method also handles two other issues:
1865  *
1866  * (I) Sequential Consistency: Making sure that if the current write is
1867  * seq_cst, that it occurs after the previous seq_cst write.
1868  *
1869  * (II) Sending the write back to non-synchronizing reads.
1870  *
1871  * @param curr The current action. Must be a write.
1872  * @param send_fv A vector for stashing reads to which we may pass our future
1873  * value. If NULL, then don't record any future values.
1874  * @return True if modification order edges were added; false otherwise
1875  */
1876 bool ModelChecker::w_modification_order(ModelAction *curr, std::vector< ModelAction *, ModelAlloc<ModelAction *> > *send_fv)
1877 {
1878         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1879         unsigned int i;
1880         bool added = false;
1881         ASSERT(curr->is_write());
1882
1883         if (curr->is_seqcst()) {
1884                 /* We have to at least see the last sequentially consistent write,
1885                  * so we are initialized. */
1886                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1887                 if (last_seq_cst != NULL) {
1888                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1889                 }
1890         }
1891
1892         /* Last SC fence in the current thread */
1893         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1894
1895         /* Iterate over all threads */
1896         for (i = 0; i < thrd_lists->size(); i++) {
1897                 /* Last SC fence in thread i, before last SC fence in current thread */
1898                 ModelAction *last_sc_fence_thread_before = NULL;
1899                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1900                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1901
1902                 /* Iterate over actions in thread, starting from most recent */
1903                 action_list_t *list = &(*thrd_lists)[i];
1904                 action_list_t::reverse_iterator rit;
1905                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1906                         ModelAction *act = *rit;
1907                         if (act == curr) {
1908                                 /*
1909                                  * 1) If RMW and it actually read from something, then we
1910                                  * already have all relevant edges, so just skip to next
1911                                  * thread.
1912                                  *
1913                                  * 2) If RMW and it didn't read from anything, we should add
1914                                  * whatever edge we can get to speed up convergence.
1915                                  *
1916                                  * 3) If normal write, we need to look at earlier actions, so
1917                                  * continue processing list.
1918                                  */
1919                                 if (curr->is_rmw()) {
1920                                         if (curr->get_reads_from() != NULL)
1921                                                 break;
1922                                         else
1923                                                 continue;
1924                                 } else
1925                                         continue;
1926                         }
1927
1928                         /* C++, Section 29.3 statement 7 */
1929                         if (last_sc_fence_thread_before && act->is_write() &&
1930                                         *act < *last_sc_fence_thread_before) {
1931                                 added = mo_graph->addEdge(act, curr) || added;
1932                                 break;
1933                         }
1934
1935                         /*
1936                          * Include at most one act per-thread that "happens
1937                          * before" curr
1938                          */
1939                         if (act->happens_before(curr)) {
1940                                 /*
1941                                  * Note: if act is RMW, just add edge:
1942                                  *   act --mo--> curr
1943                                  * The following edge should be handled elsewhere:
1944                                  *   readfrom(act) --mo--> act
1945                                  */
1946                                 if (act->is_write())
1947                                         added = mo_graph->addEdge(act, curr) || added;
1948                                 else if (act->is_read()) {
1949                                         //if the previous read did not read from anything (NULL), just keep going
1950                                         if (act->get_reads_from() == NULL)
1951                                                 continue;
1952                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1953                                 }
1954                                 break;
1955                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1956                                                      !act->same_thread(curr)) {
1957                                 /* We have an action that:
1958                                    (1) did not happen before us
1959                                    (2) is a read and we are a write
1960                                    (3) cannot synchronize with us
1961                                    (4) is in a different thread
1962                                    =>
1963                                    that read could potentially read from our write.  Note that
1964                                    these checks are overly conservative at this point, we'll
1965                                    do more checks before actually removing the
1966                                    pendingfuturevalue.
1967
1968                                  */
1969                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1970                                         if (!is_infeasible())
1971                                                 send_fv->push_back(act);
1972                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1973                                                 add_future_value(curr, act);
1974                                 }
1975                         }
1976                 }
1977         }
1978
1979         /*
1980          * All compatible, thread-exclusive promises must be ordered after any
1981          * concrete stores to the same thread, or else they can be merged with
1982          * this store later
1983          */
1984         for (unsigned int i = 0; i < promises->size(); i++)
1985                 if ((*promises)[i]->is_compatible_exclusive(curr))
1986                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1987
1988         return added;
1989 }
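
/*
 * A minimal sketch (not part of the checker) of the seq_cst handling above in
 * isolation; names are hypothetical:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_seq_cst);   // an earlier SC write
 *   // Thread 2:
 *   x.store(2, std::memory_order_seq_cst);   // curr
 *
 * If the store of 1 is the last SC write to x before curr, the code above
 * adds store(1) --mo--> store(2), keeping the modification order consistent
 * with the single total order over seq_cst operations.
 */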
1990
1991 /** Arbitrary reads from the future are not allowed.  Section 29.3
1992  * part 9 places some constraints.  This method checks one result of that
1993  * constraint.  Other constraints require compiler support. */
1994 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1995 {
1996         if (!writer->is_rmw())
1997                 return true;
1998
1999         if (!reader->is_rmw())
2000                 return true;
2001
2002         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2003                 if (search == reader)
2004                         return false;
2005                 if (search->get_tid() == reader->get_tid() &&
2006                                 search->happens_before(reader))
2007                         break;
2008         }
2009
2010         return true;
2011 }
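
/*
 * A minimal sketch (not part of the checker) of the cycle this check rejects;
 * names are hypothetical:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.fetch_add(1, std::memory_order_relaxed);   // 'reader'
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // 'writer'
 *
 * Letting 'reader' take a future value from 'writer' while 'writer' already
 * reads (transitively) from 'reader' would make the two RMWs feed each other
 * out of thin air, so this helper returns false for that pairing.
 */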
2012
2013 /**
2014  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2015  * some constraints. This method checks the following constraint (others
2016  * require compiler support):
2017  *
2018  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2019  */
2020 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2021 {
2022         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2023         unsigned int i;
2024         /* Iterate over all threads */
2025         for (i = 0; i < thrd_lists->size(); i++) {
2026                 const ModelAction *write_after_read = NULL;
2027
2028                 /* Iterate over actions in thread, starting from most recent */
2029                 action_list_t *list = &(*thrd_lists)[i];
2030                 action_list_t::reverse_iterator rit;
2031                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2032                         ModelAction *act = *rit;
2033
2034                         /* Don't disallow due to act == reader */
2035                         if (!reader->happens_before(act) || reader == act)
2036                                 break;
2037                         else if (act->is_write())
2038                                 write_after_read = act;
2039                         else if (act->is_read() && act->get_reads_from() != NULL)
2040                                 write_after_read = act->get_reads_from();
2041                 }
2042
2043                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2044                         return false;
2045         }
2046         return true;
2047 }
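
/*
 * A minimal sketch (not part of the checker) of the constraint above,
 * X --hb--> Y --mo--> Z  ==>  X must not read from Z; names are hypothetical:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // X (reader)
 *   x.store(1, std::memory_order_relaxed);       // Y (sequenced after X)
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);       // Z (candidate future value)
 *
 * If store(1) --mo--> store(2), then letting the load read 2 would send a
 * value "from the future" back around the happens-before edge, so
 * mo_may_allow() returns false for that reader/writer pairing.
 */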
2048
2049 /**
2050  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2051  * The ModelAction under consideration is expected to be taking part in
2052  * release/acquire synchronization as an object of the "reads from" relation.
2053  * Note that this can only provide release sequence support for RMW chains
2054  * which do not read from the future, as those actions cannot be traced until
2055  * their "promise" is fulfilled. Similarly, we may not even establish the
2056  * presence of a release sequence with certainty, as some modification order
2057  * constraints may be decided further in the future. Thus, this function
2058  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2059  * and a boolean representing certainty.
2060  *
2061  * @param rf The action that might be part of a release sequence. Must be a
2062  * write.
2063  * @param release_heads A pass-by-reference style return parameter. After
2064  * execution of this function, release_heads will contain the heads of all the
2065  * relevant release sequences, if any exist with certainty
2066  * @param pending A pass-by-reference style return parameter which is only used
2067  * when returning false (i.e., uncertain). Returns information regarding
2068  * an uncertain release sequence, including any write operations that might
2069  * break the sequence.
2070  * @return true, if the ModelChecker is certain that release_heads is complete;
2071  * false otherwise
2072  */
2073 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2074                 rel_heads_list_t *release_heads,
2075                 struct release_seq *pending) const
2076 {
2077         /* Only check for release sequences if there are no cycles */
2078         if (mo_graph->checkForCycles())
2079                 return false;
2080
2081         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2082                 ASSERT(rf->is_write());
2083
2084                 if (rf->is_release())
2085                         release_heads->push_back(rf);
2086                 else if (rf->get_last_fence_release())
2087                         release_heads->push_back(rf->get_last_fence_release());
2088                 if (!rf->is_rmw())
2089                         break; /* End of RMW chain */
2090
2091                 /** @todo Need to be smarter here...  In the linux lock
2092                  * example, this will run to the beginning of the program for
2093                  * every acquire. */
2094                 /** @todo The way to be smarter here is to keep going until 1
2095                  * thread has a release preceded by an acquire and you've seen
2096                  * both. */
2097
2098                 /* acq_rel RMW is a sufficient stopping condition */
2099                 if (rf->is_acquire() && rf->is_release())
2100                         return true; /* complete */
2101         }
2102         if (!rf) {
2103                 /* read from future: need to settle this later */
2104                 pending->rf = NULL;
2105                 return false; /* incomplete */
2106         }
2107
2108         if (rf->is_release())
2109                 return true; /* complete */
2110
2111         /* else relaxed write
2112          * - check for fence-release in the same thread (29.8, stmt. 3)
2113          * - check modification order for contiguous subsequence
2114          *   -> rf must be same thread as release */
2115
2116         const ModelAction *fence_release = rf->get_last_fence_release();
2117         /* Synchronize with a fence-release unconditionally; we don't need to
2118          * find any more "contiguous subsequence..." for it */
2119         if (fence_release)
2120                 release_heads->push_back(fence_release);
2121
2122         int tid = id_to_int(rf->get_tid());
2123         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2124         action_list_t *list = &(*thrd_lists)[tid];
2125         action_list_t::const_reverse_iterator rit;
2126
2127         /* Find rf in the thread list */
2128         rit = std::find(list->rbegin(), list->rend(), rf);
2129         ASSERT(rit != list->rend());
2130
2131         /* Find the last {write,fence}-release */
2132         for (; rit != list->rend(); rit++) {
2133                 if (fence_release && *(*rit) < *fence_release)
2134                         break;
2135                 if ((*rit)->is_release())
2136                         break;
2137         }
2138         if (rit == list->rend()) {
2139                 /* No write-release in this thread */
2140                 return true; /* complete */
2141         } else if (fence_release && *(*rit) < *fence_release) {
2142                 /* The fence-release is more recent (and so, "stronger") than
2143                  * the most recent write-release */
2144                 return true; /* complete */
2145         } /* else, need to establish contiguous release sequence */
2146         ModelAction *release = *rit;
2147
2148         ASSERT(rf->same_thread(release));
2149
2150         pending->writes.clear();
2151
2152         bool certain = true;
2153         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2154                 if (id_to_int(rf->get_tid()) == (int)i)
2155                         continue;
2156                 list = &(*thrd_lists)[i];
2157
2158                 /* Can we ensure no future writes from this thread may break
2159                  * the release seq? */
2160                 bool future_ordered = false;
2161
2162                 ModelAction *last = get_last_action(int_to_id(i));
2163                 Thread *th = get_thread(int_to_id(i));
2164                 if ((last && rf->happens_before(last)) ||
2165                                 !is_enabled(th) ||
2166                                 th->is_complete())
2167                         future_ordered = true;
2168
2169                 ASSERT(!th->is_model_thread() || future_ordered);
2170
2171                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2172                         const ModelAction *act = *rit;
2173                         /* Reach synchronization -> this thread is complete */
2174                         if (act->happens_before(release))
2175                                 break;
2176                         if (rf->happens_before(act)) {
2177                                 future_ordered = true;
2178                                 continue;
2179                         }
2180
2181                         /* Only non-RMW writes can break release sequences */
2182                         if (!act->is_write() || act->is_rmw())
2183                                 continue;
2184
2185                         /* Check modification order */
2186                         if (mo_graph->checkReachable(rf, act)) {
2187                                 /* rf --mo--> act */
2188                                 future_ordered = true;
2189                                 continue;
2190                         }
2191                         if (mo_graph->checkReachable(act, release))
2192                                 /* act --mo--> release */
2193                                 break;
2194                         if (mo_graph->checkReachable(release, act) &&
2195                                       mo_graph->checkReachable(act, rf)) {
2196                                 /* release --mo-> act --mo--> rf */
2197                                 return true; /* complete */
2198                         }
2199                         /* act may break release sequence */
2200                         pending->writes.push_back(act);
2201                         certain = false;
2202                 }
2203                 if (!future_ordered)
2204                         certain = false; /* This thread is uncertain */
2205         }
2206
2207         if (certain) {
2208                 release_heads->push_back(release);
2209                 pending->writes.clear();
2210         } else {
2211                 pending->release = release;
2212                 pending->rf = rf;
2213         }
2214         return certain;
2215 }
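
/*
 * A minimal sketch (not part of the checker) of a release sequence this
 * function can trace; names are hypothetical:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);       // release head
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);   // rf = Thread 2's RMW
 *
 * Walking rf's reads-from chain reaches the release store, so that store is
 * reported as a release head and Thread 3 synchronizes with Thread 1 even
 * though it read from the relaxed RMW.
 */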
2216
2217 /**
2218  * An interface for getting the release sequence head(s) with which a
2219  * given ModelAction must synchronize. This function only returns a non-empty
2220  * result when it can locate a release sequence head with certainty. Otherwise,
2221  * it may mark the internal state of the ModelChecker so that it will handle
2222  * the release sequence at a later time, causing @a acquire to update its
2223  * synchronization at some later point in execution.
2224  *
2225  * @param acquire The 'acquire' action that may synchronize with a release
2226  * sequence
2227  * @param read The read action that may read from a release sequence; this may
2228  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2229  * when 'acquire' is a fence-acquire)
2230  * @param release_heads A pass-by-reference return parameter. Will be filled
2231  * with the head(s) of the release sequence(s), if they exist with certainty.
2232  * @see ModelChecker::release_seq_heads
2233  */
2234 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2235                 ModelAction *read, rel_heads_list_t *release_heads)
2236 {
2237         const ModelAction *rf = read->get_reads_from();
2238         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2239         sequence->acquire = acquire;
2240         sequence->read = read;
2241
2242         if (!release_seq_heads(rf, release_heads, sequence)) {
2243                 /* add act to 'lazy checking' list */
2244                 pending_rel_seqs->push_back(sequence);
2245         } else {
2246                 snapshot_free(sequence);
2247         }
2248 }
2249
2250 /**
2251  * Attempt to resolve all stashed operations that might synchronize with a
2252  * release sequence for a given location. This implements the "lazy" portion of
2253  * determining whether or not a release sequence was contiguous, since not all
2254  * modification order information is present at the time an action occurs.
2255  *
2256  * @param location The location/object that should be checked for release
2257  * sequence resolutions. A NULL value means to check all locations.
2258  * @param work_queue The work queue to which to add work items as they are
2259  * generated
2260  * @return True if any updates occurred (new synchronization, new mo_graph
2261  * edges)
2262  */
2263 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2264 {
2265         bool updated = false;
2266         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2267         while (it != pending_rel_seqs->end()) {
2268                 struct release_seq *pending = *it;
2269                 ModelAction *acquire = pending->acquire;
2270                 const ModelAction *read = pending->read;
2271
2272                 /* Only resolve sequences on the given location, if provided */
2273                 if (location && read->get_location() != location) {
2274                         it++;
2275                         continue;
2276                 }
2277
2278                 const ModelAction *rf = read->get_reads_from();
2279                 rel_heads_list_t release_heads;
2280                 bool complete;
2281                 complete = release_seq_heads(rf, &release_heads, pending);
2282                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2283                         if (!acquire->has_synchronized_with(release_heads[i])) {
2284                                 if (acquire->synchronize_with(release_heads[i]))
2285                                         updated = true;
2286                                 else
2287                                         set_bad_synchronization();
2288                         }
2289                 }
2290
2291                 if (updated) {
2292                         /* Re-check all pending release sequences */
2293                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2294                         /* Re-check read-acquire for mo_graph edges */
2295                         if (acquire->is_read())
2296                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2297
2298                         /* propagate synchronization to later actions */
2299                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2300                         for (; (*rit) != acquire; rit++) {
2301                                 ModelAction *propagate = *rit;
2302                                 if (acquire->happens_before(propagate)) {
2303                                         propagate->synchronize_with(acquire);
2304                                         /* Re-check 'propagate' for mo_graph edges */
2305                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2306                                 }
2307                         }
2308                 }
2309                 if (complete) {
2310                         it = pending_rel_seqs->erase(it);
2311                         snapshot_free(pending);
2312                 } else {
2313                         it++;
2314                 }
2315         }
2316
2317         // Now that we may have established new synchronization, check whether we have realized a data race.
2318         checkDataRaces();
2319
2320         return updated;
2321 }
2322
2323 /**
2324  * Performs various bookkeeping operations for the current ModelAction. For
2325  * instance, adds action to the per-object, per-thread action vector and to the
2326  * action trace list of all thread actions.
2327  *
2328  * @param act is the ModelAction to add.
2329  */
2330 void ModelChecker::add_action_to_lists(ModelAction *act)
2331 {
2332         int tid = id_to_int(act->get_tid());
2333         ModelAction *uninit = NULL;
2334         int uninit_id = -1;
2335         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2336         if (list->empty() && act->is_atomic_var()) {
2337                 uninit = new_uninitialized_action(act->get_location());
2338                 uninit_id = id_to_int(uninit->get_tid());
2339                 list->push_back(uninit);
2340         }
2341         list->push_back(act);
2342
2343         action_trace->push_back(act);
2344         if (uninit)
2345                 action_trace->push_front(uninit);
2346
2347         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2348         if (tid >= (int)vec->size())
2349                 vec->resize(priv->next_thread_id);
2350         (*vec)[tid].push_back(act);
2351         if (uninit)
2352                 (*vec)[uninit_id].push_front(uninit);
2353
2354         if ((int)thrd_last_action->size() <= tid)
2355                 thrd_last_action->resize(get_num_threads());
2356         (*thrd_last_action)[tid] = act;
2357         if (uninit)
2358                 (*thrd_last_action)[uninit_id] = uninit;
2359
2360         if (act->is_fence() && act->is_release()) {
2361                 if ((int)thrd_last_fence_release->size() <= tid)
2362                         thrd_last_fence_release->resize(get_num_threads());
2363                 (*thrd_last_fence_release)[tid] = act;
2364         }
2365
2366         if (act->is_wait()) {
2367                 void *mutex_loc = (void *) act->get_value();
2368                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2369
2370                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2371                 if (tid >= (int)vec->size())
2372                         vec->resize(priv->next_thread_id);
2373                 (*vec)[tid].push_back(act);
2374         }
2375 }
2376
2377 /**
2378  * @brief Get the last action performed by a particular Thread
2379  * @param tid The thread ID of the Thread in question
2380  * @return The last action in the thread
2381  */
2382 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2383 {
2384         int threadid = id_to_int(tid);
2385         if (threadid < (int)thrd_last_action->size())
2386                 return (*thrd_last_action)[id_to_int(tid)];
2387         else
2388                 return NULL;
2389 }
2390
2391 /**
2392  * @brief Get the last fence release performed by a particular Thread
2393  * @param tid The thread ID of the Thread in question
2394  * @return The last fence release in the thread, if one exists; NULL otherwise
2395  */
2396 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2397 {
2398         int threadid = id_to_int(tid);
2399         if (threadid < (int)thrd_last_fence_release->size())
2400                 return (*thrd_last_fence_release)[id_to_int(tid)];
2401         else
2402                 return NULL;
2403 }
2404
2405 /**
2406  * Gets the last memory_order_seq_cst write (in the total global sequence)
2407  * performed on a particular object (i.e., memory location), not including the
2408  * current action.
2409  * @param curr The current ModelAction; also denotes the object location to
2410  * check
2411  * @return The last seq_cst write
2412  */
2413 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2414 {
2415         void *location = curr->get_location();
2416         action_list_t *list = get_safe_ptr_action(obj_map, location);
2417         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2418         action_list_t::reverse_iterator rit;
2419         for (rit = list->rbegin(); rit != list->rend(); rit++)
2420                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2421                         return *rit;
2422         return NULL;
2423 }
2424
2425 /**
2426  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2427  * performed in a particular thread, prior to a particular fence.
2428  * @param tid The ID of the thread to check
2429  * @param before_fence The fence from which to begin the search; if NULL, then
2430  * search for the most recent fence in the thread.
2431  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2432  */
2433 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2434 {
2435         /* All fences should have NULL location */
2436         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2437         action_list_t::reverse_iterator rit = list->rbegin();
2438
2439         if (before_fence) {
2440                 for (; rit != list->rend(); rit++)
2441                         if (*rit == before_fence)
2442                                 break;
2443
2444                 ASSERT(*rit == before_fence);
2445                 rit++;
2446         }
2447
2448         for (; rit != list->rend(); rit++)
2449                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2450                         return *rit;
2451         return NULL;
2452 }
2453
2454 /**
2455  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2456  * location). This function identifies the mutex according to the current
2457  * action, which is presumed to operate on the same mutex.
2458  * @param curr The current ModelAction; also denotes the object location to
2459  * check
2460  * @return The last unlock operation
2461  */
2462 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2463 {
2464         void *location = curr->get_location();
2465         action_list_t *list = get_safe_ptr_action(obj_map, location);
2466         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2467         action_list_t::reverse_iterator rit;
2468         for (rit = list->rbegin(); rit != list->rend(); rit++)
2469                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2470                         return *rit;
2471         return NULL;
2472 }
2473
2474 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2475 {
2476         ModelAction *parent = get_last_action(tid);
2477         if (!parent)
2478                 parent = get_thread(tid)->get_creation();
2479         return parent;
2480 }
2481
2482 /**
2483  * Returns the clock vector for a given thread.
2484  * @param tid The thread whose clock vector we want
2485  * @return Desired clock vector
2486  */
2487 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2488 {
2489         return get_parent_action(tid)->get_cv();
2490 }
2491
2492 /**
2493  * @brief Find the promise, if any, to resolve for the current action
2494  * @param curr The current ModelAction. Should be a write.
2495  * @return The (non-negative) index for the Promise to resolve, if any;
2496  * otherwise -1
2497  */
2498 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2499 {
2500         for (unsigned int i = 0; i < promises->size(); i++)
2501                 if (curr->get_node()->get_promise(i))
2502                         return i;
2503         return -1;
2504 }
2505
2506 /**
2507  * Resolve a Promise with a current write.
2508  * @param write The ModelAction that is fulfilling Promises
2509  * @param promise_idx The index corresponding to the promise
2510  * @return True if the Promise was successfully resolved; false otherwise
2511  */
2512 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2513 {
2514         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2515         Promise *promise = (*promises)[promise_idx];
2516
2517         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2518                 ModelAction *read = promise->get_reader(i);
2519                 read_from(read, write);
2520                 actions_to_check.push_back(read);
2521         }
2522         /* Make sure the promise's value matches the write's value */
2523         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2524         if (!mo_graph->resolvePromise(promise, write))
2525                 priv->failed_promise = true;
2526
2527         promises->erase(promises->begin() + promise_idx);
2528         /**
2529          * @todo  It is possible to end up in an inconsistent state, where a
2530          * "resolved" promise may still be referenced if
2531          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2532          *
2533          * Note that the inconsistency only matters when dumping mo_graph to
2534          * file.
2535          *
2536          * delete promise;
2537          */
2538
2539         // Check whether reading these writes has made threads unable to
2540         // resolve promises
2541         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2542                 ModelAction *read = actions_to_check[i];
2543                 mo_check_promises(read, true);
2544         }
2545
2546         return true;
2547 }
2548
2549 /**
2550  * Compute the set of promises that could potentially be satisfied by this
2551  * action. Note that the set computation actually appears in the Node, not in
2552  * ModelChecker.
2553  * @param curr The ModelAction that may satisfy promises
2554  */
2555 void ModelChecker::compute_promises(ModelAction *curr)
2556 {
2557         for (unsigned int i = 0; i < promises->size(); i++) {
2558                 Promise *promise = (*promises)[i];
2559                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2560                         continue;
2561
2562                 bool satisfy = true;
2563                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2564                         const ModelAction *act = promise->get_reader(j);
2565                         if (act->happens_before(curr) ||
2566                                         act->could_synchronize_with(curr)) {
2567                                 satisfy = false;
2568                                 break;
2569                         }
2570                 }
2571                 if (satisfy)
2572                         curr->get_node()->set_promise(i);
2573         }
2574 }
2575
2576 /** Checks promises in response to a change in a thread's ClockVector. */
2577 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2578 {
2579         for (unsigned int i = 0; i < promises->size(); i++) {
2580                 Promise *promise = (*promises)[i];
2581                 if (!promise->thread_is_available(tid))
2582                         continue;
2583                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2584                         const ModelAction *act = promise->get_reader(j);
2585                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2586                                         merge_cv->synchronized_since(act)) {
2587                                 if (promise->eliminate_thread(tid)) {
2588                                         /* Promise has failed */
2589                                         priv->failed_promise = true;
2590                                         return;
2591                                 }
2592                         }
2593                 }
2594         }
2595 }
2596
2597 void ModelChecker::check_promises_thread_disabled()
2598 {
2599         for (unsigned int i = 0; i < promises->size(); i++) {
2600                 Promise *promise = (*promises)[i];
2601                 if (promise->has_failed()) {
2602                         priv->failed_promise = true;
2603                         return;
2604                 }
2605         }
2606 }
2607
2608 /**
2609  * @brief Checks promises in response to an addition to the modification
2610  * order.
2611  *
2612  * We test whether threads are still available for satisfying promises after an
2613  * addition to our modification order constraints. Those that are unavailable
2614  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2615  * that promise has failed.
2616  *
2617  * @param act The ModelAction which updated the modification order
2618  * @param is_read_check Should be true if act is a read and we must check for
2619  * updates to the store from which it read (there is a distinction here for
2620  * RMW's, which are both a load and a store)
2621  */
2622 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2623 {
2624         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2625
2626         for (unsigned int i = 0; i < promises->size(); i++) {
2627                 Promise *promise = (*promises)[i];
2628
2629                 // Is this promise on the same location?
2630                 if (!promise->same_location(write))
2631                         continue;
2632
2633                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2634                         const ModelAction *pread = promise->get_reader(j);
2635                         if (!pread->happens_before(act))
2636                                 continue;
2637                         if (mo_graph->checkPromise(write, promise)) {
2638                                 priv->failed_promise = true;
2639                                 return;
2640                         }
2641                         break;
2642                 }
2643
2644                 // Don't do any lookups twice for the same thread
2645                 if (!promise->thread_is_available(act->get_tid()))
2646                         continue;
2647
2648                 if (mo_graph->checkReachable(promise, write)) {
2649                         if (mo_graph->checkPromise(write, promise)) {
2650                                 priv->failed_promise = true;
2651                                 return;
2652                         }
2653                 }
2654         }
2655 }
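
/*
 * Illustrative note (not part of the original source): callers choose
 * 'is_read_check' based on which store the new mo-constraint involves:
 *
 *   mo_check_promises(write_act, false); // 'write_act' itself is the store
 *   mo_check_promises(read_act, true);   // check read_act->get_reads_from()
 */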
2656
2657 /**
2658  * Compute the set of writes that may break the current pending release
2659  * sequence. This information is extracted from previous release sequence
2660  * calculations.
2661  *
2662  * @param curr The current ModelAction. Must be a release sequence fixup
2663  * action.
2664  */
2665 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2666 {
2667         if (pending_rel_seqs->empty())
2668                 return;
2669
2670         struct release_seq *pending = pending_rel_seqs->back();
2671         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2672                 const ModelAction *write = pending->writes[i];
2673                 curr->get_node()->add_relseq_break(write);
2674         }
2675
2676         /* NULL means don't break the sequence; just synchronize */
2677         curr->get_node()->add_relseq_break(NULL);
2678 }
2679
2680 /**
2681  * Build up an initial set of all past writes that this 'read' action may read
2682  * from, as well as any previously-observed future values that must still be valid.
2683  *
2684  * @param curr is the current ModelAction that we are exploring; it must be a
2685  * 'read' operation.
2686  */
2687 void ModelChecker::build_may_read_from(ModelAction *curr)
2688 {
2689         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2690         unsigned int i;
2691         ASSERT(curr->is_read());
2692
2693         ModelAction *last_sc_write = NULL;
2694
2695         if (curr->is_seqcst())
2696                 last_sc_write = get_last_seq_cst_write(curr);
2697
2698         /* Iterate over all threads */
2699         for (i = 0; i < thrd_lists->size(); i++) {
2700                 /* Iterate over actions in thread, starting from most recent */
2701                 action_list_t *list = &(*thrd_lists)[i];
2702                 action_list_t::reverse_iterator rit;
2703                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2704                         ModelAction *act = *rit;
2705
2706                         /* Only consider 'write' actions */
2707                         if (!act->is_write() || act == curr)
2708                                 continue;
2709
2710                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2711                         bool allow_read = true;
2712
2713                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2714                                 allow_read = false;
2715                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2716                                 allow_read = false;
2717
2718                         if (allow_read) {
2719                                 /* Only add feasible reads */
2720                                 mo_graph->startChanges();
2721                                 r_modification_order(curr, act);
2722                                 if (!is_infeasible())
2723                                         curr->get_node()->add_read_from_past(act);
2724                                 mo_graph->rollbackChanges();
2725                         }
2726
2727                         /* Include at most one act per-thread that "happens before" curr */
2728                         if (act->happens_before(curr))
2729                                 break;
2730                 }
2731         }
2732
2733         /* Inherit existing, promised future values */
2734         for (i = 0; i < promises->size(); i++) {
2735                 const Promise *promise = (*promises)[i];
2736                 const ModelAction *promise_read = promise->get_reader(0);
2737                 if (promise_read->same_var(curr)) {
2738                         /* Only add feasible future-values */
2739                         mo_graph->startChanges();
2740                         r_modification_order(curr, promise);
2741                         if (!is_infeasible())
2742                                 curr->get_node()->add_read_from_promise(promise_read);
2743                         mo_graph->rollbackChanges();
2744                 }
2745         }
2746
2747         /* We should only find no valid may-read-from option if the execution is doomed */
2748         if (!curr->get_node()->read_from_size()) {
2749                 priv->no_valid_reads = true;
2750                 set_assert();
2751         }
2752
2753         if (DBG_ENABLED()) {
2754                 model_print("Reached read action:\n");
2755                 curr->print();
2756                 model_print("Printing read_from_past\n");
2757                 curr->get_node()->print_read_from_past();
2758                 model_print("End printing read_from_past\n");
2759         }
2760 }
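
/*
 * Illustrative sketch (not part of the original source): the seq_cst
 * filtering in build_may_read_from() roughly encodes the rule that a
 * seq_cst load may not read from a seq_cst store other than the most
 * recent one. For example, given
 *
 *   x.store(1, std::memory_order_seq_cst);   // W1
 *   x.store(2, std::memory_order_seq_cst);   // W2 == last_sc_write
 *   r = x.load(std::memory_order_seq_cst);
 *
 * W1 is excluded from the load's may-read-from set, while W2 remains.
 */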
2761
2762 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2763 {
2764         for ( ; write != NULL; write = write->get_reads_from()) {
2765                 /* UNINIT actions don't have a Node, and they never sleep */
2766                 if (write->is_uninitialized())
2767                         return true;
2768                 Node *prevnode = write->get_node()->get_parent();
2769
2770                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2771                 if (write->is_release() && thread_sleep)
2772                         return true;
2773                 if (!write->is_rmw())
2774                         return false;
2775         }
2776         return true;
2777 }
2778
2779 /**
2780  * @brief Create a new action representing an uninitialized atomic
2781  * @param location The memory location of the atomic object
2782  * @return A pointer to a new ModelAction
2783  */
2784 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2785 {
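        /*
         * Note (added commentary): the action is constructed with placement
         * new over snapshot_malloc'd memory, presumably so that it lives in
         * the snapshotting heap and is rolled back along with the rest of
         * the per-execution state.
         */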
2786         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2787         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2788         act->create_cv(NULL);
2789         return act;
2790 }
2791
2792 static void print_list(action_list_t *list)
2793 {
2794         action_list_t::iterator it;
2795
2796         model_print("---------------------------------------------------------------------\n");
2797
2798         unsigned int hash = 0;
2799
2800         for (it = list->begin(); it != list->end(); it++) {
2801                 (*it)->print();
2802                 hash = hash^(hash<<3)^((*it)->hash());
2803         }
2804         model_print("HASH %u\n", hash);
2805         model_print("---------------------------------------------------------------------\n");
2806 }
2807
2808 #if SUPPORT_MOD_ORDER_DUMP
2809 void ModelChecker::dumpGraph(char *filename) const
2810 {
2811         char buffer[200];
2812         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2813         FILE *file = fopen(buffer, "w");
2814         fprintf(file, "digraph %s {\n", filename);
2815         mo_graph->dumpNodes(file);
2816         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2817
2818         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2819                 ModelAction *act = *it;
2820                 if (act->is_read()) {
2821                         mo_graph->dot_print_node(file, act);
2822                         if (act->get_reads_from())
2823                                 mo_graph->dot_print_edge(file,
2824                                                 act->get_reads_from(),
2825                                                 act,
2826                                                 "label=\"rf\", color=red, weight=2");
2827                         else
2828                                 mo_graph->dot_print_edge(file,
2829                                                 act->get_reads_from_promise(),
2830                                                 act,
2831                                                 "label=\"rf\", color=red");
2832                 }
2833                 if (thread_array[id_to_int(act->get_tid())]) {
2834                         mo_graph->dot_print_edge(file,
2835                                         thread_array[id_to_int(act->get_tid())],
2836                                         act,
2837                                         "label=\"sb\", color=blue, weight=400");
2838                 }
2839
2840                 thread_array[id_to_int(act->get_tid())] = act;
2841         }
2842         fprintf(file, "}\n");
2843         model_free(thread_array);
2844         fclose(file);
2845 }
2846 #endif
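
/*
 * Illustrative note (not part of the original source): the .dot files
 * written by dumpGraph() can be rendered with Graphviz, e.g.:
 *
 *   dot -Tpng graph0000.dot -o graph0000.png
 */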
2847
2848 /** @brief Prints an execution trace summary. */
2849 void ModelChecker::print_summary() const
2850 {
2851 #if SUPPORT_MOD_ORDER_DUMP
2852         char buffername[100];
2853         sprintf(buffername, "exec%04u", stats.num_total);
2854         mo_graph->dumpGraphToFile(buffername);
2855         sprintf(buffername, "graph%04u", stats.num_total);
2856         dumpGraph(buffername);
2857 #endif
2858
2859         model_print("Execution %d:", stats.num_total);
2860         if (isfeasibleprefix()) {
2861                 if (scheduler->all_threads_sleeping())
2862                         model_print(" SLEEP-SET REDUNDANT");
2863                 model_print("\n");
2864         } else
2865                 print_infeasibility(" INFEASIBLE");
2866         print_list(action_trace);
2867         model_print("\n");
2868 }
2869
2870 /**
2871  * Add a Thread to the system for the first time. Should only be called once
2872  * per thread.
2873  * @param t The Thread to add
2874  */
2875 void ModelChecker::add_thread(Thread *t)
2876 {
2877         thread_map->put(id_to_int(t->get_id()), t);
2878         scheduler->add_thread(t);
2879 }
2880
2881 /**
2882  * Removes a thread from the scheduler.
2883  * @param t The Thread to remove
2884  */
2885 void ModelChecker::remove_thread(Thread *t)
2886 {
2887         scheduler->remove_thread(t);
2888 }
2889
2890 /**
2891  * @brief Get a Thread reference by its ID
2892  * @param tid The Thread's ID
2893  * @return A Thread reference
2894  */
2895 Thread * ModelChecker::get_thread(thread_id_t tid) const
2896 {
2897         return thread_map->get(id_to_int(tid));
2898 }
2899
2900 /**
2901  * @brief Get a reference to the Thread in which a ModelAction was executed
2902  * @param act The ModelAction
2903  * @return A Thread reference
2904  */
2905 Thread * ModelChecker::get_thread(const ModelAction *act) const
2906 {
2907         return get_thread(act->get_tid());
2908 }
2909
2910 /**
2911  * @brief Get a Promise's "promise number"
2912  *
2913  * A "promise number" is an index number that is unique to a promise, valid
2914  * only for a specific snapshot of an execution trace. Promises may come and go
2915  * as they are generated and resolved, so an index only retains meaning for the
2916  * current snapshot.
2917  *
2918  * @param promise The Promise to check
2919  * @return The promise index, if the promise still is valid; otherwise -1
2920  */
2921 int ModelChecker::get_promise_number(const Promise *promise) const
2922 {
2923         for (unsigned int i = 0; i < promises->size(); i++)
2924                 if ((*promises)[i] == promise)
2925                         return i;
2926         /* Not found */
2927         return -1;
2928 }
2929
2930 /**
2931  * @brief Check if a Thread is currently enabled
2932  * @param t The Thread to check
2933  * @return True if the Thread is currently enabled
2934  */
2935 bool ModelChecker::is_enabled(Thread *t) const
2936 {
2937         return scheduler->is_enabled(t);
2938 }
2939
2940 /**
2941  * @brief Check if a Thread is currently enabled
2942  * @param tid The ID of the Thread to check
2943  * @return True if the Thread is currently enabled
2944  */
2945 bool ModelChecker::is_enabled(thread_id_t tid) const
2946 {
2947         return scheduler->is_enabled(tid);
2948 }
2949
2950 /**
2951  * Switch from a model-checker context to a user-thread context. This is the
2952  * complement of ModelChecker::switch_to_master and must be called from the
2953  * model-checker context
2954  *
2955  * @param thread The user-thread to switch to
2956  */
2957 void ModelChecker::switch_from_master(Thread *thread)
2958 {
2959         scheduler->set_current_thread(thread);
2960         Thread::swap(&system_context, thread);
2961 }
2962
2963 /**
2964  * Switch from a user-context to the "master thread" context (a.k.a. system
2965  * context). This switch is made with the intention of exploring a particular
2966  * model-checking action (described by a ModelAction object). Must be called
2967  * from a user-thread context.
2968  *
2969  * @param act The current action that will be explored. May be NULL only if
2970  * trace is exiting via an assertion (see ModelChecker::set_assert and
2971  * ModelChecker::has_asserted).
2972  * @return Return the value returned by the current action
2973  */
2974 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2975 {
2976         DBG();
2977         Thread *old = thread_current();
2978         ASSERT(!old->get_pending());
2979         old->set_pending(act);
2980         if (Thread::swap(old, &system_context) < 0) {
2981                 perror("swap threads");
2982                 exit(EXIT_FAILURE);
2983         }
2984         return old->get_return_value();
2985 }
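
/*
 * Illustrative sketch (not part of the original source): user-context
 * operations hand control to the model checker by packaging the step as a
 * ModelAction and calling switch_to_master(); the returned value becomes
 * the result of the operation. For an atomic load, a wrapper (hypothetical
 * 'order' and 'addr' names) might look roughly like:
 *
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, order, (void *)addr));
 */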
2986
2987 /**
2988  * Takes the next step in the execution, if possible.
2989  * @param curr The current step to take
2990  * @return Returns the next Thread to run, if any; NULL if this execution
2991  * should terminate
2992  */
2993 Thread * ModelChecker::take_step(ModelAction *curr)
2994 {
2995         Thread *curr_thrd = get_thread(curr);
2996         ASSERT(curr_thrd->get_state() == THREAD_READY);
2997
2998         curr = check_current_action(curr);
2999
3000         /* Infeasible -> don't take any more steps */
3001         if (is_infeasible())
3002                 return NULL;
3003         else if (isfeasibleprefix() && have_bug_reports()) {
3004                 set_assert();
3005                 return NULL;
3006         }
3007
3008         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3009                 return NULL;
3010
3011         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3012                 scheduler->remove_thread(curr_thrd);
3013
3014         Thread *next_thrd = get_next_thread(curr);
3015
3016         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3017                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3018
3019         return next_thrd;
3020 }
3021
3022 /** Wrapper to run the user's main function, with appropriate arguments */
3023 void user_main_wrapper(void *)
3024 {
3025         user_main(model->params.argc, model->params.argv);
3026 }
3027
3028 /** @brief Run ModelChecker for the user program */
3029 void ModelChecker::run()
3030 {
3031         do {
3032                 thrd_t user_thread;
3033                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3034                 add_thread(t);
3035
3036                 do {
3037                         /*
3038                          * Stash next pending action(s) for thread(s). We
3039                          * should only need to stash one thread's action--the
3040                          * thread which just took a step--plus the first step
3041                          * for any newly-created thread
3042                          */
3043                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3044                                 thread_id_t tid = int_to_id(i);
3045                                 Thread *thr = get_thread(tid);
3046                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3047                                         switch_from_master(thr);
3048                                 }
3049                         }
3050
3051                         /* Catch assertions from prior take_step or from
3052                          * between-ModelAction bugs (e.g., data races) */
3053                         if (has_asserted())
3054                                 break;
3055
3056                         /* Consume the next action for a Thread */
3057                         ModelAction *curr = t->get_pending();
3058                         t->set_pending(NULL);
3059                         t = take_step(curr);
3060                 } while (t && !t->is_model_thread());
3061
3062                 /*
3063                  * Launch end-of-execution release sequence fixups only when
3064                  * the execution is otherwise feasible AND there are:
3065                  *
3066                  * (1) pending release sequences
3067                  * (2) pending assertions that could be invalidated by a change
3068                  * in clock vectors (i.e., data races)
3069                  * (3) no pending promises
3070                  */
3071                 while (!pending_rel_seqs->empty() &&
3072                                 is_feasible_prefix_ignore_relseq() &&
3073                                 !unrealizedraces.empty()) {
3074                         model_print("*** WARNING: release sequence fixup action "
3075                                         "(%zu pending release sequence(s)) ***\n",
3076                                         pending_rel_seqs->size());
3077                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3078                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3079                                         model_thread);
3080                         take_step(fixup);
3081                 }
3082         } while (next_execution());
3083
3084         model_print("******* Model-checking complete: *******\n");
3085         print_stats();
3086 }