1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
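/**
 * Get-or-create helpers: look up the action list (or per-thread list vector)
 * associated with a memory location in a snapshotting HashTable, lazily
 * allocating an empty one on first use so that callers never receive NULL.
 */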
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next chosen thread to run, if any exist; otherwise NULL, if no
221  * threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different write. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be.  This method marks each such thread's pending action
300  * with the sleep flag.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* A sleeping fence-acquire can be woken by any release operation (fence or store) */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can awake load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
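/*
 * Illustrative sketch (not part of the checker itself): the final condition
 * above covers a publication pattern like the following, where 'flag' is a
 * hypothetical user-program atomic, not anything defined in this file:
 *
 *   // Thread 1                                   // Thread 2 (in sleep set)
 *   atomic_thread_fence(memory_order_release);
 *   flag.store(1, memory_order_relaxed);          // 'curr': plain store
 *                                                 // pending: flag.load(acquire)
 *
 * The relaxed store alone cannot synchronize, but together with the earlier
 * fence-release it can, so the sleeping load-acquire must be reconsidered.
 */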
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
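/*
 * Hypothetical user-side usage (a sketch only, assuming the MODEL_ASSERT
 * macro exported by model-assert.h; the names below are user code, not part
 * of this file):
 *
 *   #include <model-assert.h>
 *
 *   void user_main(void *arg)
 *   {
 *           int r = shared.load(std::memory_order_acquire);
 *           MODEL_ASSERT(r == 42);  // on failure, ends up in assert_user_bug()
 *   }
 *
 * If the failing trace is already a feasible prefix, the execution halts
 * immediately; otherwise the message is stashed via assert_bug() above.
 */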
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else {
487                 stats.num_redundant++;
488
489                 /**
490                  * @todo We can violate this ASSERT() when fairness/sleep sets
491                  * conflict to cause an execution to terminate, e.g. with:
492                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
493                  */
494                 //ASSERT(scheduler->all_threads_sleeping());
495         }
496 }
497
498 /** @brief Print execution stats */
499 void ModelChecker::print_stats() const
500 {
501         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
502         model_print("Number of redundant executions: %d\n", stats.num_redundant);
503         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
504         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
505         model_print("Total executions: %d\n", stats.num_total);
506         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
507 }
508
509 /**
510  * @brief End-of-execution print
511  * @param printbugs Should any existing bugs be printed?
512  */
513 void ModelChecker::print_execution(bool printbugs) const
514 {
515         print_program_output();
516
517         if (DBG_ENABLED() || params.verbose) {
518                 model_print("Earliest divergence point since last feasible execution:\n");
519                 if (earliest_diverge)
520                         earliest_diverge->print();
521                 else
522                         model_print("(Not set)\n");
523
524                 model_print("\n");
525                 print_stats();
526         }
527
528         /* Don't print invalid bugs */
529         if (printbugs)
530                 print_bugs();
531
532         model_print("\n");
533         print_summary();
534 }
535
536 /**
537  * Queries the model-checker for more executions to explore and, if one
538  * exists, resets the model-checker state to execute a new execution.
539  *
540  * @return If there are more executions to explore, return true. Otherwise,
541  * return false.
542  */
543 bool ModelChecker::next_execution()
544 {
545         DBG();
546         /* Is this execution a feasible execution that's worth bug-checking? */
547         bool complete = isfeasibleprefix() && (is_complete_execution() ||
548                         have_bug_reports());
549
550         /* End-of-execution bug checks */
551         if (complete) {
552                 if (is_deadlocked())
553                         assert_bug("Deadlock detected");
554
555                 checkDataRaces();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
580
581 /**
582  * @brief Find the last fence-related backtracking conflict for a ModelAction
583  *
584  * This function performs the search for the most recent conflicting action
585  * against which we should perform backtracking, as affected by fence
586  * operations. This includes pairs of potentially-synchronizing actions which
587  * occur due to fence-acquire or fence-release, and hence should be explored in
588  * the opposite execution order.
589  *
590  * @param act The current action
591  * @return The most recent action which conflicts with act due to fences
592  */
593 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
594 {
595         /* Only perform release/acquire fence backtracking for stores */
596         if (!act->is_write())
597                 return NULL;
598
599         /* Find a fence-release (or, act is a release) */
600         ModelAction *last_release;
601         if (act->is_release())
602                 last_release = act;
603         else
604                 last_release = get_last_fence_release(act->get_tid());
605         if (!last_release)
606                 return NULL;
607
608         /* Skip past the release */
609         action_list_t *list = action_trace;
610         action_list_t::reverse_iterator rit;
611         for (rit = list->rbegin(); rit != list->rend(); rit++)
612                 if (*rit == last_release)
613                         break;
614         ASSERT(rit != list->rend());
615
616         /* Find a prior:
617          *   load-acquire
618          * or
619          *   load --sb-> fence-acquire */
620         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
621         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
622         bool found_acquire_fences = false;
623         for ( ; rit != list->rend(); rit++) {
624                 ModelAction *prev = *rit;
625                 if (act->same_thread(prev))
626                         continue;
627
628                 int tid = id_to_int(prev->get_tid());
629
630                 if (prev->is_read() && act->same_var(prev)) {
631                         if (prev->is_acquire()) {
632                                 /* Found most recent load-acquire, don't need
633                                  * to search for more fences */
634                                 if (!found_acquire_fences)
635                                         return NULL;
636                         } else {
637                                 prior_loads[tid] = prev;
638                         }
639                 }
640                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
641                         found_acquire_fences = true;
642                         acquire_fences[tid] = prev;
643                 }
644         }
645
646         ModelAction *latest_backtrack = NULL;
647         for (unsigned int i = 0; i < acquire_fences.size(); i++)
648                 if (acquire_fences[i] && prior_loads[i])
649                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
650                                 latest_backtrack = acquire_fences[i];
651         return latest_backtrack;
652 }
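/*
 * Illustrative sketch of the pattern searched for above (hypothetical
 * user-program code with an atomic 'x'; not part of the checker):
 *
 *   // Thread 1 (act = the store)            // Thread 2 (earlier in trace)
 *   atomic_thread_fence(memory_order_release);
 *   x.store(1, memory_order_relaxed);        r = x.load(memory_order_relaxed);
 *                                            atomic_thread_fence(memory_order_acquire);
 *
 * Had Thread 2's load instead run after Thread 1's store, the two fences
 * could synchronize, so the acquire fence is a backtracking candidate and the
 * latest such conflict is returned.
 */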
653
654 /**
655  * @brief Find the last backtracking conflict for a ModelAction
656  *
657  * This function performs the search for the most recent conflicting action
658  * against which we should perform backtracking. This primarily includes pairs of
659  * synchronizing actions which should be explored in the opposite execution
660  * order.
661  *
662  * @param act The current action
663  * @return The most recent action which conflicts with act
664  */
665 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
666 {
667         switch (act->get_type()) {
668         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
669         case ATOMIC_READ:
670         case ATOMIC_WRITE:
671         case ATOMIC_RMW: {
672                 ModelAction *ret = NULL;
673
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (prev->could_synchronize_with(act)) {
680                                 ret = prev;
681                                 break;
682                         }
683                 }
684
685                 ModelAction *ret2 = get_last_fence_conflict(act);
686                 if (!ret2)
687                         return ret;
688                 if (!ret)
689                         return ret2;
690                 if (*ret < *ret2)
691                         return ret2;
692                 return ret;
693         }
694         case ATOMIC_LOCK:
695         case ATOMIC_TRYLOCK: {
696                 /* linear search: from most recent to oldest */
697                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
698                 action_list_t::reverse_iterator rit;
699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
700                         ModelAction *prev = *rit;
701                         if (act->is_conflicting_lock(prev))
702                                 return prev;
703                 }
704                 break;
705         }
706         case ATOMIC_UNLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (!act->same_thread(prev) && prev->is_failed_trylock())
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_WAIT: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                         if (!act->same_thread(prev) && prev->is_notify())
726                                 return prev;
727                 }
728                 break;
729         }
730
731         case ATOMIC_NOTIFY_ALL:
732         case ATOMIC_NOTIFY_ONE: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_wait())
739                                 return prev;
740                 }
741                 break;
742         }
743         default:
744                 break;
745         }
746         return NULL;
747 }
748
749 /** This method finds backtracking points against which we should try to
750  * reorder the parameter ModelAction.
751  *
752  * @param act The ModelAction to find backtracking points for.
753  */
754 void ModelChecker::set_backtracking(ModelAction *act)
755 {
756         Thread *t = get_thread(act);
757         ModelAction *prev = get_last_conflict(act);
758         if (prev == NULL)
759                 return;
760
761         Node *node = prev->get_node()->get_parent();
762
763         int low_tid, high_tid;
764         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
765                 low_tid = id_to_int(act->get_tid());
766                 high_tid = low_tid + 1;
767         } else {
768                 low_tid = 0;
769                 high_tid = get_num_threads();
770         }
771
772         for (int i = low_tid; i < high_tid; i++) {
773                 thread_id_t tid = int_to_id(i);
774
775                 /* Make sure this thread can be enabled here. */
776                 if (i >= node->get_num_threads())
777                         break;
778
779                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
780                 if (node->enabled_status(tid) != THREAD_ENABLED)
781                         continue;
782
783                 /* Check if this has been explored already */
784                 if (node->has_been_explored(tid))
785                         continue;
786
787                 /* See if fairness allows */
788                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
789                         bool unfair = false;
790                         for (int t = 0; t < node->get_num_threads(); t++) {
791                                 thread_id_t tother = int_to_id(t);
792                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
793                                         unfair = true;
794                                         break;
795                                 }
796                         }
797                         if (unfair)
798                                 continue;
799                 }
800                 /* Cache the latest backtracking point */
801                 set_latest_backtrack(prev);
802
803                 /* If this is a new backtracking point, mark the tree */
804                 if (!node->set_backtrack(tid))
805                         continue;
806                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
807                                         id_to_int(prev->get_tid()),
808                                         id_to_int(t->get_id()));
809                 if (DBG_ENABLED()) {
810                         prev->print();
811                         act->print();
812                 }
813         }
814 }
815
816 /**
817  * @brief Cache a backtracking point as the "most recent", if eligible
818  *
819  * Note that this does not prepare the NodeStack for this backtracking
820  * operation; it only caches the action on a per-execution basis
821  *
822  * @param act The operation at which we should explore a different next action
823  * (i.e., backtracking point)
824  * @return True, if this action is now the most recent backtracking point;
825  * false otherwise
826  */
827 bool ModelChecker::set_latest_backtrack(ModelAction *act)
828 {
829         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
830                 priv->next_backtrack = act;
831                 return true;
832         }
833         return false;
834 }
835
836 /**
837  * Returns last backtracking point. The model checker will explore a different
838  * path for this point in the next execution.
839  * @return The ModelAction at which the next execution should diverge.
840  */
841 ModelAction * ModelChecker::get_next_backtrack()
842 {
843         ModelAction *next = priv->next_backtrack;
844         priv->next_backtrack = NULL;
845         return next;
846 }
847
848 /**
849  * Processes a read model action.
850  * @param curr is the read model action to process.
851  * @return True if processing this read updates the mo_graph.
852  */
853 bool ModelChecker::process_read(ModelAction *curr)
854 {
855         Node *node = curr->get_node();
856         while (true) {
857                 bool updated = false;
858                 switch (node->get_read_from_status()) {
859                 case READ_FROM_PAST: {
860                         const ModelAction *rf = node->get_read_from_past();
861                         ASSERT(rf);
862
863                         mo_graph->startChanges();
864
865                         ASSERT(!is_infeasible());
866                         if (!check_recency(curr, rf)) {
867                                 if (node->increment_read_from()) {
868                                         mo_graph->rollbackChanges();
869                                         continue;
870                                 } else {
871                                         priv->too_many_reads = true;
872                                 }
873                         }
874
875                         updated = r_modification_order(curr, rf);
876                         read_from(curr, rf);
877                         mo_graph->commitChanges();
878                         mo_check_promises(curr, true);
879                         break;
880                 }
881                 case READ_FROM_PROMISE: {
882                         Promise *promise = curr->get_node()->get_read_from_promise();
883                         if (promise->add_reader(curr))
884                                 priv->failed_promise = true;
885                         curr->set_read_from_promise(promise);
886                         mo_graph->startChanges();
887                         if (!check_recency(curr, promise))
888                                 priv->too_many_reads = true;
889                         updated = r_modification_order(curr, promise);
890                         mo_graph->commitChanges();
891                         break;
892                 }
893                 case READ_FROM_FUTURE: {
894                         /* Read from future value */
895                         struct future_value fv = node->get_future_value();
896                         Promise *promise = new Promise(curr, fv);
897                         curr->set_read_from_promise(promise);
898                         promises->push_back(promise);
899                         mo_graph->startChanges();
900                         updated = r_modification_order(curr, promise);
901                         mo_graph->commitChanges();
902                         break;
903                 }
904                 default:
905                         ASSERT(false);
906                 }
907                 get_thread(curr)->set_return_value(curr->get_return_value());
908                 return updated;
909         }
910 }
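/*
 * Note on READ_FROM_FUTURE above: a load may speculatively return a value
 * that no existing write has produced yet. The obligation is recorded as a
 * Promise; if a later write with that value resolves it, the speculation was
 * justified, otherwise the execution is eventually marked infeasible
 * (priv->failed_promise).
 */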
911
912 /**
913  * Processes a lock, trylock, unlock, wait, or notify model action.
914  * @param curr is the mutex-related model action to process.
915  *
916  * The trylock operation checks whether the lock is already held. If not,
917  * it falls through to the normal lock operation case; if so, it fails
918  * (the thread's return value is set to 0).
919  *
920  * The lock operation has already been checked that it is enabled, so
921  * it just grabs the lock and synchronizes with the previous unlock.
922  *
923  * The unlock operation has to re-enable all of the threads that are
924  * waiting on the lock.
925  *
926  * @return True if synchronization was updated; false otherwise
927  */
928 bool ModelChecker::process_mutex(ModelAction *curr)
929 {
930         std::mutex *mutex = NULL;
931         struct std::mutex_state *state = NULL;
932
933         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
934                 mutex = (std::mutex *)curr->get_location();
935                 state = mutex->get_state();
936         } else if (curr->is_wait()) {
937                 mutex = (std::mutex *)curr->get_value();
938                 state = mutex->get_state();
939         }
940
941         switch (curr->get_type()) {
942         case ATOMIC_TRYLOCK: {
943                 bool success = !state->islocked;
944                 curr->set_try_lock(success);
945                 if (!success) {
946                         get_thread(curr)->set_return_value(0);
947                         break;
948                 }
949                 get_thread(curr)->set_return_value(1);
950         }
951                 //otherwise fall into the lock case
952         case ATOMIC_LOCK: {
953                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
954                         assert_bug("Lock access before initialization");
955                 state->islocked = true;
956                 ModelAction *unlock = get_last_unlock(curr);
957                 //synchronize with the previous unlock statement
958                 if (unlock != NULL) {
959                         curr->synchronize_with(unlock);
960                         return true;
961                 }
962                 break;
963         }
964         case ATOMIC_UNLOCK: {
965                 //unlock the lock
966                 state->islocked = false;
967                 //wake up the other threads
968                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
969                 //activate all the waiting threads
970                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
971                         scheduler->wake(get_thread(*rit));
972                 }
973                 waiters->clear();
974                 break;
975         }
976         case ATOMIC_WAIT: {
977                 //unlock the lock
978                 state->islocked = false;
979                 //wake up the other threads
980                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
981                 //activate all the waiting threads
982                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
983                         scheduler->wake(get_thread(*rit));
984                 }
985                 waiters->clear();
986                 //check whether we should go to sleep or not...simulate spurious failures
987                 if (curr->get_node()->get_misc() == 0) {
988                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
989                         //disable us
990                         scheduler->sleep(get_thread(curr));
991                 }
992                 break;
993         }
994         case ATOMIC_NOTIFY_ALL: {
995                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
996                 //activate all the waiting threads
997                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
998                         scheduler->wake(get_thread(*rit));
999                 }
1000                 waiters->clear();
1001                 break;
1002         }
1003         case ATOMIC_NOTIFY_ONE: {
1004                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1005                 int wakeupthread = curr->get_node()->get_misc();
1006                 action_list_t::iterator it = waiters->begin();
1007                 advance(it, wakeupthread);
1008                 scheduler->wake(get_thread(*it));
1009                 waiters->erase(it);
1010                 break;
1011         }
1012
1013         default:
1014                 ASSERT(0);
1015         }
1016         return false;
1017 }
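/*
 * Note on ATOMIC_WAIT above: the node's misc counter lets the checker explore
 * both outcomes of a wait, i.e. actually blocking on the condition variable or
 * returning spuriously. A hypothetical user-program sketch (API names are
 * illustrative, not taken from this file) must therefore re-check its
 * predicate:
 *
 *   m.lock();
 *   while (!ready)          // re-checked because wait() may return spuriously
 *           cv.wait(m);
 *   m.unlock();
 */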
1018
1019 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1020 {
1021         /* Do more ambitious checks now that mo is more complete */
1022         if (mo_may_allow(writer, reader)) {
1023                 Node *node = reader->get_node();
1024
1025                 /* Find an ancestor thread which exists at the time of the reader */
1026                 Thread *write_thread = get_thread(writer);
1027                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1028                         write_thread = write_thread->get_parent();
1029
1030                 struct future_value fv = {
1031                         writer->get_write_value(),
1032                         writer->get_seq_number() + params.maxfuturedelay,
1033                         write_thread->get_id(),
1034                 };
1035                 if (node->add_future_value(fv))
1036                         set_latest_backtrack(reader);
1037         }
1038 }
1039
1040 /**
1041  * Process a write ModelAction
1042  * @param curr The ModelAction to process
1043  * @return True if the mo_graph was updated or promises were resolved
1044  */
1045 bool ModelChecker::process_write(ModelAction *curr)
1046 {
1047         /* Readers to which we may send our future value */
1048         std::vector< ModelAction *, ModelAlloc<ModelAction *> > send_fv;
1049
1050         bool updated_mod_order = w_modification_order(curr, &send_fv);
1051         int promise_idx = get_promise_to_resolve(curr);
1052         const ModelAction *earliest_promise_reader;
1053         bool updated_promises = false;
1054
1055         if (promise_idx >= 0) {
1056                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1057                 updated_promises = resolve_promise(curr, promise_idx);
1058         } else
1059                 earliest_promise_reader = NULL;
1060
1061         /* Don't send future values to reads after the Promise we resolve */
1062         for (unsigned int i = 0; i < send_fv.size(); i++) {
1063                 ModelAction *read = send_fv[i];
1064                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1065                         futurevalues->push_back(PendingFutureValue(curr, read));
1066         }
1067
1068         if (promises->size() == 0) {
1069                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1070                         struct PendingFutureValue pfv = (*futurevalues)[i];
1071                         add_future_value(pfv.writer, pfv.act);
1072                 }
1073                 futurevalues->clear();
1074         }
1075
1076         mo_graph->commitChanges();
1077         mo_check_promises(curr, false);
1078
1079         get_thread(curr)->set_return_value(VALUE_NONE);
1080         return updated_mod_order || updated_promises;
1081 }
1082
1083 /**
1084  * Process a fence ModelAction
1085  * @param curr The ModelAction to process
1086  * @return True if synchronization was updated
1087  */
1088 bool ModelChecker::process_fence(ModelAction *curr)
1089 {
1090         /*
1091          * fence-relaxed: no-op
1092          * fence-release: only log the occurrence (not in this function), for
1093          *   use in later synchronization
1094          * fence-acquire (this function): search for hypothetical release
1095          *   sequences
1096          */
1097         bool updated = false;
1098         if (curr->is_acquire()) {
1099                 action_list_t *list = action_trace;
1100                 action_list_t::reverse_iterator rit;
1101                 /* Find X : is_read(X) && X --sb-> curr */
1102                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1103                         ModelAction *act = *rit;
1104                         if (act == curr)
1105                                 continue;
1106                         if (act->get_tid() != curr->get_tid())
1107                                 continue;
1108                         /* Stop at the beginning of the thread */
1109                         if (act->is_thread_start())
1110                                 break;
1111                         /* Stop once we reach a prior fence-acquire */
1112                         if (act->is_fence() && act->is_acquire())
1113                                 break;
1114                         if (!act->is_read())
1115                                 continue;
1116                         /* read-acquire will find its own release sequences */
1117                         if (act->is_acquire())
1118                                 continue;
1119
1120                         /* Establish hypothetical release sequences */
1121                         rel_heads_list_t release_heads;
1122                         get_release_seq_heads(curr, act, &release_heads);
1123                         for (unsigned int i = 0; i < release_heads.size(); i++)
1124                                 if (!curr->synchronize_with(release_heads[i]))
1125                                         set_bad_synchronization();
1126                         if (release_heads.size() != 0)
1127                                 updated = true;
1128                 }
1129         }
1130         return updated;
1131 }
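/*
 * Illustrative sketch of the fence-acquire case handled above (hypothetical
 * user-program atomics 'data' and 'flag'; not part of the checker):
 *
 *   // Thread 1                           // Thread 2 (curr = the fence)
 *   data = 42;
 *   flag.store(1, memory_order_release);  r = flag.load(memory_order_relaxed);
 *                                         atomic_thread_fence(memory_order_acquire);
 *
 * If the relaxed load reads from the release store, the acquire fence
 * synchronizes with it; the loop above walks backwards over the thread's
 * prior relaxed reads to establish such hypothetical release sequences.
 */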
1132
1133 /**
1134  * @brief Process the current action for thread-related activity
1135  *
1136  * Performs current-action processing for a THREAD_* ModelAction. Processing
1137  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1138  * synchronization, etc.  This function is a no-op for non-THREAD actions
1139  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1140  *
1141  * @param curr The current action
1142  * @return True if synchronization was updated or a thread completed
1143  */
1144 bool ModelChecker::process_thread_action(ModelAction *curr)
1145 {
1146         bool updated = false;
1147
1148         switch (curr->get_type()) {
1149         case THREAD_CREATE: {
1150                 thrd_t *thrd = (thrd_t *)curr->get_location();
1151                 struct thread_params *params = (struct thread_params *)curr->get_value();
1152                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1153                 add_thread(th);
1154                 th->set_creation(curr);
1155                 /* Promises can be satisfied by children */
1156                 for (unsigned int i = 0; i < promises->size(); i++) {
1157                         Promise *promise = (*promises)[i];
1158                         if (promise->thread_is_available(curr->get_tid()))
1159                                 promise->add_thread(th->get_id());
1160                 }
1161                 break;
1162         }
1163         case THREAD_JOIN: {
1164                 Thread *blocking = curr->get_thread_operand();
1165                 ModelAction *act = get_last_action(blocking->get_id());
1166                 curr->synchronize_with(act);
1167                 updated = true; /* trigger rel-seq checks */
1168                 break;
1169         }
1170         case THREAD_FINISH: {
1171                 Thread *th = get_thread(curr);
1172                 while (!th->wait_list_empty()) {
1173                         ModelAction *act = th->pop_wait_list();
1174                         scheduler->wake(get_thread(act));
1175                 }
1176                 th->complete();
1177                 /* Completed thread can't satisfy promises */
1178                 for (unsigned int i = 0; i < promises->size(); i++) {
1179                         Promise *promise = (*promises)[i];
1180                         if (promise->thread_is_available(th->get_id()))
1181                                 if (promise->eliminate_thread(th->get_id()))
1182                                         priv->failed_promise = true;
1183                 }
1184                 updated = true; /* trigger rel-seq checks */
1185                 break;
1186         }
1187         case THREAD_START: {
1188                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1189                 break;
1190         }
1191         default:
1192                 break;
1193         }
1194
1195         return updated;
1196 }
1197
1198 /**
1199  * @brief Process the current action for release sequence fixup activity
1200  *
1201  * Performs model-checker release sequence fixups for the current action,
1202  * forcing a single pending release sequence to break (with a given, potential
1203  * "loose" write) or to complete (i.e., synchronize). If a pending release
1204  * sequence forms a complete release sequence, then we must perform the fixup
1205  * synchronization, mo_graph additions, etc.
1206  *
1207  * @param curr The current action; must be a release sequence fixup action
1208  * @param work_queue The work queue to which to add work items as they are
1209  * generated
1210  */
1211 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1212 {
1213         const ModelAction *write = curr->get_node()->get_relseq_break();
1214         struct release_seq *sequence = pending_rel_seqs->back();
1215         pending_rel_seqs->pop_back();
1216         ASSERT(sequence);
1217         ModelAction *acquire = sequence->acquire;
1218         const ModelAction *rf = sequence->rf;
1219         const ModelAction *release = sequence->release;
1220         ASSERT(acquire);
1221         ASSERT(release);
1222         ASSERT(rf);
1223         ASSERT(release->same_thread(rf));
1224
1225         if (write == NULL) {
1226                 /**
1227                  * @todo Forcing a synchronization requires that we set
1228                  * modification order constraints. For instance, we can't allow
1229                  * a fixup sequence in which two separate read-acquire
1230                  * operations read from the same sequence, where the first one
1231                  * synchronizes and the other doesn't. Essentially, we can't
1232                  * allow any writes to insert themselves between 'release' and
1233                  * 'rf'
1234                  */
1235
1236                 /* Must synchronize */
1237                 if (!acquire->synchronize_with(release)) {
1238                         set_bad_synchronization();
1239                         return;
1240                 }
1241                 /* Re-check all pending release sequences */
1242                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1243                 /* Re-check act for mo_graph edges */
1244                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1245
1246                 /* propagate synchronization to later actions */
1247                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1248                 for (; (*rit) != acquire; rit++) {
1249                         ModelAction *propagate = *rit;
1250                         if (acquire->happens_before(propagate)) {
1251                                 propagate->synchronize_with(acquire);
1252                                 /* Re-check 'propagate' for mo_graph edges */
1253                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1254                         }
1255                 }
1256         } else {
1257                 /* Break release sequence with new edges:
1258                  *   release --mo--> write --mo--> rf */
1259                 mo_graph->addEdge(release, write);
1260                 mo_graph->addEdge(write, rf);
1261         }
1262
1263         /* See if we have realized a data race */
1264         checkDataRaces();
1265 }
1266
1267 /**
1268  * Initialize the current action by performing one or more of the following
1269  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1270  * in the NodeStack, manipulating backtracking sets, allocating and
1271  * initializing clock vectors, and computing the promises to fulfill.
1272  *
1273  * @param curr The current action, as passed from the user context; may be
1274  * freed/invalidated after the execution of this function, with a different
1275  * action "returned" its place (pass-by-reference)
1276  * @return True if curr is a newly-explored action; false otherwise
1277  */
1278 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1279 {
1280         ModelAction *newcurr;
1281
1282         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1283                 newcurr = process_rmw(*curr);
1284                 delete *curr;
1285
1286                 if (newcurr->is_rmw())
1287                         compute_promises(newcurr);
1288
1289                 *curr = newcurr;
1290                 return false;
1291         }
1292
1293         (*curr)->set_seq_number(get_next_seq_num());
1294
1295         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1296         if (newcurr) {
1297                 /* First restore type and order in case of RMW operation */
1298                 if ((*curr)->is_rmwr())
1299                         newcurr->copy_typeandorder(*curr);
1300
1301                 ASSERT((*curr)->get_location() == newcurr->get_location());
1302                 newcurr->copy_from_new(*curr);
1303
1304                 /* Discard duplicate ModelAction; use action from NodeStack */
1305                 delete *curr;
1306
1307                 /* Always compute new clock vector */
1308                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1309
1310                 *curr = newcurr;
1311                 return false; /* Action was explored previously */
1312         } else {
1313                 newcurr = *curr;
1314
1315                 /* Always compute new clock vector */
1316                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1317
1318                 /* Assign most recent release fence */
1319                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1320
1321                 /*
1322                  * Perform one-time actions when pushing new ModelAction onto
1323                  * NodeStack
1324                  */
1325                 if (newcurr->is_write())
1326                         compute_promises(newcurr);
1327                 else if (newcurr->is_relseq_fixup())
1328                         compute_relseq_breakwrites(newcurr);
1329                 else if (newcurr->is_wait())
1330                         newcurr->get_node()->set_misc_max(2);
1331                 else if (newcurr->is_notify_one()) {
1332                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1333                 }
1334                 return true; /* This was a new ModelAction */
1335         }
1336 }
1337
1338 /**
1339  * @brief Establish reads-from relation between two actions
1340  *
1341  * Perform basic operations involved with establishing a concrete rf relation,
1342  * including setting the ModelAction data and checking for release sequences.
1343  *
1344  * @param act The action that is reading (must be a read)
1345  * @param rf The action from which we are reading (must be a write)
1346  *
1347  * @return True if this read established synchronization
1348  */
1349 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1350 {
1351         ASSERT(rf);
1352         act->set_read_from(rf);
1353         if (act->is_acquire()) {
1354                 rel_heads_list_t release_heads;
1355                 get_release_seq_heads(act, act, &release_heads);
1356                 int num_heads = release_heads.size();
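                /* Synchronize with each release head; a head that fails to
                 * synchronize indicates improperly-ordered synchronization and
                 * does not count toward establishing synchronization */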
1357                 for (unsigned int i = 0; i < release_heads.size(); i++)
1358                         if (!act->synchronize_with(release_heads[i])) {
1359                                 set_bad_synchronization();
1360                                 num_heads--;
1361                         }
1362                 return num_heads > 0;
1363         }
1364         return false;
1365 }
1366
1367 /**
1368  * Check promises and eliminate potentially-satisfying threads when a thread is
1369  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1370  * no longer satisfy a promise generated from that thread.
1371  *
1372  * @param blocker The thread on which a thread is waiting
1373  * @param waiting The waiting thread
1374  */
1375 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1376 {
1377         for (unsigned int i = 0; i < promises->size(); i++) {
1378                 Promise *promise = (*promises)[i];
1379                 if (!promise->thread_is_available(waiting->get_id()))
1380                         continue;
1381                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1382                         ModelAction *reader = promise->get_reader(j);
1383                         if (reader->get_tid() != blocker->get_id())
1384                                 continue;
1385                         if (promise->eliminate_thread(waiting->get_id())) {
1386                                 /* Promise has failed */
1387                                 priv->failed_promise = true;
1388                         } else {
1389                                 /* Only eliminate the 'waiting' thread once */
1390                                 return;
1391                         }
1392                 }
1393         }
1394 }
1395
1396 /**
1397  * @brief Check whether a model action is enabled.
1398  *
1399  * Checks whether a lock or join operation would be successful (i.e., is the
1400  * lock already locked, or is the joined thread already complete). If not, put
1401  * the action in a waiter list.
1402  *
1403  * @param curr The ModelAction to check
1404  * @return True if the action is enabled; false otherwise
1405  */
1406 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1407         if (curr->is_lock()) {
1408                 std::mutex *lock = (std::mutex *)curr->get_location();
1409                 struct std::mutex_state *state = lock->get_state();
1410                 if (state->islocked) {
1411                         //Stick the action in the appropriate waiting queue
1412                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1413                         return false;
1414                 }
1415         } else if (curr->get_type() == THREAD_JOIN) {
1416                 Thread *blocking = (Thread *)curr->get_location();
1417                 if (!blocking->is_complete()) {
1418                         blocking->push_wait_list(curr);
1419                         thread_blocking_check_promises(blocking, get_thread(curr));
1420                         return false;
1421                 }
1422         }
1423
1424         return true;
1425 }
1426
1427 /**
1428  * This is the heart of the model checker routine. It performs model-checking
1429  * actions corresponding to a given "current action." Among other processes, it
1430  * calculates reads-from relationships, updates synchronization clock vectors,
1431  * forms a memory_order constraints graph, and handles replay/backtrack
1432  * execution when running permutations of previously-observed executions.
1433  *
1434  * @param curr The current action to process
1435  * @return The ModelAction that is actually executed; may be different than
1436  * curr; may be NULL, if the current action is not enabled to run
1437  */
1438 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1439 {
1440         ASSERT(curr);
1441         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1442
1443         if (!check_action_enabled(curr)) {
1444                 /* Make the execution look like we chose to run this action
1445                  * much later, when a lock/join can succeed */
1446                 get_thread(curr)->set_pending(curr);
1447                 scheduler->sleep(get_thread(curr));
1448                 return NULL;
1449         }
1450
1451         bool newly_explored = initialize_curr_action(&curr);
1452
1453         DBG();
1454         if (DBG_ENABLED())
1455                 curr->print();
1456
1457         wake_up_sleeping_actions(curr);
1458
1459         /* Add the action to lists before any other model-checking tasks */
1460         if (!second_part_of_rmw)
1461                 add_action_to_lists(curr);
1462
1463         /* Build may_read_from set for newly-created actions */
1464         if (newly_explored && curr->is_read())
1465                 build_may_read_from(curr);
1466
1467         /* Initialize work_queue with the "current action" work */
1468         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
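        /* Drain the queue to a fixed point: processing one entry (e.g., the
         * current read or write) may enqueue release-sequence or mo-edge
         * re-checks, which may in turn enqueue further work */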
1469         while (!work_queue.empty() && !has_asserted()) {
1470                 WorkQueueEntry work = work_queue.front();
1471                 work_queue.pop_front();
1472
1473                 switch (work.type) {
1474                 case WORK_CHECK_CURR_ACTION: {
1475                         ModelAction *act = work.action;
1476                         bool update = false; /* update this location's release seq's */
1477                         bool update_all = false; /* update all release seq's */
1478
1479                         if (process_thread_action(curr))
1480                                 update_all = true;
1481
1482                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1483                                 update = true;
1484
1485                         if (act->is_write() && process_write(act))
1486                                 update = true;
1487
1488                         if (act->is_fence() && process_fence(act))
1489                                 update_all = true;
1490
1491                         if (act->is_mutex_op() && process_mutex(act))
1492                                 update_all = true;
1493
1494                         if (act->is_relseq_fixup())
1495                                 process_relseq_fixup(curr, &work_queue);
1496
1497                         if (update_all)
1498                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1499                         else if (update)
1500                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1501                         break;
1502                 }
1503                 case WORK_CHECK_RELEASE_SEQ:
1504                         resolve_release_sequences(work.location, &work_queue);
1505                         break;
1506                 case WORK_CHECK_MO_EDGES: {
1507                         /** @todo Complete verification of work_queue */
1508                         ModelAction *act = work.action;
1509                         bool updated = false;
1510
1511                         if (act->is_read()) {
1512                                 const ModelAction *rf = act->get_reads_from();
1513                                 const Promise *promise = act->get_reads_from_promise();
1514                                 if (rf) {
1515                                         if (r_modification_order(act, rf))
1516                                                 updated = true;
1517                                 } else if (promise) {
1518                                         if (r_modification_order(act, promise))
1519                                                 updated = true;
1520                                 }
1521                         }
1522                         if (act->is_write()) {
1523                                 if (w_modification_order(act, NULL))
1524                                         updated = true;
1525                         }
1526                         mo_graph->commitChanges();
1527
1528                         if (updated)
1529                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1530                         break;
1531                 }
1532                 default:
1533                         ASSERT(false);
1534                         break;
1535                 }
1536         }
1537
1538         check_curr_backtracking(curr);
1539         set_backtracking(curr);
1540         return curr;
1541 }
1542
1543 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1544 {
1545         Node *currnode = curr->get_node();
1546         Node *parnode = currnode->get_parent();
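        /* Record a backtracking point if this node (or its parent) still has
         * unexplored choices: backtrack threads, reads-from candidates,
         * promises, misc choices, or release-sequence breaks */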
1547
1548         if ((parnode && !parnode->backtrack_empty()) ||
1549                          !currnode->misc_empty() ||
1550                          !currnode->read_from_empty() ||
1551                          !currnode->promise_empty() ||
1552                          !currnode->relseq_break_empty()) {
1553                 set_latest_backtrack(curr);
1554         }
1555 }
1556
1557 bool ModelChecker::promises_expired() const
1558 {
1559         for (unsigned int i = 0; i < promises->size(); i++) {
1560                 Promise *promise = (*promises)[i];
1561                 if (promise->get_expiration() < priv->used_sequence_numbers)
1562                         return true;
1563         }
1564         return false;
1565 }
1566
1567 /**
1568  * This is the strongest feasibility check available.
1569  * @return whether the current trace (partial or complete) must be a prefix of
1570  * a feasible trace.
1571  */
1572 bool ModelChecker::isfeasibleprefix() const
1573 {
1574         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1575 }
1576
1577 /**
1578  * Print diagnostic information about an infeasible execution
1579  * @param prefix A string to prefix the output with; if NULL, then a default
1580  * message prefix will be provided
1581  */
1582 void ModelChecker::print_infeasibility(const char *prefix) const
1583 {
1584         char buf[100];
1585         char *ptr = buf;
1586         if (mo_graph->checkForCycles())
1587                 ptr += sprintf(ptr, "[mo cycle]");
1588         if (priv->failed_promise)
1589                 ptr += sprintf(ptr, "[failed promise]");
1590         if (priv->too_many_reads)
1591                 ptr += sprintf(ptr, "[too many reads]");
1592         if (priv->no_valid_reads)
1593                 ptr += sprintf(ptr, "[no valid reads-from]");
1594         if (priv->bad_synchronization)
1595                 ptr += sprintf(ptr, "[bad sw ordering]");
1596         if (promises_expired())
1597                 ptr += sprintf(ptr, "[promise expired]");
1598         if (promises->size() != 0)
1599                 ptr += sprintf(ptr, "[unresolved promise]");
1600         if (ptr != buf)
1601                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1602 }
1603
1604 /**
1605  * Returns whether the current completed trace is feasible, except for pending
1606  * release sequences.
1607  */
1608 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1609 {
1610         return !is_infeasible() && promises->size() == 0;
1611 }
1612
1613 /**
1614  * Check if the current partial trace is infeasible. Does not check any
1615  * end-of-execution flags, which might rule out the execution. Thus, this is
1616  * useful only for ruling an execution as infeasible.
1617  * @return whether the current partial trace is infeasible.
1618  */
1619 bool ModelChecker::is_infeasible() const
1620 {
1621         return mo_graph->checkForCycles() ||
1622                 priv->no_valid_reads ||
1623                 priv->failed_promise ||
1624                 priv->too_many_reads ||
1625                 priv->bad_synchronization ||
1626                 promises_expired();
1627 }
1628
1629 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1630 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1631         ModelAction *lastread = get_last_action(act->get_tid());
1632         lastread->process_rmw(act);
1633         if (act->is_rmw()) {
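                /* Preserve atomicity: mo-order the RMW after the write (or
                 * promise) that it read from */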
1634                 if (lastread->get_reads_from())
1635                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1636                 else
1637                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1638                 mo_graph->commitChanges();
1639         }
1640         return lastread;
1641 }
1642
1643 /**
1644  * A helper function for ModelChecker::check_recency, to check if the current
1645  * thread is able to read from a different write/promise for 'params.maxreads'
1646  * number of steps and if that write/promise should become visible (i.e., is
1647  * ordered later in the modification order). This helps model memory liveness.
1648  *
1649  * @param curr The current action. Must be a read.
1650  * @param rf The write/promise from which we plan to read
1651  * @param other_rf The write/promise from which we may read
1652  * @return True if we were able to read from other_rf for params.maxreads steps
1653  */
1654 template <typename T, typename U>
1655 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1656 {
1657         /* Need a different write/promise */
1658         if (other_rf->equals(rf))
1659                 return false;
1660
1661         /* Only look for "newer" writes/promises */
1662         if (!mo_graph->checkReachable(rf, other_rf))
1663                 return false;
1664
1665         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1666         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1667         action_list_t::reverse_iterator rit = list->rbegin();
1668         ASSERT((*rit) == curr);
1669         /* Skip past curr */
1670         rit++;
1671
1672         /* Does this write/promise work for everyone? */
1673         for (int i = 0; i < params.maxreads; i++, rit++) {
1674                 ModelAction *act = *rit;
1675                 if (!act->may_read_from(other_rf))
1676                         return false;
1677         }
1678         return true;
1679 }
1680
1681 /**
1682  * Checks whether a thread has read from the same write or Promise for too many
1683  * times without seeing the effects of a later write/Promise.
1684  *
1685  * Basic idea:
1686  * 1) there must be a different write/promise that we could read from,
1687  * 2) we must have read from the same write/promise in excess of maxreads times,
1688  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1689  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1690  *
1691  * If so, we decide that the execution is no longer feasible.
1692  *
1693  * @param curr The current action. Must be a read.
1694  * @param rf The ModelAction/Promise from which we might read.
1695  * @return True if the read should succeed; false otherwise
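 *
 * For example, a thread spinning on a relaxed load may read the same stale
 * write at most params.maxreads times while a mod-ordered-later write is
 * available to it; beyond that, the execution is declared infeasible.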
1696  */
1697 template <typename T>
1698 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1699 {
1700         if (!params.maxreads)
1701                 return true;
1702
1703         //NOTE: Next check is just optimization, not really necessary....
1704         if (curr->get_node()->get_read_from_past_size() +
1705                         curr->get_node()->get_read_from_promise_size() <= 1)
1706                 return true;
1707
1708         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1709         int tid = id_to_int(curr->get_tid());
1710         ASSERT(tid < (int)thrd_lists->size());
1711         action_list_t *list = &(*thrd_lists)[tid];
1712         action_list_t::reverse_iterator rit = list->rbegin();
1713         ASSERT((*rit) == curr);
1714         /* Skip past curr */
1715         rit++;
1716
1717         action_list_t::reverse_iterator ritcopy = rit;
1718         /* See if we have enough reads from the same value */
1719         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1720                 if (ritcopy == list->rend())
1721                         return true;
1722                 ModelAction *act = *ritcopy;
1723                 if (!act->is_read())
1724                         return true;
1725                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1726                         return true;
1727                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1728                         return true;
1729                 if (act->get_node()->get_read_from_past_size() +
1730                                 act->get_node()->get_read_from_promise_size() <= 1)
1731                         return true;
1732         }
1733         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1734                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1735                 if (should_read_instead(curr, rf, write))
1736                         return false; /* liveness failure */
1737         }
1738         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1739                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1740                 if (should_read_instead(curr, rf, promise))
1741                         return false; /* liveness failure */
1742         }
1743         return true;
1744 }
1745
1746 /**
1747  * Updates the mo_graph with the constraints imposed from the current
1748  * read.
1749  *
1750  * Basic idea is the following: Go through each other thread and find
1751  * the last action that happened before our read.  Two cases:
1752  *
1753  * (1) The action is a write => that write must either occur before
1754  * the write we read from or be the write we read from.
1755  *
1756  * (2) The action is a read => the write that that action read from
1757  * must occur before the write we read from or be the same write.
1758  *
1759  * @param curr The current action. Must be a read.
1760  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1761  * @return True if modification order edges were added; false otherwise
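 *
 * For example, if curr reads from write W and some write W' in another thread
 * happens before curr (with W' != W), the edge W' --mo--> W is added.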
1762  */
1763 template <typename rf_type>
1764 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1765 {
1766         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1767         unsigned int i;
1768         bool added = false;
1769         ASSERT(curr->is_read());
1770
1771         /* Last SC fence in the current thread */
1772         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1773
1774         /* Iterate over all threads */
1775         for (i = 0; i < thrd_lists->size(); i++) {
1776                 /* Last SC fence in thread i */
1777                 ModelAction *last_sc_fence_thread_local = NULL;
1778                 if (int_to_id((int)i) != curr->get_tid())
1779                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1780
1781                 /* Last SC fence in thread i, before last SC fence in current thread */
1782                 ModelAction *last_sc_fence_thread_before = NULL;
1783                 if (last_sc_fence_local)
1784                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1785
1786                 /* Iterate over actions in thread, starting from most recent */
1787                 action_list_t *list = &(*thrd_lists)[i];
1788                 action_list_t::reverse_iterator rit;
1789                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1790                         ModelAction *act = *rit;
1791
1792                         if (act->is_write() && !act->equals(rf) && act != curr) {
1793                                 /* C++, Section 29.3 statement 5 */
1794                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1795                                                 *act < *last_sc_fence_thread_local) {
1796                                         added = mo_graph->addEdge(act, rf) || added;
1797                                         break;
1798                                 }
1799                                 /* C++, Section 29.3 statement 4 */
1800                                 else if (act->is_seqcst() && last_sc_fence_local &&
1801                                                 *act < *last_sc_fence_local) {
1802                                         added = mo_graph->addEdge(act, rf) || added;
1803                                         break;
1804                                 }
1805                                 /* C++, Section 29.3 statement 6 */
1806                                 else if (last_sc_fence_thread_before &&
1807                                                 *act < *last_sc_fence_thread_before) {
1808                                         added = mo_graph->addEdge(act, rf) || added;
1809                                         break;
1810                                 }
1811                         }
1812
1813                         /*
1814                          * Include at most one act per-thread that "happens
1815                          * before" curr. Don't consider reflexively.
1816                          */
1817                         if (act->happens_before(curr) && act != curr) {
1818                                 if (act->is_write()) {
1819                                         if (!act->equals(rf)) {
1820                                                 added = mo_graph->addEdge(act, rf) || added;
1821                                         }
1822                                 } else {
1823                                         const ModelAction *prevrf = act->get_reads_from();
1824                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1825                                         if (prevrf) {
1826                                                 if (!prevrf->equals(rf))
1827                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1828                                         } else if (!prevrf_promise->equals(rf)) {
1829                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1830                                         }
1831                                 }
1832                                 break;
1833                         }
1834                 }
1835         }
1836
1837         /*
1838          * All compatible, thread-exclusive promises must be ordered after any
1839          * concrete loads from the same thread
1840          */
1841         for (unsigned int i = 0; i < promises->size(); i++)
1842                 if ((*promises)[i]->is_compatible_exclusive(curr))
1843                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1844
1845         return added;
1846 }
1847
1848 /**
1849  * Updates the mo_graph with the constraints imposed from the current write.
1850  *
1851  * Basic idea is the following: Go through each other thread and find
1852  * the latest action that happened before our write.  Two cases:
1853  *
1854  * (1) The action is a write => that write must occur before
1855  * the current write
1856  *
1857  * (2) The action is a read => the write that that action read from
1858  * must occur before the current write.
1859  *
1860  * This method also handles two other issues:
1861  *
1862  * (I) Sequential Consistency: Making sure that if the current write is
1863  * seq_cst, that it occurs after the previous seq_cst write.
1864  *
1865  * (II) Sending the write back to non-synchronizing reads.
1866  *
1867  * @param curr The current action. Must be a write.
1868  * @param send_fv A vector for stashing reads to which we may pass our future
1869  * value. If NULL, then don't record any future values.
1870  * @return True if modification order edges were added; false otherwise
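 *
 * For example, if a write W in another thread happens before curr, the edge
 * W --mo--> curr is added; if instead a read R happens before curr, the edge
 * rf(R) --mo--> curr is added.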
1871  */
1872 bool ModelChecker::w_modification_order(ModelAction *curr, std::vector< ModelAction *, ModelAlloc<ModelAction *> > *send_fv)
1873 {
1874         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1875         unsigned int i;
1876         bool added = false;
1877         ASSERT(curr->is_write());
1878
1879         if (curr->is_seqcst()) {
1880                 /* We have to at least see the last sequentially consistent write,
1881                          so we are initialized. */
1882                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1883                 if (last_seq_cst != NULL) {
1884                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1885                 }
1886         }
1887
1888         /* Last SC fence in the current thread */
1889         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1890
1891         /* Iterate over all threads */
1892         for (i = 0; i < thrd_lists->size(); i++) {
1893                 /* Last SC fence in thread i, before last SC fence in current thread */
1894                 ModelAction *last_sc_fence_thread_before = NULL;
1895                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1896                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1897
1898                 /* Iterate over actions in thread, starting from most recent */
1899                 action_list_t *list = &(*thrd_lists)[i];
1900                 action_list_t::reverse_iterator rit;
1901                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1902                         ModelAction *act = *rit;
1903                         if (act == curr) {
1904                                 /*
1905                                  * 1) If RMW and it actually read from something, then we
1906                                  * already have all relevant edges, so just skip to next
1907                                  * thread.
1908                                  *
1909                                  * 2) If RMW and it didn't read from anything, we should take
1910                                  * whatever edge we can get to speed up convergence.
1911                                  *
1912                                  * 3) If normal write, we need to look at earlier actions, so
1913                                  * continue processing list.
1914                                  */
1915                                 if (curr->is_rmw()) {
1916                                         if (curr->get_reads_from() != NULL)
1917                                                 break;
1918                                         else
1919                                                 continue;
1920                                 } else
1921                                         continue;
1922                         }
1923
1924                         /* C++, Section 29.3 statement 7 */
1925                         if (last_sc_fence_thread_before && act->is_write() &&
1926                                         *act < *last_sc_fence_thread_before) {
1927                                 added = mo_graph->addEdge(act, curr) || added;
1928                                 break;
1929                         }
1930
1931                         /*
1932                          * Include at most one act per-thread that "happens
1933                          * before" curr
1934                          */
1935                         if (act->happens_before(curr)) {
1936                                 /*
1937                                  * Note: if act is RMW, just add edge:
1938                                  *   act --mo--> curr
1939                                  * The following edge should be handled elsewhere:
1940                                  *   readfrom(act) --mo--> act
1941                                  */
1942                                 if (act->is_write())
1943                                         added = mo_graph->addEdge(act, curr) || added;
1944                                 else if (act->is_read()) {
1945                                         //if previous read accessed a null, just keep going
1946                                         if (act->get_reads_from() == NULL)
1947                                                 continue;
1948                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1949                                 }
1950                                 break;
1951                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1952                                                      !act->same_thread(curr)) {
1953                                 /* We have an action that:
1954                                    (1) did not happen before us
1955                                    (2) is a read and we are a write
1956                                    (3) cannot synchronize with us
1957                                    (4) is in a different thread
1958                                    =>
1959                                    that read could potentially read from our write.  Note that
1960                                    these checks are overly conservative at this point, we'll
1961                                    do more checks before actually removing the
1962                                    pendingfuturevalue.
1963
1964                                  */
1965                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1966                                         if (!is_infeasible())
1967                                                 send_fv->push_back(act);
1968                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1969                                                 add_future_value(curr, act);
1970                                 }
1971                         }
1972                 }
1973         }
1974
1975         /*
1976          * All compatible, thread-exclusive promises must be ordered after any
1977          * concrete stores to the same thread, or else they can be merged with
1978          * this store later
1979          */
1980         for (unsigned int i = 0; i < promises->size(); i++)
1981                 if ((*promises)[i]->is_compatible_exclusive(curr))
1982                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1983
1984         return added;
1985 }
1986
1987 /** Arbitrary reads from the future are not allowed.  Section 29.3
1988  * part 9 places some constraints.  This method checks one result of that
1989  * constraint.  Others require compiler support. */
1990 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1991 {
1992         if (!writer->is_rmw())
1993                 return true;
1994
1995         if (!reader->is_rmw())
1996                 return true;
1997
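        /* Walk backwards along the writer's reads-from chain; if the chain
         * reaches 'reader', then letting 'reader' read from 'writer' would
         * close an out-of-thin-air cycle, so disallow it */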
1998         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1999                 if (search == reader)
2000                         return false;
2001                 if (search->get_tid() == reader->get_tid() &&
2002                                 search->happens_before(reader))
2003                         break;
2004         }
2005
2006         return true;
2007 }
2008
2009 /**
2010  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2011  * some constraints. This method checks the following constraint (others
2012  * require compiler support):
2013  *
2014  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2015  */
2016 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2017 {
2018         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2019         unsigned int i;
2020         /* Iterate over all threads */
2021         for (i = 0; i < thrd_lists->size(); i++) {
2022                 const ModelAction *write_after_read = NULL;
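                /* 'write_after_read' ends up as the earliest write in this
                 * thread (or the write read by such a read) that 'reader'
                 * happens before; it plays the role of Y (or rf(Y)) in
                 * X --hb--> Y --mo--> Z, with X = reader and Z = writer */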
2023
2024                 /* Iterate over actions in thread, starting from most recent */
2025                 action_list_t *list = &(*thrd_lists)[i];
2026                 action_list_t::reverse_iterator rit;
2027                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2028                         ModelAction *act = *rit;
2029
2030                         /* Don't disallow due to act == reader */
2031                         if (!reader->happens_before(act) || reader == act)
2032                                 break;
2033                         else if (act->is_write())
2034                                 write_after_read = act;
2035                         else if (act->is_read() && act->get_reads_from() != NULL)
2036                                 write_after_read = act->get_reads_from();
2037                 }
2038
2039                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2040                         return false;
2041         }
2042         return true;
2043 }
2044
2045 /**
2046  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2047  * The ModelAction under consideration is expected to be taking part in
2048  * release/acquire synchronization as an object of the "reads from" relation.
2049  * Note that this can only provide release sequence support for RMW chains
2050  * which do not read from the future, as those actions cannot be traced until
2051  * their "promise" is fulfilled. Similarly, we may not even establish the
2052  * presence of a release sequence with certainty, as some modification order
2053  * constraints may be decided further in the future. Thus, this function
2054  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2055  * and a boolean representing certainty.
2056  *
2057  * @param rf The action that might be part of a release sequence. Must be a
2058  * write.
2059  * @param release_heads A pass-by-reference style return parameter. After
2060  * execution of this function, release_heads will contain the heads of all the
2061  * relevant release sequences, if any exist with certainty
2062  * @param pending A pass-by-reference style return parameter which is only used
2063  * when returning false (i.e., uncertain). Returns most information regarding
2064  * an uncertain release sequence, including any write operations that might
2065  * break the sequence.
2066  * @return true, if the ModelChecker is certain that release_heads is complete;
2067  * false otherwise
2068  */
2069 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2070                 rel_heads_list_t *release_heads,
2071                 struct release_seq *pending) const
2072 {
2073         /* Only check for release sequences if there are no cycles */
2074         if (mo_graph->checkForCycles())
2075                 return false;
2076
2077         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2078                 ASSERT(rf->is_write());
2079
2080                 if (rf->is_release())
2081                         release_heads->push_back(rf);
2082                 else if (rf->get_last_fence_release())
2083                         release_heads->push_back(rf->get_last_fence_release());
2084                 if (!rf->is_rmw())
2085                         break; /* End of RMW chain */
2086
2087                 /** @todo Need to be smarter here...  In the linux lock
2088                  * example, this will run to the beginning of the program for
2089                  * every acquire. */
2090                 /** @todo The way to be smarter here is to keep going until 1
2091                  * thread has a release preceded by an acquire and you've seen
2092                  *       both. */
2093
2094                 /* acq_rel RMW is a sufficient stopping condition */
2095                 if (rf->is_acquire() && rf->is_release())
2096                         return true; /* complete */
2097         };
2098         if (!rf) {
2099                 /* read from future: need to settle this later */
2100                 pending->rf = NULL;
2101                 return false; /* incomplete */
2102         }
2103
2104         if (rf->is_release())
2105                 return true; /* complete */
2106
2107         /* else relaxed write
2108          * - check for fence-release in the same thread (29.8, stmt. 3)
2109          * - check modification order for contiguous subsequence
2110          *   -> rf must be same thread as release */
2111
2112         const ModelAction *fence_release = rf->get_last_fence_release();
2113         /* Synchronize with a fence-release unconditionally; we don't need to
2114          * find any more "contiguous subsequence..." for it */
2115         if (fence_release)
2116                 release_heads->push_back(fence_release);
2117
2118         int tid = id_to_int(rf->get_tid());
2119         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2120         action_list_t *list = &(*thrd_lists)[tid];
2121         action_list_t::const_reverse_iterator rit;
2122
2123         /* Find rf in the thread list */
2124         rit = std::find(list->rbegin(), list->rend(), rf);
2125         ASSERT(rit != list->rend());
2126
2127         /* Find the last {write,fence}-release */
2128         for (; rit != list->rend(); rit++) {
2129                 if (fence_release && *(*rit) < *fence_release)
2130                         break;
2131                 if ((*rit)->is_release())
2132                         break;
2133         }
2134         if (rit == list->rend()) {
2135                 /* No write-release in this thread */
2136                 return true; /* complete */
2137         } else if (fence_release && *(*rit) < *fence_release) {
2138                 /* The fence-release is more recent (and so, "stronger") than
2139                  * the most recent write-release */
2140                 return true; /* complete */
2141         } /* else, need to establish contiguous release sequence */
2142         ModelAction *release = *rit;
2143
2144         ASSERT(rf->same_thread(release));
2145
2146         pending->writes.clear();
2147
2148         bool certain = true;
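        /* For each other thread, check whether any of its writes could be
         * mod-ordered between 'release' and 'rf', which would break the
         * contiguous release sequence; unknown orderings leave us uncertain */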
2149         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2150                 if (id_to_int(rf->get_tid()) == (int)i)
2151                         continue;
2152                 list = &(*thrd_lists)[i];
2153
2154                 /* Can we ensure no future writes from this thread may break
2155                  * the release seq? */
2156                 bool future_ordered = false;
2157
2158                 ModelAction *last = get_last_action(int_to_id(i));
2159                 Thread *th = get_thread(int_to_id(i));
2160                 if ((last && rf->happens_before(last)) ||
2161                                 !is_enabled(th) ||
2162                                 th->is_complete())
2163                         future_ordered = true;
2164
2165                 ASSERT(!th->is_model_thread() || future_ordered);
2166
2167                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2168                         const ModelAction *act = *rit;
2169                         /* Reach synchronization -> this thread is complete */
2170                         if (act->happens_before(release))
2171                                 break;
2172                         if (rf->happens_before(act)) {
2173                                 future_ordered = true;
2174                                 continue;
2175                         }
2176
2177                         /* Only non-RMW writes can break release sequences */
2178                         if (!act->is_write() || act->is_rmw())
2179                                 continue;
2180
2181                         /* Check modification order */
2182                         if (mo_graph->checkReachable(rf, act)) {
2183                                 /* rf --mo--> act */
2184                                 future_ordered = true;
2185                                 continue;
2186                         }
2187                         if (mo_graph->checkReachable(act, release))
2188                                 /* act --mo--> release */
2189                                 break;
2190                         if (mo_graph->checkReachable(release, act) &&
2191                                       mo_graph->checkReachable(act, rf)) {
2192                                 /* release --mo-> act --mo--> rf */
2193                                 return true; /* complete */
2194                         }
2195                         /* act may break release sequence */
2196                         pending->writes.push_back(act);
2197                         certain = false;
2198                 }
2199                 if (!future_ordered)
2200                         certain = false; /* This thread is uncertain */
2201         }
2202
2203         if (certain) {
2204                 release_heads->push_back(release);
2205                 pending->writes.clear();
2206         } else {
2207                 pending->release = release;
2208                 pending->rf = rf;
2209         }
2210         return certain;
2211 }
2212
2213 /**
2214  * An interface for getting the release sequence head(s) with which a
2215  * given ModelAction must synchronize. This function only returns a non-empty
2216  * result when it can locate a release sequence head with certainty. Otherwise,
2217  * it may mark the internal state of the ModelChecker so that it will handle
2218  * the release sequence at a later time, causing @a acquire to update its
2219  * synchronization at some later point in execution.
2220  *
2221  * @param acquire The 'acquire' action that may synchronize with a release
2222  * sequence
2223  * @param read The read action that may read from a release sequence; this may
2224  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2225  * when 'acquire' is a fence-acquire)
2226  * @param release_heads A pass-by-reference return parameter. Will be filled
2227  * with the head(s) of the release sequence(s), if they exist with certainty.
2228  * @see ModelChecker::release_seq_heads
2229  */
2230 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2231                 ModelAction *read, rel_heads_list_t *release_heads)
2232 {
2233         const ModelAction *rf = read->get_reads_from();
2234         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2235         sequence->acquire = acquire;
2236         sequence->read = read;
2237
2238         if (!release_seq_heads(rf, release_heads, sequence)) {
2239                 /* add act to 'lazy checking' list */
2240                 pending_rel_seqs->push_back(sequence);
2241         } else {
2242                 snapshot_free(sequence);
2243         }
2244 }
2245
2246 /**
2247  * Attempt to resolve all stashed operations that might synchronize with a
2248  * release sequence for a given location. This implements the "lazy" portion of
2249  * determining whether or not a release sequence was contiguous, since not all
2250  * modification order information is present at the time an action occurs.
2251  *
2252  * @param location The location/object that should be checked for release
2253  * sequence resolutions. A NULL value means to check all locations.
2254  * @param work_queue The work queue to which to add work items as they are
2255  * generated
2256  * @return True if any updates occurred (new synchronization, new mo_graph
2257  * edges)
2258  */
2259 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2260 {
2261         bool updated = false;
2262         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2263         while (it != pending_rel_seqs->end()) {
2264                 struct release_seq *pending = *it;
2265                 ModelAction *acquire = pending->acquire;
2266                 const ModelAction *read = pending->read;
2267
2268                 /* Only resolve sequences on the given location, if provided */
2269                 if (location && read->get_location() != location) {
2270                         it++;
2271                         continue;
2272                 }
2273
2274                 const ModelAction *rf = read->get_reads_from();
2275                 rel_heads_list_t release_heads;
2276                 bool complete;
2277                 complete = release_seq_heads(rf, &release_heads, pending);
2278                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2279                         if (!acquire->has_synchronized_with(release_heads[i])) {
2280                                 if (acquire->synchronize_with(release_heads[i]))
2281                                         updated = true;
2282                                 else
2283                                         set_bad_synchronization();
2284                         }
2285                 }
2286
2287                 if (updated) {
2288                         /* Re-check all pending release sequences */
2289                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2290                         /* Re-check read-acquire for mo_graph edges */
2291                         if (acquire->is_read())
2292                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2293
2294                         /* propagate synchronization to later actions */
2295                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2296                         for (; (*rit) != acquire; rit++) {
2297                                 ModelAction *propagate = *rit;
2298                                 if (acquire->happens_before(propagate)) {
2299                                         propagate->synchronize_with(acquire);
2300                                         /* Re-check 'propagate' for mo_graph edges */
2301                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2302                                 }
2303                         }
2304                 }
2305                 if (complete) {
2306                         it = pending_rel_seqs->erase(it);
2307                         snapshot_free(pending);
2308                 } else {
2309                         it++;
2310                 }
2311         }
2312
2313         // If we resolved promises or data races, see if we have realized a data race.
2314         checkDataRaces();
2315
2316         return updated;
2317 }
2318
2319 /**
2320  * Performs various bookkeeping operations for the current ModelAction. For
2321  * instance, adds action to the per-object, per-thread action vector and to the
2322  * action trace list of all thread actions.
2323  *
2324  * @param act is the ModelAction to add.
2325  */
2326 void ModelChecker::add_action_to_lists(ModelAction *act)
2327 {
2328         int tid = id_to_int(act->get_tid());
2329         ModelAction *uninit = NULL;
2330         int uninit_id = -1;
2331         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
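        /* The first action on an atomic location is preceded by a synthetic
         * "uninitialized" action, so that loads from the location always have
         * at least one candidate write to read from */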
2332         if (list->empty() && act->is_atomic_var()) {
2333                 uninit = new_uninitialized_action(act->get_location());
2334                 uninit_id = id_to_int(uninit->get_tid());
2335                 list->push_back(uninit);
2336         }
2337         list->push_back(act);
2338
2339         action_trace->push_back(act);
2340         if (uninit)
2341                 action_trace->push_front(uninit);
2342
2343         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2344         if (tid >= (int)vec->size())
2345                 vec->resize(priv->next_thread_id);
2346         (*vec)[tid].push_back(act);
2347         if (uninit)
2348                 (*vec)[uninit_id].push_front(uninit);
2349
2350         if ((int)thrd_last_action->size() <= tid)
2351                 thrd_last_action->resize(get_num_threads());
2352         (*thrd_last_action)[tid] = act;
2353         if (uninit)
2354                 (*thrd_last_action)[uninit_id] = uninit;
2355
2356         if (act->is_fence() && act->is_release()) {
2357                 if ((int)thrd_last_fence_release->size() <= tid)
2358                         thrd_last_fence_release->resize(get_num_threads());
2359                 (*thrd_last_fence_release)[tid] = act;
2360         }
2361
2362         if (act->is_wait()) {
2363                 void *mutex_loc = (void *) act->get_value();
2364                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2365
2366                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2367                 if (tid >= (int)vec->size())
2368                         vec->resize(priv->next_thread_id);
2369                 (*vec)[tid].push_back(act);
2370         }
2371 }
2372
2373 /**
2374  * @brief Get the last action performed by a particular Thread
2375  * @param tid The thread ID of the Thread in question
2376  * @return The last action in the thread
2377  */
2378 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2379 {
2380         int threadid = id_to_int(tid);
2381         if (threadid < (int)thrd_last_action->size())
2382                 return (*thrd_last_action)[id_to_int(tid)];
2383         else
2384                 return NULL;
2385 }
2386
2387 /**
2388  * @brief Get the last fence release performed by a particular Thread
2389  * @param tid The thread ID of the Thread in question
2390  * @return The last fence release in the thread, if one exists; NULL otherwise
2391  */
2392 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2393 {
2394         int threadid = id_to_int(tid);
2395         if (threadid < (int)thrd_last_fence_release->size())
2396                 return (*thrd_last_fence_release)[id_to_int(tid)];
2397         else
2398                 return NULL;
2399 }
2400
2401 /**
2402  * Gets the last memory_order_seq_cst write (in the total global sequence)
2403  * performed on a particular object (i.e., memory location), not including the
2404  * current action.
2405  * @param curr The current ModelAction; also denotes the object location to
2406  * check
2407  * @return The last seq_cst write
2408  */
2409 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2410 {
2411         void *location = curr->get_location();
2412         action_list_t *list = get_safe_ptr_action(obj_map, location);
2413         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2414         action_list_t::reverse_iterator rit;
2415         for (rit = list->rbegin(); rit != list->rend(); rit++)
2416                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2417                         return *rit;
2418         return NULL;
2419 }
2420
2421 /**
2422  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2423  * performed in a particular thread, prior to a particular fence.
2424  * @param tid The ID of the thread to check
2425  * @param before_fence The fence from which to begin the search; if NULL, then
2426  * search for the most recent fence in the thread.
2427  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2428  */
2429 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2430 {
2431         /* All fences should have NULL location */
2432         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2433         action_list_t::reverse_iterator rit = list->rbegin();
2434
2435         if (before_fence) {
2436                 for (; rit != list->rend(); rit++)
2437                         if (*rit == before_fence)
2438                                 break;
2439
2440                 ASSERT(*rit == before_fence);
2441                 rit++;
2442         }
2443
2444         for (; rit != list->rend(); rit++)
2445                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2446                         return *rit;
2447         return NULL;
2448 }
2449
2450 /**
2451  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2452  * location). This function identifies the mutex according to the current
2453  * action, which is presumed to perform on the same mutex.
2454  * @param curr The current ModelAction; also denotes the object location to
2455  * check
2456  * @return The last unlock operation
2457  */
2458 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2459 {
2460         void *location = curr->get_location();
2461         action_list_t *list = get_safe_ptr_action(obj_map, location);
2462         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2463         action_list_t::reverse_iterator rit;
2464         for (rit = list->rbegin(); rit != list->rend(); rit++)
2465                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2466                         return *rit;
2467         return NULL;
2468 }
2469
2470 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2471 {
2472         ModelAction *parent = get_last_action(tid);
2473         if (!parent)
2474                 parent = get_thread(tid)->get_creation();
2475         return parent;
2476 }
2477
2478 /**
2479  * Returns the clock vector for a given thread.
2480  * @param tid The thread whose clock vector we want
2481  * @return Desired clock vector
2482  */
2483 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2484 {
2485         return get_parent_action(tid)->get_cv();
2486 }
2487
2488 /**
2489  * @brief Find the promise, if any to resolve for the current action
2490  * @param curr The current ModelAction. Should be a write.
2491  * @return The (non-negative) index for the Promise to resolve, if any;
2492  * otherwise -1
2493  */
2494 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2495 {
2496         for (unsigned int i = 0; i < promises->size(); i++)
2497                 if (curr->get_node()->get_promise(i))
2498                         return i;
2499         return -1;
2500 }
2501
2502 /**
2503  * Resolve a Promise with a current write.
2504  * @param write The ModelAction that is fulfilling Promises
2505  * @param promise_idx The index corresponding to the promise
2506  * @return True if the Promise was successfully resolved; false otherwise
2507  */
2508 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2509 {
2510         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2511         Promise *promise = (*promises)[promise_idx];
2512
2513         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2514                 ModelAction *read = promise->get_reader(i);
2515                 read_from(read, write);
2516                 actions_to_check.push_back(read);
2517         }
2518         /* Make sure the promise's value matches the write's value */
2519         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2520         if (!mo_graph->resolvePromise(promise, write))
2521                 priv->failed_promise = true;
2522
2523         promises->erase(promises->begin() + promise_idx);
2524         /**
2525          * @todo  It is possible to end up in an inconsistent state, where a
2526          * "resolved" promise may still be referenced if
2527          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2528          *
2529          * Note that the inconsistency only matters when dumping mo_graph to
2530          * file.
2531          *
2532          * delete promise;
2533          */
2534
2535         //Check whether reading these writes has made threads unable to
2536         //resolve promises
2537         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2538                 ModelAction *read = actions_to_check[i];
2539                 mo_check_promises(read, true);
2540         }
2541
2542         return true;
2543 }
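/*
 * Illustrative usage sketch (not checker code; the real call site lives
 * elsewhere in ModelChecker): a write selected to satisfy an outstanding
 * Promise is typically handled by pairing the two functions above. 'curr'
 * below is a placeholder for whatever store is currently being processed.
 *
 *     if (curr->is_write()) {
 *             int idx = get_promise_to_resolve(curr);
 *             if (idx >= 0)
 *                     resolve_promise(curr, (unsigned int)idx);
 *     }
 */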
2544
2545 /**
2546  * Compute the set of promises that could potentially be satisfied by this
2547  * action. Note that the computed set is stored in the Node, not in the
2548  * ModelChecker itself.
2549  * @param curr The ModelAction that may satisfy promises
2550  */
2551 void ModelChecker::compute_promises(ModelAction *curr)
2552 {
2553         for (unsigned int i = 0; i < promises->size(); i++) {
2554                 Promise *promise = (*promises)[i];
2555                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2556                         continue;
2557
2558                 bool satisfy = true;
2559                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2560                         const ModelAction *act = promise->get_reader(j);
2561                         if (act->happens_before(curr) ||
2562                                         act->could_synchronize_with(curr)) {
2563                                 satisfy = false;
2564                                 break;
2565                         }
2566                 }
2567                 if (satisfy)
2568                         curr->get_node()->set_promise(i);
2569         }
2570 }
2571
2572 /** Checks promises in response to a change in a Thread's ClockVector. */
2573 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2574 {
2575         for (unsigned int i = 0; i < promises->size(); i++) {
2576                 Promise *promise = (*promises)[i];
2577                 if (!promise->thread_is_available(tid))
2578                         continue;
2579                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2580                         const ModelAction *act = promise->get_reader(j);
2581                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2582                                         merge_cv->synchronized_since(act)) {
2583                                 if (promise->eliminate_thread(tid)) {
2584                                         /* Promise has failed */
2585                                         priv->failed_promise = true;
2586                                         return;
2587                                 }
2588                         }
2589                 }
2590         }
2591 }
2592
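/**
 * @brief Checks promises in response to a thread becoming disabled
 *
 * Flags the execution as having a failed promise if any outstanding Promise
 * now reports that it can no longer be satisfied (Promise::has_failed()).
 */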
2593 void ModelChecker::check_promises_thread_disabled()
2594 {
2595         for (unsigned int i = 0; i < promises->size(); i++) {
2596                 Promise *promise = (*promises)[i];
2597                 if (promise->has_failed()) {
2598                         priv->failed_promise = true;
2599                         return;
2600                 }
2601         }
2602 }
2603
2604 /**
2605  * @brief Checks promises in response to addition to modification order for
2606  * threads.
2607  *
2608  * We test whether threads are still available for satisfying promises after an
2609  * addition to our modification order constraints. Those that are unavailable
2610  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2611  * that promise has failed.
2612  *
2613  * @param act The ModelAction which updated the modification order
2614  * @param is_read_check Should be true if act is a read and we must check for
2615  * updates to the store from which it read (there is a distinction here for
2616  * RMW's, which are both a load and a store)
2617  */
2618 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2619 {
2620         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2621
2622         for (unsigned int i = 0; i < promises->size(); i++) {
2623                 Promise *promise = (*promises)[i];
2624
2625                 // Is this promise on the same location?
2626                 if (!promise->same_location(write))
2627                         continue;
2628
2629                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2630                         const ModelAction *pread = promise->get_reader(j);
2631                         if (!pread->happens_before(act))
2632                                continue;
2633                         if (mo_graph->checkPromise(write, promise)) {
2634                                 priv->failed_promise = true;
2635                                 return;
2636                         }
2637                         break;
2638                 }
2639
2640                 // Don't do any lookups twice for the same thread
2641                 if (!promise->thread_is_available(act->get_tid()))
2642                         continue;
2643
2644                 if (mo_graph->checkReachable(promise, write)) {
2645                         if (mo_graph->checkPromise(write, promise)) {
2646                                 priv->failed_promise = true;
2647                                 return;
2648                         }
2649                 }
2650         }
2651 }
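/*
 * Illustrative note on the two call patterns for mo_check_promises(): when a
 * read has just been processed (as in resolve_promise() above), is_read_check
 * is true and the check runs against the store that the read reads from; when
 * a write itself extends the modification order, is_read_check is false and
 * the write is checked directly.
 *
 *     mo_check_promises(read_action, true);    // check the store it read from
 *     mo_check_promises(write_action, false);  // check the write itself
 */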
2652
2653 /**
2654  * Compute the set of writes that may break the current pending release
2655  * sequence. This information is extracted from previous release sequence
2656  * calculations.
2657  *
2658  * @param curr The current ModelAction. Must be a release sequence fixup
2659  * action.
2660  */
2661 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2662 {
2663         if (pending_rel_seqs->empty())
2664                 return;
2665
2666         struct release_seq *pending = pending_rel_seqs->back();
2667         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2668                 const ModelAction *write = pending->writes[i];
2669                 curr->get_node()->add_relseq_break(write);
2670         }
2671
2672         /* NULL means don't break the sequence; just synchronize */
2673         curr->get_node()->add_relseq_break(NULL);
2674 }
2675
2676 /**
2677  * Build up an initial set of all past writes that this 'read' action may read
2678  * from, as well as any previously-observed future values that must still be valid.
2679  *
2680  * @param curr is the current ModelAction that we are exploring; it must be a
2681  * 'read' operation.
2682  */
2683 void ModelChecker::build_may_read_from(ModelAction *curr)
2684 {
2685         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2686         unsigned int i;
2687         ASSERT(curr->is_read());
2688
2689         ModelAction *last_sc_write = NULL;
2690
2691         if (curr->is_seqcst())
2692                 last_sc_write = get_last_seq_cst_write(curr);
2693
2694         /* Iterate over all threads */
2695         for (i = 0; i < thrd_lists->size(); i++) {
2696                 /* Iterate over actions in thread, starting from most recent */
2697                 action_list_t *list = &(*thrd_lists)[i];
2698                 action_list_t::reverse_iterator rit;
2699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2700                         ModelAction *act = *rit;
2701
2702                         /* Only consider 'write' actions */
2703                         if (!act->is_write() || act == curr)
2704                                 continue;
2705
2706                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2707                         bool allow_read = true;
2708
2709                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2710                                 allow_read = false;
2711                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2712                                 allow_read = false;
2713
2714                         if (allow_read) {
2715                                 /* Only add feasible reads */
2716                                 mo_graph->startChanges();
2717                                 r_modification_order(curr, act);
2718                                 if (!is_infeasible())
2719                                         curr->get_node()->add_read_from_past(act);
2720                                 mo_graph->rollbackChanges();
2721                         }
2722
2723                         /* Include at most one act per-thread that "happens before" curr */
2724                         if (act->happens_before(curr))
2725                                 break;
2726                 }
2727         }
2728
2729         /* Inherit existing, promised future values */
2730         for (i = 0; i < promises->size(); i++) {
2731                 const Promise *promise = (*promises)[i];
2732                 const ModelAction *promise_read = promise->get_reader(0);
2733                 if (promise_read->same_var(curr)) {
2734                         /* Only add feasible future-values */
2735                         mo_graph->startChanges();
2736                         r_modification_order(curr, promise);
2737                         if (!is_infeasible())
2738                                 curr->get_node()->add_read_from_promise(promise_read);
2739                         mo_graph->rollbackChanges();
2740                 }
2741         }
2742
2743         /* We may find no valid may-read-from set only if the execution is doomed */
2744         if (!curr->get_node()->read_from_size()) {
2745                 priv->no_valid_reads = true;
2746                 set_assert();
2747         }
2748
2749         if (DBG_ENABLED()) {
2750                 model_print("Reached read action:\n");
2751                 curr->print();
2752                 model_print("Printing read_from_past\n");
2753                 curr->get_node()->print_read_from_past();
2754                 model_print("End printing read_from_past\n");
2755         }
2756 }
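/*
 * Illustrative sketch (hypothetical user program, not checker code): with two
 * threads and no synchronization,
 *
 *     // Thread 1                          // Thread 2
 *     x.store(1, memory_order_relaxed);    r = x.load(memory_order_relaxed);
 *     x.store(2, memory_order_relaxed);
 *
 * neither store happens before the load, so both stores (plus the
 * uninitialized initial value of x) remain candidates in the set built above,
 * minus any that the feasibility check (r_modification_order() +
 * is_infeasible()) rejects. Had the load been seq_cst, every seq_cst store
 * other than the most recent one, and any store happening before that most
 * recent seq_cst store, would additionally be filtered out.
 */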
2757
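/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Walks backward from 'write' along the chain of RMW reads-from links, looking
 * either for the initial (uninitialized) store or for a release store that was
 * performed while the reading thread was on the sleep set (i.e., a store that
 * could plausibly have woken the sleeping thread).
 * @param curr The read being performed by the sleeping thread
 * @param write The candidate store to read from
 * @return True if the read may read from 'write'; false otherwise
 */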
2758 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2759 {
2760         for ( ; write != NULL; write = write->get_reads_from()) {
2761                 /* UNINIT actions don't have a Node, and they never sleep */
2762                 if (write->is_uninitialized())
2763                         return true;
2764                 Node *prevnode = write->get_node()->get_parent();
2765
2766                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2767                 if (write->is_release() && thread_sleep)
2768                         return true;
2769                 if (!write->is_rmw())
2770                         return false;
2771         }
2772         return true;
2773 }
2774
2775 /**
2776  * @brief Create a new action representing an uninitialized atomic
2777  * @param location The memory location of the atomic object
2778  * @return A pointer to a new ModelAction
2779  */
2780 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2781 {
2782         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2783         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2784         act->create_cv(NULL);
2785         return act;
2786 }
2787
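/**
 * @brief Print a list of ModelActions, followed by a simple hash of the trace
 * @param list The action list to print
 */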
2788 static void print_list(action_list_t *list)
2789 {
2790         action_list_t::iterator it;
2791
2792         model_print("---------------------------------------------------------------------\n");
2793
2794         unsigned int hash = 0;
2795
2796         for (it = list->begin(); it != list->end(); it++) {
2797                 (*it)->print();
2798                 hash = hash^(hash<<3)^((*it)->hash());
2799         }
2800         model_print("HASH %u\n", hash);
2801         model_print("---------------------------------------------------------------------\n");
2802 }
2803
2804 #if SUPPORT_MOD_ORDER_DUMP
2805 void ModelChecker::dumpGraph(char *filename) const
2806 {
2807         char buffer[200];
2808         sprintf(buffer, "%s.dot", filename);
2809         FILE *file = fopen(buffer, "w");
2810         fprintf(file, "digraph %s {\n", filename);
2811         mo_graph->dumpNodes(file);
2812         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2813
2814         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2815                 ModelAction *act = *it;
2816                 if (act->is_read()) {
2817                         mo_graph->dot_print_node(file, act);
2818                         if (act->get_reads_from())
2819                                 mo_graph->dot_print_edge(file,
2820                                                 act->get_reads_from(),
2821                                                 act,
2822                                                 "label=\"rf\", color=red, weight=2");
2823                         else
2824                                 mo_graph->dot_print_edge(file,
2825                                                 act->get_reads_from_promise(),
2826                                                 act,
2827                                                 "label=\"rf\", color=red");
2828                 }
2829                 if (thread_array[id_to_int(act->get_tid())]) {
2830                         mo_graph->dot_print_edge(file,
2831                                         thread_array[id_to_int(act->get_tid())],
2832                                         act,
2833                                         "label=\"sb\", color=blue, weight=400");
2834                 }
2835
2836                 thread_array[id_to_int(act->get_tid())] = act;
2837         }
2838         fprintf(file, "}\n");
2839         model_free(thread_array);
2840         fclose(file);
2841 }
2842 #endif
2843
2844 /** @brief Prints an execution trace summary. */
2845 void ModelChecker::print_summary() const
2846 {
2847 #if SUPPORT_MOD_ORDER_DUMP
2848         char buffername[100];
2849         sprintf(buffername, "exec%04u", stats.num_total);
2850         mo_graph->dumpGraphToFile(buffername);
2851         sprintf(buffername, "graph%04u", stats.num_total);
2852         dumpGraph(buffername);
2853 #endif
2854
2855         model_print("Execution %u:", stats.num_total);
2856         if (isfeasibleprefix()) {
2857                 if (scheduler->all_threads_sleeping())
2858                         model_print(" SLEEP-SET REDUNDANT");
2859                 model_print("\n");
2860         } else
2861                 print_infeasibility(" INFEASIBLE");
2862         print_list(action_trace);
2863         model_print("\n");
2864 }
2865
2866 /**
2867  * Add a Thread to the system for the first time. Should only be called once
2868  * per thread.
2869  * @param t The Thread to add
2870  */
2871 void ModelChecker::add_thread(Thread *t)
2872 {
2873         thread_map->put(id_to_int(t->get_id()), t);
2874         scheduler->add_thread(t);
2875 }
2876
2877 /**
2878  * Removes a thread from the scheduler.
2879  * @param t The Thread to remove
2880  */
2881 void ModelChecker::remove_thread(Thread *t)
2882 {
2883         scheduler->remove_thread(t);
2884 }
2885
2886 /**
2887  * @brief Get a Thread reference by its ID
2888  * @param tid The Thread's ID
2889  * @return A Thread reference
2890  */
2891 Thread * ModelChecker::get_thread(thread_id_t tid) const
2892 {
2893         return thread_map->get(id_to_int(tid));
2894 }
2895
2896 /**
2897  * @brief Get a reference to the Thread in which a ModelAction was executed
2898  * @param act The ModelAction
2899  * @return A Thread reference
2900  */
2901 Thread * ModelChecker::get_thread(const ModelAction *act) const
2902 {
2903         return get_thread(act->get_tid());
2904 }
2905
2906 /**
2907  * @brief Get a Promise's "promise number"
2908  *
2909  * A "promise number" is an index number that is unique to a promise, valid
2910  * only for a specific snapshot of an execution trace. Promises may come and go
2911  * as they are generated and resolved, so an index only retains meaning for the
2912  * current snapshot.
2913  *
2914  * @param promise The Promise to check
2915  * @return The promise index, if the promise is still valid; otherwise -1
2916  */
2917 int ModelChecker::get_promise_number(const Promise *promise) const
2918 {
2919         for (unsigned int i = 0; i < promises->size(); i++)
2920                 if ((*promises)[i] == promise)
2921                         return i;
2922         /* Not found */
2923         return -1;
2924 }
2925
2926 /**
2927  * @brief Check if a Thread is currently enabled
2928  * @param t The Thread to check
2929  * @return True if the Thread is currently enabled
2930  */
2931 bool ModelChecker::is_enabled(Thread *t) const
2932 {
2933         return scheduler->is_enabled(t);
2934 }
2935
2936 /**
2937  * @brief Check if a Thread is currently enabled
2938  * @param tid The ID of the Thread to check
2939  * @return True if the Thread is currently enabled
2940  */
2941 bool ModelChecker::is_enabled(thread_id_t tid) const
2942 {
2943         return scheduler->is_enabled(tid);
2944 }
2945
2946 /**
2947  * Switch from a model-checker context to a user-thread context. This is the
2948  * complement of ModelChecker::switch_to_master and must be called from the
2949  * model-checker context
2950  *
2951  * @param thread The user-thread to switch to
2952  */
2953 void ModelChecker::switch_from_master(Thread *thread)
2954 {
2955         scheduler->set_current_thread(thread);
2956         Thread::swap(&system_context, thread);
2957 }
2958
2959 /**
2960  * Switch from a user-thread context to the "master thread" context (a.k.a. system
2961  * context). This switch is made with the intention of exploring a particular
2962  * model-checking action (described by a ModelAction object). Must be called
2963  * from a user-thread context.
2964  *
2965  * @param act The current action that will be explored. May be NULL only if
2966  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2967  * ModelChecker::has_asserted).
2968  * @return The value returned by the current action
2969  */
2970 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2971 {
2972         DBG();
2973         Thread *old = thread_current();
2974         ASSERT(!old->get_pending());
2975         old->set_pending(act);
2976         if (Thread::swap(old, &system_context) < 0) {
2977                 perror("swap threads");
2978                 exit(EXIT_FAILURE);
2979         }
2980         return old->get_return_value();
2981 }
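/*
 * Illustrative sketch (assumed caller, not part of this file): user-program
 * operations are intercepted elsewhere, wrapped in a ModelAction, and handed
 * to the checker through switch_to_master(); the returned value carries the
 * operation's result (e.g., the value read by an atomic load). The object
 * 'obj' below is a stand-in for the address of some user atomic.
 *
 *     uint64_t val = model->switch_to_master(
 *                     new ModelAction(ATOMIC_READ, std::memory_order_relaxed, obj));
 */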
2982
2983 /**
2984  * Takes the next step in the execution, if possible.
2985  * @param curr The current step to take
2986  * @return Returns the next Thread to run, if any; NULL if this execution
2987  * should terminate
2988  */
2989 Thread * ModelChecker::take_step(ModelAction *curr)
2990 {
2991         Thread *curr_thrd = get_thread(curr);
2992         ASSERT(curr_thrd->get_state() == THREAD_READY);
2993
2994         curr = check_current_action(curr);
2995
2996         /* Infeasible -> don't take any more steps */
2997         if (is_infeasible())
2998                 return NULL;
2999         else if (isfeasibleprefix() && have_bug_reports()) {
3000                 set_assert();
3001                 return NULL;
3002         }
3003
3004         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3005                 return NULL;
3006
3007         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3008                 scheduler->remove_thread(curr_thrd);
3009
3010         Thread *next_thrd = get_next_thread(curr);
3011
3012         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3013                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3014
3015         return next_thrd;
3016 }
3017
3018 /** Wrapper to run the user's main function, with appropriate arguments */
3019 void user_main_wrapper(void *)
3020 {
3021         user_main(model->params.argc, model->params.argv);
3022 }
3023
3024 /** @brief Run ModelChecker for the user program */
3025 void ModelChecker::run()
3026 {
3027         do {
3028                 thrd_t user_thread;
3029                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3030                 add_thread(t);
3031
3032                 do {
3033                         /*
3034                          * Stash next pending action(s) for thread(s). There
3035                          * should only need to stash one thread's action--the
3036                          * thread which just took a step--plus the first step
3037                          * for any newly-created thread
3038                          */
3039                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3040                                 thread_id_t tid = int_to_id(i);
3041                                 Thread *thr = get_thread(tid);
3042                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3043                                         switch_from_master(thr);
3044                                 }
3045                         }
3046
3047                         /* Catch assertions from prior take_step or from
3048                          * between-ModelAction bugs (e.g., data races) */
3049                         if (has_asserted())
3050                                 break;
3051
3052                         /* Consume the next action for a Thread */
3053                         ModelAction *curr = t->get_pending();
3054                         t->set_pending(NULL);
3055                         t = take_step(curr);
3056                 } while (t && !t->is_model_thread());
3057
3058                 /*
3059                  * Launch end-of-execution release sequence fixups only when
3060                  * the execution is otherwise feasible AND:
3061                  *
3062                  * (1) there are pending release sequences
3063                  * (2) there are pending assertions that could be invalidated by a
3064                  * change in clock vectors (i.e., data races)
3065                  * (3) there are no pending promises
3066                  */
3067                 while (!pending_rel_seqs->empty() &&
3068                                 is_feasible_prefix_ignore_relseq() &&
3069                                 !unrealizedraces.empty()) {
3070                         model_print("*** WARNING: release sequence fixup action "
3071                                         "(%zu pending release sequence(s)) ***\n",
3072                                         pending_rel_seqs->size());
3073                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3074                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3075                                         model_thread);
3076                         take_step(fixup);
3077                 }
3078         } while (next_execution());
3079
3080         model_print("******* Model-checking complete: *******\n");
3081         print_stats();
3082 }