add some support for traceanalysis plugins
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "snapshot-interface.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "output.h"
19 #include "traceanalysis.h"
20
21 #define INITIAL_THREAD_ID       0
22
23 ModelChecker *model;
24
25 struct bug_message {
26         bug_message(const char *str) {
27                 const char *fmt = "  [BUG] %s\n";
28                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
29                 sprintf(msg, fmt, str);
30         }
31         ~bug_message() { if (msg) snapshot_free(msg); }
32
33         char *msg;
34         void print() { model_print("%s", msg); }
35
36         SNAPSHOTALLOC
37 };
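/*
 * Note (added commentary): SNAPSHOTALLOC is defined elsewhere in this
 * project; it presumably routes allocation of bug_message objects through
 * the snapshotting allocator, consistent with the snapshot_malloc()/
 * snapshot_free() calls above.
 */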
38
39 /**
40  * Structure for holding small ModelChecker members that should be snapshotted
41  */
42 struct model_snapshot_members {
43         model_snapshot_members() :
44                 /* First thread created will have id INITIAL_THREAD_ID */
45                 next_thread_id(INITIAL_THREAD_ID),
46                 used_sequence_numbers(0),
47                 next_backtrack(NULL),
48                 bugs(),
49                 stats(),
50                 failed_promise(false),
51                 too_many_reads(false),
52                 no_valid_reads(false),
53                 bad_synchronization(false),
54                 asserted(false)
55         { }
56
57         ~model_snapshot_members() {
58                 for (unsigned int i = 0; i < bugs.size(); i++)
59                         delete bugs[i];
60                 bugs.clear();
61         }
62
63         unsigned int next_thread_id;
64         modelclock_t used_sequence_numbers;
65         ModelAction *next_backtrack;
66         SnapVector<bug_message *> bugs;
67         struct execution_stats stats;
68         bool failed_promise;
69         bool too_many_reads;
70         bool no_valid_reads;
71         /** @brief Incorrectly-ordered synchronization was made */
72         bool bad_synchronization;
73         bool asserted;
74
75         SNAPSHOTALLOC
76 };
77
78 /** @brief Constructor */
79 ModelChecker::ModelChecker(struct model_params params) :
80         /* Initialize default scheduler */
81         params(params),
82         scheduler(new Scheduler()),
83         diverge(NULL),
84         earliest_diverge(NULL),
85         action_trace(new action_list_t()),
86         thread_map(new HashTable<int, Thread *, int>()),
87         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
90         promises(new SnapVector<Promise *>()),
91         futurevalues(new SnapVector<struct PendingFutureValue>()),
92         pending_rel_seqs(new SnapVector<struct release_seq *>()),
93         thrd_last_action(new SnapVector<ModelAction *>(1)),
94         thrd_last_fence_release(new SnapVector<ModelAction *>()),
95         node_stack(new NodeStack()),
96         trace_analyses(new ModelVector<Trace_Analysis *>()),
97         priv(new struct model_snapshot_members()),
98         mo_graph(new CycleGraph())
99 {
100         /* Initialize a model-checker thread, for special ModelActions */
101         model_thread = new Thread(get_next_id());
102         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 }
104
105 /** @brief Destructor */
106 ModelChecker::~ModelChecker()
107 {
108         for (unsigned int i = 0; i < get_num_threads(); i++)
109                 delete thread_map->get(i);
110         delete thread_map;
111
112         delete obj_thrd_map;
113         delete obj_map;
114         delete condvar_waiters_map;
115         delete action_trace;
116
117         for (unsigned int i = 0; i < promises->size(); i++)
118                 delete (*promises)[i];
119         delete promises;
120
121         delete pending_rel_seqs;
122
123         delete thrd_last_action;
124         delete thrd_last_fence_release;
125         delete node_stack;
126         for (unsigned int i = 0; i < trace_analyses->size(); i++)
127                 delete (*trace_analyses)[i];
128         delete trace_analyses;
129         delete scheduler;
130         delete mo_graph;
131         delete priv;
132 }
133
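/**
 * @brief Get the action list for a memory location, lazily creating an empty
 * list if none exists yet (so callers never receive NULL)
 */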
134 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
135 {
136         action_list_t *tmp = hash->get(ptr);
137         if (tmp == NULL) {
138                 tmp = new action_list_t();
139                 hash->put(ptr, tmp);
140         }
141         return tmp;
142 }
143
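/**
 * @brief Same as get_safe_ptr_action(), but for the per-thread action-list
 * vectors (used, e.g., by obj_thrd_map)
 */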
144 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
145 {
146         SnapVector<action_list_t> *tmp = hash->get(ptr);
147         if (tmp == NULL) {
148                 tmp = new SnapVector<action_list_t>();
149                 hash->put(ptr, tmp);
150         }
151         return tmp;
152 }
153
154 /**
155  * Restores user program to initial state and resets all model-checker data
156  * structures.
157  */
158 void ModelChecker::reset_to_initial_state()
159 {
160         DEBUG("+++ Resetting to initial state +++\n");
161         node_stack->reset_execution();
162
163         /**
164          * FIXME: if we utilize partial rollback, we will need to free only
165          * those pending actions which were NOT pending before the rollback
166          * point
167          */
168         for (unsigned int i = 0; i < get_num_threads(); i++)
169                 delete get_thread(int_to_id(i))->get_pending();
170
171         snapshot_backtrack_before(0);
172 }
173
174 /** @return a thread ID for a new Thread */
175 thread_id_t ModelChecker::get_next_id()
176 {
177         return priv->next_thread_id++;
178 }
179
180 /** @return the number of user threads created during this execution */
181 unsigned int ModelChecker::get_num_threads() const
182 {
183         return priv->next_thread_id;
184 }
185
186 /**
187  * Must be called from user-thread context (e.g., through the global
188  * thread_current() interface)
189  *
190  * @return The currently executing Thread.
191  */
192 Thread * ModelChecker::get_current_thread() const
193 {
194         return scheduler->get_current_thread();
195 }
196
197 /** @return a sequence number for a new ModelAction */
198 modelclock_t ModelChecker::get_next_seq_num()
199 {
200         return ++priv->used_sequence_numbers;
201 }
202
203 Node * ModelChecker::get_curr_node() const
204 {
205         return node_stack->get_head();
206 }
207
208 /**
209  * @brief Select the next thread to execute based on the current action
210  *
211  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
212  * actions should be followed by the execution of their child thread. In either
213  * case, the current action should determine the next thread schedule.
214  *
215  * @param curr The current action
216  * @return The next thread to run, if the current action will determine this
217  * selection; otherwise NULL
218  */
219 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
220 {
221         /* Do not split atomic RMW */
222         if (curr->is_rmwr())
223                 return get_thread(curr);
224         /* Follow CREATE with the created thread */
225         if (curr->get_type() == THREAD_CREATE)
226                 return curr->get_thread_operand();
227         return NULL;
228 }
229
230 /**
231  * @brief Choose the next thread to execute.
232  *
233  * This function chooses the next thread that should execute. It can enforce
234  * execution replay/backtracking or, if the model-checker has no preference
235  * regarding the next thread (i.e., when exploring a new execution ordering),
236  * defer to the scheduler.
237  *
238  * @return The next chosen thread to run, if any exists; otherwise NULL, if
239  * the current execution should terminate.
240  */
241 Thread * ModelChecker::get_next_thread()
242 {
243         thread_id_t tid;
244
245         /*
246          * Have we completed exploring the preselected path? Then let the
247          * scheduler decide
248          */
249         if (diverge == NULL)
250                 return scheduler->select_next_thread();
251
252         /* Else, we are trying to replay an execution */
253         ModelAction *next = node_stack->get_next()->get_action();
254
255         if (next == diverge) {
256                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
257                         earliest_diverge = diverge;
258
259                 Node *nextnode = next->get_node();
260                 Node *prevnode = nextnode->get_parent();
261                 scheduler->update_sleep_set(prevnode);
262
263                 /* Reached divergence point */
264                 if (nextnode->increment_behaviors()) {
265                         /* Execute the same thread with a new behavior */
266                         tid = next->get_tid();
267                         node_stack->pop_restofstack(2);
268                 } else {
269                         ASSERT(prevnode);
270                         /* Make a different thread execute for next step */
271                         scheduler->add_sleep(get_thread(next->get_tid()));
272                         tid = prevnode->get_next_backtrack();
273                         /* Make sure the backtracked thread isn't sleeping. */
274                         node_stack->pop_restofstack(1);
275                         if (diverge == earliest_diverge) {
276                                 earliest_diverge = prevnode->get_action();
277                         }
278                 }
279                 /* Start the round robin scheduler from this thread id */
280                 scheduler->set_scheduler_thread(tid);
281                 /* The correct sleep set is in the parent node. */
282                 execute_sleep_set();
283
284                 DEBUG("*** Divergence point ***\n");
285
286                 diverge = NULL;
287         } else {
288                 tid = next->get_tid();
289         }
290         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
291         ASSERT(tid != THREAD_ID_T_NONE);
292         return get_thread(id_to_int(tid));
293 }
294
295 /**
296  * We need to know what the next actions of all threads in the sleep
297  * set will be. This method marks each such thread's pending action with
298  * the sleep flag, so that sleeping threads stay asleep.
299  */
300
301 void ModelChecker::execute_sleep_set()
302 {
303         for (unsigned int i = 0; i < get_num_threads(); i++) {
304                 thread_id_t tid = int_to_id(i);
305                 Thread *thr = get_thread(tid);
306                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
307                         thr->get_pending()->set_sleep_flag();
308                 }
309         }
310 }
311
312 /**
313  * @brief Should the current action wake up a given thread?
314  *
315  * @param curr The current action
316  * @param thread The thread that we might wake up
317  * @return True, if we should wake up the sleeping thread; false otherwise
318  */
319 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
320 {
321         const ModelAction *asleep = thread->get_pending();
322         /* Don't allow partial RMW to wake anyone up */
323         if (curr->is_rmwr())
324                 return false;
325         /* Synchronizing actions may have been backtracked */
326         if (asleep->could_synchronize_with(curr))
327                 return true;
328         /* All acquire/release fences and fence-acquire/store-release */
329         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
330                 return true;
331         /* Fence-release + store can awake load-acquire on the same location */
332         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
333                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
334                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
335                         return true;
336         }
337         return false;
338 }
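/*
 * Illustrative example of the fence-release case above (hypothetical trace):
 *   T1 (asleep): x.load(memory_order_acquire)          <-- pending action
 *   T2:          atomic_thread_fence(memory_order_release);
 *                ...
 *                x.store(1, memory_order_relaxed)      <-- curr
 * The relaxed store by itself cannot synchronize with the sleeping
 * load-acquire, but T2's earlier fence-release can, so the sleeping thread
 * must be considered for wake-up.
 */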
339
340 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
341 {
342         for (unsigned int i = 0; i < get_num_threads(); i++) {
343                 Thread *thr = get_thread(int_to_id(i));
344                 if (scheduler->is_sleep_set(thr)) {
345                         if (should_wake_up(curr, thr))
346                                 /* Remove this thread from sleep set */
347                                 scheduler->remove_sleep(thr);
348                 }
349         }
350 }
351
352 /** @brief Alert the model-checker that an incorrectly-ordered
353  * synchronization was made */
354 void ModelChecker::set_bad_synchronization()
355 {
356         priv->bad_synchronization = true;
357 }
358
359 /**
360  * Check whether the current trace has triggered an assertion which should halt
361  * its execution.
362  *
363  * @return True, if the execution should be aborted; false otherwise
364  */
365 bool ModelChecker::has_asserted() const
366 {
367         return priv->asserted;
368 }
369
370 /**
371  * Trigger a trace assertion which should cause this execution to be halted.
372  * This can be due to a detected bug or due to an infeasibility that should
373  * halt ASAP.
374  */
375 void ModelChecker::set_assert()
376 {
377         priv->asserted = true;
378 }
379
380 /**
381  * Check if we are in a deadlock. Should only be called at the end of an
382  * execution, although it should not give false positives in the middle of an
383  * execution (there should be some ENABLED thread).
384  *
385  * @return True if program is in a deadlock; false otherwise
386  */
387 bool ModelChecker::is_deadlocked() const
388 {
389         bool blocking_threads = false;
390         for (unsigned int i = 0; i < get_num_threads(); i++) {
391                 thread_id_t tid = int_to_id(i);
392                 if (is_enabled(tid))
393                         return false;
394                 Thread *t = get_thread(tid);
395                 if (!t->is_model_thread() && t->get_pending())
396                         blocking_threads = true;
397         }
398         return blocking_threads;
399 }
400
401 /**
402  * Check if this is a complete execution. That is, have all threads completed
403  * execution (rather than exiting because sleep sets have forced a redundant
404  * execution).
405  *
406  * @return True if the execution is complete.
407  */
408 bool ModelChecker::is_complete_execution() const
409 {
410         for (unsigned int i = 0; i < get_num_threads(); i++)
411                 if (is_enabled(int_to_id(i)))
412                         return false;
413         return true;
414 }
415
416 /**
417  * @brief Assert a bug in the executing program.
418  *
419  * Use this function to assert any sort of bug in the user program. If the
420  * current trace is feasible (actually, a prefix of some feasible execution),
421  * then this execution will be aborted, printing the appropriate message. If
422  * the current trace is not yet feasible, the error message will be stashed and
423  * printed if the execution ever becomes feasible.
424  *
425  * @param msg Descriptive message for the bug (do not include newline char)
426  * @return True if bug is immediately-feasible
427  */
428 bool ModelChecker::assert_bug(const char *msg, ...)
429 {
430         char str[800];
431
432         va_list ap;
433         va_start(ap, msg);
434         vsnprintf(str, sizeof(str), msg, ap);
435         va_end(ap);
436
437         priv->bugs.push_back(new bug_message(str));
438
439         if (isfeasibleprefix()) {
440                 set_assert();
441                 return true;
442         }
443         return false;
444 }
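/*
 * Illustrative usage (hypothetical call site): bug messages use printf-style
 * formatting, e.g.
 *   model->assert_bug("Invalid memory accessed by thread %d", id_to_int(tid));
 * The return value tells the caller whether the bug is already feasible and
 * the execution has been flagged for abort via set_assert().
 */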
445
446 /**
447  * @brief Assert a bug in the executing program, asserted by a user thread
448  * @see ModelChecker::assert_bug
449  * @param msg Descriptive message for the bug (do not include newline char)
450  */
451 void ModelChecker::assert_user_bug(const char *msg)
452 {
453         /* If feasible bug, bail out now */
454         if (assert_bug(msg))
455                 switch_to_master(NULL);
456 }
457
458 /** @return True, if any bugs have been reported for this execution */
459 bool ModelChecker::have_bug_reports() const
460 {
461         return priv->bugs.size() != 0;
462 }
463
464 /** @brief Print bug report listing for this execution (if any bugs exist) */
465 void ModelChecker::print_bugs() const
466 {
467         if (have_bug_reports()) {
468                 model_print("Bug report: %zu bug%s detected\n",
469                                 priv->bugs.size(),
470                                 priv->bugs.size() > 1 ? "s" : "");
471                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
472                         priv->bugs[i]->print();
473         }
474 }
475
476 /**
477  * @brief Record end-of-execution stats
478  *
479  * Must be run when exiting an execution. Records various stats.
480  * @see struct execution_stats
481  */
482 void ModelChecker::record_stats()
483 {
484         stats.num_total++;
485         if (!isfeasibleprefix())
486                 stats.num_infeasible++;
487         else if (have_bug_reports())
488                 stats.num_buggy_executions++;
489         else if (is_complete_execution())
490                 stats.num_complete++;
491         else {
492                 stats.num_redundant++;
493
494                 /**
495                  * @todo We can violate this ASSERT() when fairness/sleep sets
496                  * conflict to cause an execution to terminate, e.g. with:
497                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
498                  */
499                 //ASSERT(scheduler->all_threads_sleeping());
500         }
501 }
502
503 /** @brief Print execution stats */
504 void ModelChecker::print_stats() const
505 {
506         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
507         model_print("Number of redundant executions: %d\n", stats.num_redundant);
508         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
509         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
510         model_print("Total executions: %d\n", stats.num_total);
511         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
512 }
513
514 /**
515  * @brief End-of-execution print
516  * @param printbugs Should any existing bugs be printed?
517  */
518 void ModelChecker::print_execution(bool printbugs) const
519 {
520         print_program_output();
521
522         if (params.verbose) {
523                 model_print("Earliest divergence point since last feasible execution:\n");
524                 if (earliest_diverge)
525                         earliest_diverge->print();
526                 else
527                         model_print("(Not set)\n");
528
529                 model_print("\n");
530                 print_stats();
531         }
532
533         /* Don't print invalid bugs */
534         if (printbugs)
535                 print_bugs();
536
537         model_print("\n");
538         print_summary();
539 }
540
541 /**
542  * Queries the model-checker for more executions to explore and, if one
543  * exists, resets the model-checker state to execute a new execution.
544  *
545  * @return If there are more executions to explore, return true. Otherwise,
546  * return false.
547  */
548 bool ModelChecker::next_execution()
549 {
550         DBG();
551         /* Is this execution a feasible execution that's worth bug-checking? */
552         bool complete = isfeasibleprefix() && (is_complete_execution() ||
553                         have_bug_reports());
554
555         /* End-of-execution bug checks */
556         if (complete) {
557                 if (is_deadlocked())
558                         assert_bug("Deadlock detected");
559
560                 checkDataRaces();
561                 run_trace_analyses();
562         }
563
564         record_stats();
565
566         /* Output */
567         if (params.verbose || (complete && have_bug_reports()))
568                 print_execution(complete);
569         else
570                 clear_program_output();
571
572         if (complete)
573                 earliest_diverge = NULL;
574
575         if ((diverge = get_next_backtrack()) == NULL)
576                 return false;
577
578         if (DBG_ENABLED()) {
579                 model_print("Next execution will diverge at:\n");
580                 diverge->print();
581         }
582
583         reset_to_initial_state();
584         return true;
585 }
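/*
 * Sketch of the surrounding driver loop (hypothetical; the actual loop lives
 * outside this function): each execution is run to completion, then
 * next_execution() decides whether another, divergent execution remains to
 * be explored.
 *
 *   do {
 *           // ... execute the user program under the scheduler ...
 *   } while (model->next_execution());
 */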
586
587 /**
588  * @brief Run trace analyses on the complete trace
589  */
590 void ModelChecker::run_trace_analyses() {
591         for (unsigned int i = 0; i < trace_analyses->size(); i++) {
592                 (*trace_analyses)[i]->analyze(action_trace);
593         }
594 }
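/*
 * Sketch of a trace-analysis plugin (illustrative; the exact interface and
 * registration mechanism are defined in traceanalysis.h, not here): a plugin
 * implements analyze(), which receives the complete action trace of a
 * feasible execution, and is appended to trace_analyses before the checker
 * starts, e.g.
 *
 *   class MyAnalysis : public Trace_Analysis {
 *           void analyze(action_list_t *trace) { ... inspect the trace ... }
 *   };
 *   model->add_trace_analysis(new MyAnalysis());  // hypothetical registration helper
 */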
595
596 /**
597  * @brief Find the last fence-related backtracking conflict for a ModelAction
598  *
599  * This function performs the search for the most recent conflicting action
600  * against which we should perform backtracking, as affected by fence
601  * operations. This includes pairs of potentially-synchronizing actions which
602  * occur due to fence-acquire or fence-release, and hence should be explored in
603  * the opposite execution order.
604  *
605  * @param act The current action
606  * @return The most recent action which conflicts with act due to fences
607  */
608 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
609 {
610         /* Only perform release/acquire fence backtracking for stores */
611         if (!act->is_write())
612                 return NULL;
613
614         /* Find a fence-release (or, act is a release) */
615         ModelAction *last_release;
616         if (act->is_release())
617                 last_release = act;
618         else
619                 last_release = get_last_fence_release(act->get_tid());
620         if (!last_release)
621                 return NULL;
622
623         /* Skip past the release */
624         action_list_t *list = action_trace;
625         action_list_t::reverse_iterator rit;
626         for (rit = list->rbegin(); rit != list->rend(); rit++)
627                 if (*rit == last_release)
628                         break;
629         ASSERT(rit != list->rend());
630
631         /* Find a prior:
632          *   load-acquire
633          * or
634          *   load --sb-> fence-acquire */
635         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
636         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
637         bool found_acquire_fences = false;
638         for ( ; rit != list->rend(); rit++) {
639                 ModelAction *prev = *rit;
640                 if (act->same_thread(prev))
641                         continue;
642
643                 int tid = id_to_int(prev->get_tid());
644
645                 if (prev->is_read() && act->same_var(prev)) {
646                         if (prev->is_acquire()) {
647                                 /* Found most recent load-acquire, don't need
648                                  * to search for more fences */
649                                 if (!found_acquire_fences)
650                                         return NULL;
651                         } else {
652                                 prior_loads[tid] = prev;
653                         }
654                 }
655                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
656                         found_acquire_fences = true;
657                         acquire_fences[tid] = prev;
658                 }
659         }
660
661         ModelAction *latest_backtrack = NULL;
662         for (unsigned int i = 0; i < acquire_fences.size(); i++)
663                 if (acquire_fences[i] && prior_loads[i])
664                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
665                                 latest_backtrack = acquire_fences[i];
666         return latest_backtrack;
667 }
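/*
 * Illustrative pattern this search targets (hypothetical trace, act is a
 * store to x):
 *   T2: r = x.load(memory_order_relaxed);
 *       atomic_thread_fence(memory_order_acquire);
 *   T1: atomic_thread_fence(memory_order_release);
 *       x.store(1, memory_order_relaxed);            <-- act
 * Exploring these operations in the opposite order could allow the fence
 * pair to synchronize (the load reading from the store), so T2's acquire
 * fence is returned as a candidate backtracking point.
 */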
668
669 /**
670  * @brief Find the last backtracking conflict for a ModelAction
671  *
672  * This function performs the search for the most recent conflicting action
673  * against which we should perform backtracking. This primarily includes pairs of
674  * synchronizing actions which should be explored in the opposite execution
675  * order.
676  *
677  * @param act The current action
678  * @return The most recent action which conflicts with act
679  */
680 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
681 {
682         switch (act->get_type()) {
683         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
684         case ATOMIC_READ:
685         case ATOMIC_WRITE:
686         case ATOMIC_RMW: {
687                 ModelAction *ret = NULL;
688
689                 /* linear search: from most recent to oldest */
690                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
691                 action_list_t::reverse_iterator rit;
692                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
693                         ModelAction *prev = *rit;
694                         if (prev->could_synchronize_with(act)) {
695                                 ret = prev;
696                                 break;
697                         }
698                 }
699
700                 ModelAction *ret2 = get_last_fence_conflict(act);
701                 if (!ret2)
702                         return ret;
703                 if (!ret)
704                         return ret2;
705                 if (*ret < *ret2)
706                         return ret2;
707                 return ret;
708         }
709         case ATOMIC_LOCK:
710         case ATOMIC_TRYLOCK: {
711                 /* linear search: from most recent to oldest */
712                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
713                 action_list_t::reverse_iterator rit;
714                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
715                         ModelAction *prev = *rit;
716                         if (act->is_conflicting_lock(prev))
717                                 return prev;
718                 }
719                 break;
720         }
721         case ATOMIC_UNLOCK: {
722                 /* linear search: from most recent to oldest */
723                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
724                 action_list_t::reverse_iterator rit;
725                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
726                         ModelAction *prev = *rit;
727                         if (!act->same_thread(prev) && prev->is_failed_trylock())
728                                 return prev;
729                 }
730                 break;
731         }
732         case ATOMIC_WAIT: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_failed_trylock())
739                                 return prev;
740                         if (!act->same_thread(prev) && prev->is_notify())
741                                 return prev;
742                 }
743                 break;
744         }
745
746         case ATOMIC_NOTIFY_ALL:
747         case ATOMIC_NOTIFY_ONE: {
748                 /* linear search: from most recent to oldest */
749                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
750                 action_list_t::reverse_iterator rit;
751                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
752                         ModelAction *prev = *rit;
753                         if (!act->same_thread(prev) && prev->is_wait())
754                                 return prev;
755                 }
756                 break;
757         }
758         default:
759                 break;
760         }
761         return NULL;
762 }
763
764 /** This method finds backtracking points against which we should try to
765  * reorder the parameter ModelAction.
766  *
767  * @param act The ModelAction to find backtracking points for.
768  */
769 void ModelChecker::set_backtracking(ModelAction *act)
770 {
771         Thread *t = get_thread(act);
772         ModelAction *prev = get_last_conflict(act);
773         if (prev == NULL)
774                 return;
775
776         Node *node = prev->get_node()->get_parent();
777
778         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
779         int low_tid, high_tid;
780         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
781                 low_tid = id_to_int(act->get_tid());
782                 high_tid = low_tid + 1;
783         } else {
784                 low_tid = 0;
785                 high_tid = get_num_threads();
786         }
787
788         for (int i = low_tid; i < high_tid; i++) {
789                 thread_id_t tid = int_to_id(i);
790
791                 /* Make sure this thread can be enabled here. */
792                 if (i >= node->get_num_threads())
793                         break;
794
795                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
796                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
797                 if (node->enabled_status(tid) != THREAD_ENABLED)
798                         continue;
799
800                 /* Check if this has been explored already */
801                 if (node->has_been_explored(tid))
802                         continue;
803
804                 /* See if fairness allows */
805                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
806                         bool unfair = false;
807                         for (int t = 0; t < node->get_num_threads(); t++) {
808                                 thread_id_t tother = int_to_id(t);
809                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
810                                         unfair = true;
811                                         break;
812                                 }
813                         }
814                         if (unfair)
815                                 continue;
816                 }
817
818                 /* See if CHESS-like yield fairness allows */
819                 if (model->params.yieldon) {
820                         bool unfair = false;
821                         for (int t = 0; t < node->get_num_threads(); t++) {
822                                 thread_id_t tother = int_to_id(t);
823                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
824                                         unfair = true;
825                                         break;
826                                 }
827                         }
828                         if (unfair)
829                                 continue;
830                 }
831
832                 /* Cache the latest backtracking point */
833                 set_latest_backtrack(prev);
834
835                 /* If this is a new backtracking point, mark the tree */
836                 if (!node->set_backtrack(tid))
837                         continue;
838                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
839                                         id_to_int(prev->get_tid()),
840                                         id_to_int(t->get_id()));
841                 if (DBG_ENABLED()) {
842                         prev->print();
843                         act->print();
844                 }
845         }
846 }
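/*
 * Reading of the loop above (descriptive): when the conflicting action's own
 * thread was enabled at the conflict point, DPOR only needs to backtrack
 * that single thread (low_tid == high_tid - 1); otherwise every enabled,
 * not-yet-explored thread that passes the fairness/yield checks becomes a
 * candidate backtracking point at prev's node.
 */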
847
848 /**
849  * @brief Cache a backtracking point as the "most recent", if eligible
850  *
851  * Note that this does not prepare the NodeStack for this backtracking
852  * operation; it only caches the action on a per-execution basis.
853  *
854  * @param act The operation at which we should explore a different next action
855  * (i.e., backtracking point)
856  * @return True, if this action is now the most recent backtracking point;
857  * false otherwise
858  */
859 bool ModelChecker::set_latest_backtrack(ModelAction *act)
860 {
861         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
862                 priv->next_backtrack = act;
863                 return true;
864         }
865         return false;
866 }
867
868 /**
869  * Returns last backtracking point. The model checker will explore a different
870  * path for this point in the next execution.
871  * @return The ModelAction at which the next execution should diverge.
872  */
873 ModelAction * ModelChecker::get_next_backtrack()
874 {
875         ModelAction *next = priv->next_backtrack;
876         priv->next_backtrack = NULL;
877         return next;
878 }
879
880 /**
881  * Processes a read model action.
882  * @param curr is the read model action to process.
883  * @return True if processing this read updates the mo_graph.
884  */
885 bool ModelChecker::process_read(ModelAction *curr)
886 {
887         Node *node = curr->get_node();
888         while (true) {
889                 bool updated = false;
890                 switch (node->get_read_from_status()) {
891                 case READ_FROM_PAST: {
892                         const ModelAction *rf = node->get_read_from_past();
893                         ASSERT(rf);
894
895                         mo_graph->startChanges();
896
897                         ASSERT(!is_infeasible());
898                         if (!check_recency(curr, rf)) {
899                                 if (node->increment_read_from()) {
900                                         mo_graph->rollbackChanges();
901                                         continue;
902                                 } else {
903                                         priv->too_many_reads = true;
904                                 }
905                         }
906
907                         updated = r_modification_order(curr, rf);
908                         read_from(curr, rf);
909                         mo_graph->commitChanges();
910                         mo_check_promises(curr, true);
911                         break;
912                 }
913                 case READ_FROM_PROMISE: {
914                         Promise *promise = curr->get_node()->get_read_from_promise();
915                         if (promise->add_reader(curr))
916                                 priv->failed_promise = true;
917                         curr->set_read_from_promise(promise);
918                         mo_graph->startChanges();
919                         if (!check_recency(curr, promise))
920                                 priv->too_many_reads = true;
921                         updated = r_modification_order(curr, promise);
922                         mo_graph->commitChanges();
923                         break;
924                 }
925                 case READ_FROM_FUTURE: {
926                         /* Read from future value */
927                         struct future_value fv = node->get_future_value();
928                         Promise *promise = new Promise(curr, fv);
929                         curr->set_read_from_promise(promise);
930                         promises->push_back(promise);
931                         mo_graph->startChanges();
932                         updated = r_modification_order(curr, promise);
933                         mo_graph->commitChanges();
934                         break;
935                 }
936                 default:
937                         ASSERT(false);
938                 }
939                 get_thread(curr)->set_return_value(curr->get_return_value());
940                 return updated;
941         }
942 }
943
944 /**
945  * Processes a lock, trylock, or unlock model action.
946  * @param curr The mutex-related model action to process.
947  *
948  * The trylock operation checks whether the lock is taken. If not, it
949  * falls through to the normal lock case. If so, it fails (returning 0 to
950  * the caller).
951  *
952  * The lock operation has already been checked that it is enabled, so
953  * it just grabs the lock and synchronizes with the previous unlock.
954  *
955  * The unlock operation has to re-enable all of the threads that are
956  * waiting on the lock.
957  *
958  * @return True if synchronization was updated; false otherwise
959  */
960 bool ModelChecker::process_mutex(ModelAction *curr)
961 {
962         std::mutex *mutex = curr->get_mutex();
963         struct std::mutex_state *state = NULL;
964
965         if (mutex)
966                 state = mutex->get_state();
967
968         switch (curr->get_type()) {
969         case ATOMIC_TRYLOCK: {
970                 bool success = !state->locked;
971                 curr->set_try_lock(success);
972                 if (!success) {
973                         get_thread(curr)->set_return_value(0);
974                         break;
975                 }
976                 get_thread(curr)->set_return_value(1);
977         }
978                 //otherwise fall into the lock case
979         case ATOMIC_LOCK: {
980                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
981                         assert_bug("Lock access before initialization");
982                 state->locked = get_thread(curr);
983                 ModelAction *unlock = get_last_unlock(curr);
984                 //synchronize with the previous unlock statement
985                 if (unlock != NULL) {
986                         curr->synchronize_with(unlock);
987                         return true;
988                 }
989                 break;
990         }
991         case ATOMIC_WAIT:
992         case ATOMIC_UNLOCK: {
993                 /* wake up the other threads */
994                 for (unsigned int i = 0; i < get_num_threads(); i++) {
995                         Thread *t = get_thread(int_to_id(i));
996                         Thread *curr_thrd = get_thread(curr);
997                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
998                                 scheduler->wake(t);
999                 }
1000
1001                 /* unlock the lock - after checking who was waiting on it */
1002                 state->locked = NULL;
1003
1004                 if (!curr->is_wait())
1005                         break; /* The rest is only for ATOMIC_WAIT */
1006
1007                 /* Should we go to sleep? (simulate spurious failures) */
1008                 if (curr->get_node()->get_misc() == 0) {
1009                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1010                         /* disable us */
1011                         scheduler->sleep(get_thread(curr));
1012                 }
1013                 break;
1014         }
1015         case ATOMIC_NOTIFY_ALL: {
1016                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1017                 //activate all the waiting threads
1018                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1019                         scheduler->wake(get_thread(*rit));
1020                 }
1021                 waiters->clear();
1022                 break;
1023         }
1024         case ATOMIC_NOTIFY_ONE: {
1025                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1026                 int wakeupthread = curr->get_node()->get_misc();
1027                 action_list_t::iterator it = waiters->begin();
1028                 advance(it, wakeupthread);
1029                 scheduler->wake(get_thread(*it));
1030                 waiters->erase(it);
1031                 break;
1032         }
1033
1034         default:
1035                 ASSERT(0);
1036         }
1037         return false;
1038 }
1039
1040 /**
1041  * @brief Check if the current pending promises allow a future value to be sent
1042  *
1043  * If one of the following is true:
1044  *  (a) there are no pending promises
1045  *  (b) the reader and writer do not cross any promises
1046  * Then, it is safe to pass a future value back now.
1047  *
1048  * Otherwise, we must save the pending future value until (a) or (b) is true
1049  *
1050  * @param writer The operation which sends the future value. Must be a write.
1051  * @param reader The operation which will observe the value. Must be a read.
1052  * @return True if the future value can be sent now; false if it must wait.
1053  */
1054 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1055                 const ModelAction *reader) const
1056 {
1057         if (promises->empty())
1058                 return true;
1059         for (int i = promises->size() - 1; i >= 0; i--) {
1060                 ModelAction *pr = (*promises)[i]->get_reader(0);
1061                 //reader is after promise...doesn't cross any promise
1062                 if (*reader > *pr)
1063                         return true;
1064                 //writer is after promise, reader before...bad...
1065                 if (*writer > *pr)
1066                         return false;
1067         }
1068         return true;
1069 }
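/*
 * Illustrative example (hypothetical sequence numbers): suppose a single
 * pending promise whose first reader has sequence number 10. Then
 *   reader @ 12                 -> allowed  (reader is after the promise)
 *   writer @ 11, reader @ 9     -> blocked  (the pair would cross it)
 *   writer @ 8,  reader @ 9     -> allowed  (neither is after the promise)
 */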
1070
1071 /**
1072  * @brief Add a future value to a reader
1073  *
1074  * This function performs a few additional checks to ensure that the future
1075  * value can be feasibly observed by the reader
1076  *
1077  * @param writer The operation whose value is sent. Must be a write.
1078  * @param reader The read operation which may read the future value. Must be a read.
1079  */
1080 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1081 {
1082         /* Do more ambitious checks now that mo is more complete */
1083         if (!mo_may_allow(writer, reader))
1084                 return;
1085
1086         Node *node = reader->get_node();
1087
1088         /* Find an ancestor thread which exists at the time of the reader */
1089         Thread *write_thread = get_thread(writer);
1090         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1091                 write_thread = write_thread->get_parent();
1092
1093         struct future_value fv = {
1094                 writer->get_write_value(),
1095                 writer->get_seq_number() + params.maxfuturedelay,
1096                 write_thread->get_id(),
1097         };
1098         if (node->add_future_value(fv))
1099                 set_latest_backtrack(reader);
1100 }
1101
1102 /**
1103  * Process a write ModelAction
1104  * @param curr The ModelAction to process
1105  * @return True if the mo_graph was updated or promises were resolved
1106  */
1107 bool ModelChecker::process_write(ModelAction *curr)
1108 {
1109         /* Readers to which we may send our future value */
1110         ModelVector<ModelAction *> send_fv;
1111
1112         const ModelAction *earliest_promise_reader;
1113         bool updated_promises = false;
1114
1115         bool updated_mod_order = w_modification_order(curr, &send_fv);
1116         Promise *promise = pop_promise_to_resolve(curr);
1117
1118         if (promise) {
1119                 earliest_promise_reader = promise->get_reader(0);
1120                 updated_promises = resolve_promise(curr, promise);
1121         } else
1122                 earliest_promise_reader = NULL;
1123
1124         for (unsigned int i = 0; i < send_fv.size(); i++) {
1125                 ModelAction *read = send_fv[i];
1126
1127                 /* Don't send future values to reads after the Promise we resolve */
1128                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1129                         /* Check if future value can be sent immediately */
1130                         if (promises_may_allow(curr, read)) {
1131                                 add_future_value(curr, read);
1132                         } else {
1133                                 futurevalues->push_back(PendingFutureValue(curr, read));
1134                         }
1135                 }
1136         }
1137
1138         /* Check the pending future values */
1139         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1140                 struct PendingFutureValue pfv = (*futurevalues)[i];
1141                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1142                         add_future_value(pfv.writer, pfv.reader);
1143                         futurevalues->erase(futurevalues->begin() + i);
1144                 }
1145         }
1146
1147         mo_graph->commitChanges();
1148         mo_check_promises(curr, false);
1149
1150         get_thread(curr)->set_return_value(VALUE_NONE);
1151         return updated_mod_order || updated_promises;
1152 }
1153
1154 /**
1155  * Process a fence ModelAction
1156  * @param curr The ModelAction to process
1157  * @return True if synchronization was updated
1158  */
1159 bool ModelChecker::process_fence(ModelAction *curr)
1160 {
1161         /*
1162          * fence-relaxed: no-op
1163          * fence-release: only log the occurrence (not in this function), for
1164          *   use in later synchronization
1165          * fence-acquire (this function): search for hypothetical release
1166          *   sequences
1167          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1168          */
1169         bool updated = false;
1170         if (curr->is_acquire()) {
1171                 action_list_t *list = action_trace;
1172                 action_list_t::reverse_iterator rit;
1173                 /* Find X : is_read(X) && X --sb-> curr */
1174                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1175                         ModelAction *act = *rit;
1176                         if (act == curr)
1177                                 continue;
1178                         if (act->get_tid() != curr->get_tid())
1179                                 continue;
1180                         /* Stop at the beginning of the thread */
1181                         if (act->is_thread_start())
1182                                 break;
1183                         /* Stop once we reach a prior fence-acquire */
1184                         if (act->is_fence() && act->is_acquire())
1185                                 break;
1186                         if (!act->is_read())
1187                                 continue;
1188                         /* read-acquire will find its own release sequences */
1189                         if (act->is_acquire())
1190                                 continue;
1191
1192                         /* Establish hypothetical release sequences */
1193                         rel_heads_list_t release_heads;
1194                         get_release_seq_heads(curr, act, &release_heads);
1195                         for (unsigned int i = 0; i < release_heads.size(); i++)
1196                                 if (!curr->synchronize_with(release_heads[i]))
1197                                         set_bad_synchronization();
1198                         if (release_heads.size() != 0)
1199                                 updated = true;
1200                 }
1201         }
1202         return updated;
1203 }
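/*
 * Illustrative case for the fence-acquire search above (hypothetical trace):
 *   T1: x.store(1, memory_order_release);
 *   T2: r = x.load(memory_order_relaxed);
 *       atomic_thread_fence(memory_order_acquire);    <-- curr
 * The relaxed load does not synchronize on its own, but the subsequent
 * acquire fence may synchronize with the release heads of the sequence the
 * load read from, which is what the backwards walk establishes.
 */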
1204
1205 /**
1206  * @brief Process the current action for thread-related activity
1207  *
1208  * Performs current-action processing for a THREAD_* ModelAction. Processing
1209  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1210  * synchronization, etc.  This function is a no-op for non-THREAD actions
1211  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1212  *
1213  * @param curr The current action
1214  * @return True if synchronization was updated or a thread completed
1215  */
1216 bool ModelChecker::process_thread_action(ModelAction *curr)
1217 {
1218         bool updated = false;
1219
1220         switch (curr->get_type()) {
1221         case THREAD_CREATE: {
1222                 thrd_t *thrd = (thrd_t *)curr->get_location();
1223                 struct thread_params *params = (struct thread_params *)curr->get_value();
1224                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1225                 add_thread(th);
1226                 th->set_creation(curr);
1227                 /* Promises can be satisfied by children */
1228                 for (unsigned int i = 0; i < promises->size(); i++) {
1229                         Promise *promise = (*promises)[i];
1230                         if (promise->thread_is_available(curr->get_tid()))
1231                                 promise->add_thread(th->get_id());
1232                 }
1233                 break;
1234         }
1235         case THREAD_JOIN: {
1236                 Thread *blocking = curr->get_thread_operand();
1237                 ModelAction *act = get_last_action(blocking->get_id());
1238                 curr->synchronize_with(act);
1239                 updated = true; /* trigger rel-seq checks */
1240                 break;
1241         }
1242         case THREAD_FINISH: {
1243                 Thread *th = get_thread(curr);
1244                 /* Wake up any joining threads */
1245                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1246                         Thread *waiting = get_thread(int_to_id(i));
1247                         if (waiting->waiting_on() == th &&
1248                                         waiting->get_pending()->is_thread_join())
1249                                 scheduler->wake(waiting);
1250                 }
1251                 th->complete();
1252                 /* Completed thread can't satisfy promises */
1253                 for (unsigned int i = 0; i < promises->size(); i++) {
1254                         Promise *promise = (*promises)[i];
1255                         if (promise->thread_is_available(th->get_id()))
1256                                 if (promise->eliminate_thread(th->get_id()))
1257                                         priv->failed_promise = true;
1258                 }
1259                 updated = true; /* trigger rel-seq checks */
1260                 break;
1261         }
1262         case THREAD_START: {
1263                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1264                 break;
1265         }
1266         default:
1267                 break;
1268         }
1269
1270         return updated;
1271 }
1272
1273 /**
1274  * @brief Process the current action for release sequence fixup activity
1275  *
1276  * Performs model-checker release sequence fixups for the current action,
1277  * forcing a single pending release sequence to break (with a given, potential
1278  * "loose" write) or to complete (i.e., synchronize). If a pending release
1279  * sequence forms a complete release sequence, then we must perform the fixup
1280  * synchronization, mo_graph additions, etc.
1281  *
1282  * @param curr The current action; must be a release sequence fixup action
1283  * @param work_queue The work queue to which to add work items as they are
1284  * generated
1285  */
1286 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1287 {
1288         const ModelAction *write = curr->get_node()->get_relseq_break();
1289         struct release_seq *sequence = pending_rel_seqs->back();
1290         pending_rel_seqs->pop_back();
1291         ASSERT(sequence);
1292         ModelAction *acquire = sequence->acquire;
1293         const ModelAction *rf = sequence->rf;
1294         const ModelAction *release = sequence->release;
1295         ASSERT(acquire);
1296         ASSERT(release);
1297         ASSERT(rf);
1298         ASSERT(release->same_thread(rf));
1299
1300         if (write == NULL) {
1301                 /**
1302                  * @todo Forcing a synchronization requires that we set
1303                  * modification order constraints. For instance, we can't allow
1304                  * a fixup sequence in which two separate read-acquire
1305                  * operations read from the same sequence, where the first one
1306                  * synchronizes and the other doesn't. Essentially, we can't
1307                  * allow any writes to insert themselves between 'release' and
1308                  * 'rf'
1309                  */
1310
1311                 /* Must synchronize */
1312                 if (!acquire->synchronize_with(release)) {
1313                         set_bad_synchronization();
1314                         return;
1315                 }
1316                 /* Re-check all pending release sequences */
1317                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1318                 /* Re-check act for mo_graph edges */
1319                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1320
1321                 /* propagate synchronization to later actions */
1322                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1323                 for (; (*rit) != acquire; rit++) {
1324                         ModelAction *propagate = *rit;
1325                         if (acquire->happens_before(propagate)) {
1326                                 propagate->synchronize_with(acquire);
1327                                 /* Re-check 'propagate' for mo_graph edges */
1328                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1329                         }
1330                 }
1331         } else {
1332                 /* Break release sequence with new edges:
1333                  *   release --mo--> write --mo--> rf */
1334                 mo_graph->addEdge(release, write);
1335                 mo_graph->addEdge(write, rf);
1336         }
1337
1338         /* See if we have realized a data race */
1339         checkDataRaces();
1340 }
1341
1342 /**
1343  * Initialize the current action by performing one or more of the following
1344  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1345  * in the NodeStack, manipulating backtracking sets, allocating and
1346  * initializing clock vectors, and computing the promises to fulfill.
1347  *
1348  * @param curr The current action, as passed from the user context; may be
1349  * freed/invalidated after the execution of this function, with a different
1350  * action "returned" its place (pass-by-reference)
1351  * @return True if curr is a newly-explored action; false otherwise
1352  */
1353 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1354 {
1355         ModelAction *newcurr;
1356
1357         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1358                 newcurr = process_rmw(*curr);
1359                 delete *curr;
1360
1361                 if (newcurr->is_rmw())
1362                         compute_promises(newcurr);
1363
1364                 *curr = newcurr;
1365                 return false;
1366         }
1367
1368         (*curr)->set_seq_number(get_next_seq_num());
1369
1370         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1371         if (newcurr) {
1372                 /* First restore type and order in case of RMW operation */
1373                 if ((*curr)->is_rmwr())
1374                         newcurr->copy_typeandorder(*curr);
1375
1376                 ASSERT((*curr)->get_location() == newcurr->get_location());
1377                 newcurr->copy_from_new(*curr);
1378
1379                 /* Discard duplicate ModelAction; use action from NodeStack */
1380                 delete *curr;
1381
1382                 /* Always compute new clock vector */
1383                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1384
1385                 *curr = newcurr;
1386                 return false; /* Action was explored previously */
1387         } else {
1388                 newcurr = *curr;
1389
1390                 /* Always compute new clock vector */
1391                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1392
1393                 /* Assign most recent release fence */
1394                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1395
1396                 /*
1397                  * Perform one-time actions when pushing new ModelAction onto
1398                  * NodeStack
1399                  */
1400                 if (newcurr->is_write())
1401                         compute_promises(newcurr);
1402                 else if (newcurr->is_relseq_fixup())
1403                         compute_relseq_breakwrites(newcurr);
1404                 else if (newcurr->is_wait())
1405                         newcurr->get_node()->set_misc_max(2);
1406                 else if (newcurr->is_notify_one()) {
1407                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1408                 }
1409                 return true; /* This was a new ModelAction */
1410         }
1411 }
1412
1413 /**
1414  * @brief Establish reads-from relation between two actions
1415  *
1416  * Perform basic operations involved with establishing a concrete rf relation,
1417  * including setting the ModelAction data and checking for release sequences.
1418  *
1419  * @param act The action that is reading (must be a read)
1420  * @param rf The action from which we are reading (must be a write)
1421  *
1422  * @return True if this read established synchronization
1423  */
1424 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1425 {
1426         ASSERT(rf);
1427         ASSERT(rf->is_write());
1428
1429         act->set_read_from(rf);
1430         if (act->is_acquire()) {
1431                 rel_heads_list_t release_heads;
1432                 get_release_seq_heads(act, act, &release_heads);
1433                 int num_heads = release_heads.size();
1434                 for (unsigned int i = 0; i < release_heads.size(); i++)
1435                         if (!act->synchronize_with(release_heads[i])) {
1436                                 set_bad_synchronization();
1437                                 num_heads--;
1438                         }
1439                 return num_heads > 0;
1440         }
1441         return false;
1442 }
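
/*
 * Illustrative sketch (assumed user program, not part of the checker): the
 * message-passing idiom shows when read_from() establishes synchronization.
 *
 *   std::atomic<int> flag(0);
 *   int data = 0;
 *
 *   // Thread 1                                 // Thread 2
 *   data = 42;                                  if (flag.load(std::memory_order_acquire) == 1)
 *   flag.store(1, std::memory_order_release);           assert(data == 42);
 *
 * When the acquire load reads from the release store, that store is its own
 * release head, so the synchronize_with() call above succeeds and the
 * assertion cannot fail in any feasible execution.
 */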
1443
1444 /**
1445  * Check promises and eliminate potentially-satisfying threads when a thread is
1446  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1447  * no longer satisfy a promise generated from that thread.
1448  *
1449  * @param blocker The thread on which a thread is waiting
1450  * @param waiting The waiting thread
1451  */
1452 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1453 {
1454         for (unsigned int i = 0; i < promises->size(); i++) {
1455                 Promise *promise = (*promises)[i];
1456                 if (!promise->thread_is_available(waiting->get_id()))
1457                         continue;
1458                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1459                         ModelAction *reader = promise->get_reader(j);
1460                         if (reader->get_tid() != blocker->get_id())
1461                                 continue;
1462                         if (promise->eliminate_thread(waiting->get_id())) {
1463                                 /* Promise has failed */
1464                                 priv->failed_promise = true;
1465                         } else {
1466                                 /* Only eliminate the 'waiting' thread once */
1467                                 return;
1468                         }
1469                 }
1470         }
1471 }
1472
1473 /**
1474  * @brief Check whether a model action is enabled.
1475  *
1476  * Checks whether a lock or join operation would succeed (i.e., whether the
1477  * lock is currently free, or whether the thread to be joined has completed).
1478  * If not, the action is placed in a waiter list.
1479  *
1480  * @param curr The ModelAction to check.
1481  * @return True if the action is enabled; false otherwise.
1482  */
1483 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1484         if (curr->is_lock()) {
1485                 std::mutex *lock = curr->get_mutex();
1486                 struct std::mutex_state *state = lock->get_state();
1487                 if (state->locked)
1488                         return false;
1489         } else if (curr->is_thread_join()) {
1490                 Thread *blocking = curr->get_thread_operand();
1491                 if (!blocking->is_complete()) {
1492                         thread_blocking_check_promises(blocking, get_thread(curr));
1493                         return false;
1494                 }
1495         }
1496
1497         return true;
1498 }
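
/*
 * Illustrative sketch (assumed user code, not part of the checker): with the
 * model checker's std::mutex replacement,
 *
 *   std::mutex m;
 *
 *   // Thread 1        // Thread 2
 *   m.lock();          m.lock();   // reported as disabled until Thread 1 unlocks
 *
 * the second lock() is not enabled and its thread waits on the mutex; a
 * thrd_join() on a thread that has not yet completed is treated the same way,
 * after pruning any promises the joining thread could no longer satisfy.
 */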
1499
1500 /**
1501  * This is the heart of the model checker routine. It performs model-checking
1502  * actions corresponding to a given "current action." Among other tasks, it
1503  * calculates reads-from relationships, updates synchronization clock vectors,
1504  * forms a memory_order constraints graph, and handles replay/backtrack
1505  * execution when running permutations of previously-observed executions.
1506  *
1507  * @param curr The current action to process
1508  * @return The ModelAction that is actually executed; may be different than
1509  * curr; may be NULL, if the current action is not enabled to run
1510  */
1511 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1512 {
1513         ASSERT(curr);
1514         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1515         bool newly_explored = initialize_curr_action(&curr);
1516
1517         DBG();
1518
1519         wake_up_sleeping_actions(curr);
1520
1521         /* Compute fairness information for CHESS yield algorithm */
1522         if (model->params.yieldon) {
1523                 curr->get_node()->update_yield(scheduler);
1524         }
1525
1526         /* Add the action to lists before any other model-checking tasks */
1527         if (!second_part_of_rmw)
1528                 add_action_to_lists(curr);
1529
1530         /* Build may_read_from set for newly-created actions */
1531         if (newly_explored && curr->is_read())
1532                 build_may_read_from(curr);
1533
1534         /* Initialize work_queue with the "current action" work */
1535         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1536         while (!work_queue.empty() && !has_asserted()) {
1537                 WorkQueueEntry work = work_queue.front();
1538                 work_queue.pop_front();
1539
1540                 switch (work.type) {
1541                 case WORK_CHECK_CURR_ACTION: {
1542                         ModelAction *act = work.action;
1543                         bool update = false; /* update this location's release seq's */
1544                         bool update_all = false; /* update all release seq's */
1545
1546                         if (process_thread_action(curr))
1547                                 update_all = true;
1548
1549                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1550                                 update = true;
1551
1552                         if (act->is_write() && process_write(act))
1553                                 update = true;
1554
1555                         if (act->is_fence() && process_fence(act))
1556                                 update_all = true;
1557
1558                         if (act->is_mutex_op() && process_mutex(act))
1559                                 update_all = true;
1560
1561                         if (act->is_relseq_fixup())
1562                                 process_relseq_fixup(curr, &work_queue);
1563
1564                         if (update_all)
1565                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1566                         else if (update)
1567                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1568                         break;
1569                 }
1570                 case WORK_CHECK_RELEASE_SEQ:
1571                         resolve_release_sequences(work.location, &work_queue);
1572                         break;
1573                 case WORK_CHECK_MO_EDGES: {
1574                         /** @todo Complete verification of work_queue */
1575                         ModelAction *act = work.action;
1576                         bool updated = false;
1577
1578                         if (act->is_read()) {
1579                                 const ModelAction *rf = act->get_reads_from();
1580                                 const Promise *promise = act->get_reads_from_promise();
1581                                 if (rf) {
1582                                         if (r_modification_order(act, rf))
1583                                                 updated = true;
1584                                 } else if (promise) {
1585                                         if (r_modification_order(act, promise))
1586                                                 updated = true;
1587                                 }
1588                         }
1589                         if (act->is_write()) {
1590                                 if (w_modification_order(act, NULL))
1591                                         updated = true;
1592                         }
1593                         mo_graph->commitChanges();
1594
1595                         if (updated)
1596                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1597                         break;
1598                 }
1599                 default:
1600                         ASSERT(false);
1601                         break;
1602                 }
1603         }
1604
1605         check_curr_backtracking(curr);
1606         set_backtracking(curr);
1607         return curr;
1608 }
1609
1610 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1611 {
1612         Node *currnode = curr->get_node();
1613         Node *parnode = currnode->get_parent();
1614
1615         if ((parnode && !parnode->backtrack_empty()) ||
1616                          !currnode->misc_empty() ||
1617                          !currnode->read_from_empty() ||
1618                          !currnode->promise_empty() ||
1619                          !currnode->relseq_break_empty()) {
1620                 set_latest_backtrack(curr);
1621         }
1622 }
1623
1624 bool ModelChecker::promises_expired() const
1625 {
1626         for (unsigned int i = 0; i < promises->size(); i++) {
1627                 Promise *promise = (*promises)[i];
1628                 if (promise->get_expiration() < priv->used_sequence_numbers)
1629                         return true;
1630         }
1631         return false;
1632 }
1633
1634 /**
1635  * This is the strongest feasibility check available.
1636  * @return whether the current trace (partial or complete) must be a prefix of
1637  * a feasible trace.
1638  */
1639 bool ModelChecker::isfeasibleprefix() const
1640 {
1641         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1642 }
1643
1644 /**
1645  * Print diagnostic information about an infeasible execution
1646  * @param prefix A string to prefix the output with; if NULL, then a default
1647  * message prefix will be provided
1648  */
1649 void ModelChecker::print_infeasibility(const char *prefix) const
1650 {
1651         char buf[160]; /* large enough to hold all infeasibility messages at once */
1652         char *ptr = buf;
1653         if (mo_graph->checkForCycles())
1654                 ptr += sprintf(ptr, "[mo cycle]");
1655         if (priv->failed_promise)
1656                 ptr += sprintf(ptr, "[failed promise]");
1657         if (priv->too_many_reads)
1658                 ptr += sprintf(ptr, "[too many reads]");
1659         if (priv->no_valid_reads)
1660                 ptr += sprintf(ptr, "[no valid reads-from]");
1661         if (priv->bad_synchronization)
1662                 ptr += sprintf(ptr, "[bad sw ordering]");
1663         if (promises_expired())
1664                 ptr += sprintf(ptr, "[promise expired]");
1665         if (promises->size() != 0)
1666                 ptr += sprintf(ptr, "[unresolved promise]");
1667         if (ptr != buf)
1668                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1669 }
1670
1671 /**
1672  * Returns whether the current completed trace is feasible, except for pending
1673  * release sequences.
1674  */
1675 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1676 {
1677         return !is_infeasible() && promises->size() == 0;
1678 }
1679
1680 /**
1681  * Check if the current partial trace is infeasible. Does not check any
1682  * end-of-execution flags, which might rule out the execution. Thus, this is
1683  * useful only for ruling an execution as infeasible.
1684  * @return whether the current partial trace is infeasible.
1685  */
1686 bool ModelChecker::is_infeasible() const
1687 {
1688         return mo_graph->checkForCycles() ||
1689                 priv->no_valid_reads ||
1690                 priv->failed_promise ||
1691                 priv->too_many_reads ||
1692                 priv->bad_synchronization ||
1693                 promises_expired();
1694 }
1695
1696 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1697 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1698         ModelAction *lastread = get_last_action(act->get_tid());
1699         lastread->process_rmw(act);
1700         if (act->is_rmw()) {
1701                 if (lastread->get_reads_from())
1702                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1703                 else
1704                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1705                 mo_graph->commitChanges();
1706         }
1707         return lastread;
1708 }
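
/*
 * Illustrative sketch (hypothetical user operation, based on how the
 * surrounding code treats RMW actions): an atomic read-modify-write such as
 *
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * is modeled as a read part (RMWR) followed by a write part (RMW, or RMWC if
 * no write takes place). process_rmw() folds the second part back into the
 * earlier RMWR and, for a successful RMW, asks the mo_graph to place the RMW
 * immediately after the write it read from in the modification order.
 */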
1709
1710 /**
1711  * A helper function for ModelChecker::check_recency, to check if the current
1712  * thread is able to read from a different write/promise for 'params.maxreads'
1713  * steps and whether that write/promise should become visible (i.e., is
1714  * ordered later in the modification order). This helps model memory liveness.
1715  *
1716  * @param curr The current action. Must be a read.
1717  * @param rf The write/promise from which we plan to read
1718  * @param other_rf The write/promise from which we may read
1719  * @return True if we were able to read from other_rf for params.maxreads steps
1720  */
1721 template <typename T, typename U>
1722 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1723 {
1724         /* Need a different write/promise */
1725         if (other_rf->equals(rf))
1726                 return false;
1727
1728         /* Only look for "newer" writes/promises */
1729         if (!mo_graph->checkReachable(rf, other_rf))
1730                 return false;
1731
1732         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1733         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1734         action_list_t::reverse_iterator rit = list->rbegin();
1735         ASSERT((*rit) == curr);
1736         /* Skip past curr */
1737         rit++;
1738
1739         /* Does this write/promise work for everyone? */
1740         for (int i = 0; i < params.maxreads; i++, rit++) {
1741                 ModelAction *act = *rit;
1742                 if (!act->may_read_from(other_rf))
1743                         return false;
1744         }
1745         return true;
1746 }
1747
1748 /**
1749  * Checks whether a thread has read from the same write or Promise for too many
1750  * times without seeing the effects of a later write/Promise.
1751  *
1752  * Basic idea:
1753  * 1) there must be a different write/promise that we could read from,
1754  * 2) we must have read from the same write/promise in excess of maxreads times,
1755  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1756  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1757  *
1758  * If so, we decide that the execution is no longer feasible.
1759  *
1760  * @param curr The current action. Must be a read.
1761  * @param rf The ModelAction/Promise from which we might read.
1762  * @return True if the read should succeed; false otherwise
1763  */
1764 template <typename T>
1765 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1766 {
1767         if (!params.maxreads)
1768                 return true;
1769
1770         // NOTE: The next check is just an optimization; it is not strictly necessary.
1771         if (curr->get_node()->get_read_from_past_size() +
1772                         curr->get_node()->get_read_from_promise_size() <= 1)
1773                 return true;
1774
1775         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1776         int tid = id_to_int(curr->get_tid());
1777         ASSERT(tid < (int)thrd_lists->size());
1778         action_list_t *list = &(*thrd_lists)[tid];
1779         action_list_t::reverse_iterator rit = list->rbegin();
1780         ASSERT((*rit) == curr);
1781         /* Skip past curr */
1782         rit++;
1783
1784         action_list_t::reverse_iterator ritcopy = rit;
1785         /* See if we have enough reads from the same value */
1786         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1787                 if (ritcopy == list->rend())
1788                         return true;
1789                 ModelAction *act = *ritcopy;
1790                 if (!act->is_read())
1791                         return true;
1792                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1793                         return true;
1794                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1795                         return true;
1796                 if (act->get_node()->get_read_from_past_size() +
1797                                 act->get_node()->get_read_from_promise_size() <= 1)
1798                         return true;
1799         }
1800         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1801                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1802                 if (should_read_instead(curr, rf, write))
1803                         return false; /* liveness failure */
1804         }
1805         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1806                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1807                 if (should_read_instead(curr, rf, promise))
1808                         return false; /* liveness failure */
1809         }
1810         return true;
1811 }
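
/*
 * Illustrative sketch (assumed user program): the liveness concern addressed
 * by check_recency() is a relaxed spin loop such as
 *
 *   std::atomic<bool> done(false);
 *
 *   // Thread 1                                       // Thread 2
 *   while (!done.load(std::memory_order_relaxed))     done.store(true, std::memory_order_relaxed);
 *           ;
 *
 * Nothing forces any single load to observe the store, but an execution in
 * which Thread 1 reads the stale 'false' forever is not worth exploring. After
 * params.maxreads identical reads while a mod-ordered-later write is
 * available, the execution is treated as a liveness failure.
 */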
1812
1813 /**
1814  * @brief Updates the mo_graph with the constraints imposed from the current
1815  * read.
1816  *
1817  * Basic idea is the following: Go through each other thread and find
1818  * the last action that happened before our read.  Two cases:
1819  *
1820  * -# The action is a write: that write must either occur before
1821  * the write we read from or be the write we read from.
1822  * -# The action is a read: the write that that action read from
1823  * must occur before the write we read from or be the same write.
1824  *
1825  * @param curr The current action. Must be a read.
1826  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1827  * @return True if modification order edges were added; false otherwise
1828  */
1829 template <typename rf_type>
1830 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1831 {
1832         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1833         unsigned int i;
1834         bool added = false;
1835         ASSERT(curr->is_read());
1836
1837         /* Last SC fence in the current thread */
1838         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1839         ModelAction *last_sc_write = NULL;
1840         if (curr->is_seqcst())
1841                 last_sc_write = get_last_seq_cst_write(curr);
1842
1843         /* Iterate over all threads */
1844         for (i = 0; i < thrd_lists->size(); i++) {
1845                 /* Last SC fence in thread i */
1846                 ModelAction *last_sc_fence_thread_local = NULL;
1847                 if (int_to_id((int)i) != curr->get_tid())
1848                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1849
1850                 /* Last SC fence in thread i, before last SC fence in current thread */
1851                 ModelAction *last_sc_fence_thread_before = NULL;
1852                 if (last_sc_fence_local)
1853                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1854
1855                 /* Iterate over actions in thread, starting from most recent */
1856                 action_list_t *list = &(*thrd_lists)[i];
1857                 action_list_t::reverse_iterator rit;
1858                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1859                         ModelAction *act = *rit;
1860
1861                         /* Skip curr */
1862                         if (act == curr)
1863                                 continue;
1864                         /* Don't want to add reflexive edges on 'rf' */
1865                         if (act->equals(rf)) {
1866                                 if (act->happens_before(curr))
1867                                         break;
1868                                 else
1869                                         continue;
1870                         }
1871
1872                         if (act->is_write()) {
1873                                 /* C++, Section 29.3 statement 5 */
1874                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1875                                                 *act < *last_sc_fence_thread_local) {
1876                                         added = mo_graph->addEdge(act, rf) || added;
1877                                         break;
1878                                 }
1879                                 /* C++, Section 29.3 statement 4 */
1880                                 else if (act->is_seqcst() && last_sc_fence_local &&
1881                                                 *act < *last_sc_fence_local) {
1882                                         added = mo_graph->addEdge(act, rf) || added;
1883                                         break;
1884                                 }
1885                                 /* C++, Section 29.3 statement 6 */
1886                                 else if (last_sc_fence_thread_before &&
1887                                                 *act < *last_sc_fence_thread_before) {
1888                                         added = mo_graph->addEdge(act, rf) || added;
1889                                         break;
1890                                 }
1891                         }
1892
1893                         /* C++, Section 29.3 statement 3 (second subpoint) */
1894                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1895                                 added = mo_graph->addEdge(act, rf) || added;
1896                                 break;
1897                         }
1898
1899                         /*
1900                          * Include at most one act per-thread that "happens
1901                          * before" curr
1902                          */
1903                         if (act->happens_before(curr)) {
1904                                 if (act->is_write()) {
1905                                         added = mo_graph->addEdge(act, rf) || added;
1906                                 } else {
1907                                         const ModelAction *prevrf = act->get_reads_from();
1908                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1909                                         if (prevrf) {
1910                                                 if (!prevrf->equals(rf))
1911                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1912                                         } else if (!prevrf_promise->equals(rf)) {
1913                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1914                                         }
1915                                 }
1916                                 break;
1917                         }
1918                 }
1919         }
1920
1921         /*
1922          * All compatible, thread-exclusive promises must be ordered after any
1923          * concrete loads from the same thread
1924          */
1925         for (unsigned int i = 0; i < promises->size(); i++)
1926                 if ((*promises)[i]->is_compatible_exclusive(curr))
1927                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1928
1929         return added;
1930 }
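
/*
 * Illustrative sketch (assumed user program) of case 1 above:
 *
 *   std::atomic<int> x(0), y(0);
 *
 *   // Thread 1                               // Thread 2
 *   x.store(1, std::memory_order_relaxed);    r1 = y.load(std::memory_order_acquire);
 *   y.store(1, std::memory_order_release);    r2 = x.load(std::memory_order_relaxed);
 *
 * If r1 == 1, then Thread 1's x.store(1) happens before Thread 2's load of x.
 * Were that load to read from the initialization of x, this function would add
 * the edge x.store(1) --mo--> init(x), contradicting the requirement that the
 * initialization come first in x's modification order and rendering the
 * execution infeasible; hence r1 == 1 implies r2 == 1.
 */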
1931
1932 /**
1933  * Updates the mo_graph with the constraints imposed from the current write.
1934  *
1935  * Basic idea is the following: Go through each other thread and find
1936  * the latest action that happened before our write.  Two cases:
1937  *
1938  * (1) The action is a write => that write must occur before
1939  * the current write
1940  *
1941  * (2) The action is a read => the write that that action read from
1942  * must occur before the current write.
1943  *
1944  * This method also handles two other issues:
1945  *
1946  * (I) Sequential Consistency: Making sure that if the current write is
1947  * seq_cst, that it occurs after the previous seq_cst write.
1948  *
1949  * (II) Sending the write back to non-synchronizing reads.
1950  *
1951  * @param curr The current action. Must be a write.
1952  * @param send_fv A vector for stashing reads to which we may pass our future
1953  * value. If NULL, then don't record any future values.
1954  * @return True if modification order edges were added; false otherwise
1955  */
1956 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1957 {
1958         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1959         unsigned int i;
1960         bool added = false;
1961         ASSERT(curr->is_write());
1962
1963         if (curr->is_seqcst()) {
1964                 /* We have to at least see the last sequentially consistent write,
1965                          so we are initialized. */
1966                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1967                 if (last_seq_cst != NULL) {
1968                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1969                 }
1970         }
1971
1972         /* Last SC fence in the current thread */
1973         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1974
1975         /* Iterate over all threads */
1976         for (i = 0; i < thrd_lists->size(); i++) {
1977                 /* Last SC fence in thread i, before last SC fence in current thread */
1978                 ModelAction *last_sc_fence_thread_before = NULL;
1979                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1980                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1981
1982                 /* Iterate over actions in thread, starting from most recent */
1983                 action_list_t *list = &(*thrd_lists)[i];
1984                 action_list_t::reverse_iterator rit;
1985                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1986                         ModelAction *act = *rit;
1987                         if (act == curr) {
1988                                 /*
1989                                  * 1) If RMW and it actually read from something, then we
1990                                  * already have all relevant edges, so just skip to next
1991                                  * thread.
1992                                  *
1993                                  * 2) If RMW and it didn't read from anything, we should
1994                                  * take whatever edge we can get to speed up convergence.
1995                                  *
1996                                  * 3) If normal write, we need to look at earlier actions, so
1997                                  * continue processing list.
1998                                  */
1999                                 if (curr->is_rmw()) {
2000                                         if (curr->get_reads_from() != NULL)
2001                                                 break;
2002                                         else
2003                                                 continue;
2004                                 } else
2005                                         continue;
2006                         }
2007
2008                         /* C++, Section 29.3 statement 7 */
2009                         if (last_sc_fence_thread_before && act->is_write() &&
2010                                         *act < *last_sc_fence_thread_before) {
2011                                 added = mo_graph->addEdge(act, curr) || added;
2012                                 break;
2013                         }
2014
2015                         /*
2016                          * Include at most one act per-thread that "happens
2017                          * before" curr
2018                          */
2019                         if (act->happens_before(curr)) {
2020                                 /*
2021                                  * Note: if act is RMW, just add edge:
2022                                  *   act --mo--> curr
2023                                  * The following edge should be handled elsewhere:
2024                                  *   readfrom(act) --mo--> act
2025                                  */
2026                                 if (act->is_write())
2027                                         added = mo_graph->addEdge(act, curr) || added;
2028                                 else if (act->is_read()) {
2029                                 // if the previous read has no concrete reads-from (it reads from a promise), just keep going
2030                                         if (act->get_reads_from() == NULL)
2031                                                 continue;
2032                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2033                                 }
2034                                 break;
2035                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2036                                                      !act->same_thread(curr)) {
2037                                 /* We have an action that:
2038                                    (1) did not happen before us
2039                                    (2) is a read and we are a write
2040                                    (3) cannot synchronize with us
2041                                    (4) is in a different thread
2042                                    =>
2043                                    that read could potentially read from our write.  Note that
2044                                    these checks are overly conservative at this point; we'll
2045                                    do more checks before actually removing the
2046                                    PendingFutureValue.
2047
2048                                  */
2049                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2050                                         if (!is_infeasible())
2051                                                 send_fv->push_back(act);
2052                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2053                                                 add_future_value(curr, act);
2054                                 }
2055                         }
2056                 }
2057         }
2058
2059         /*
2060          * All compatible, thread-exclusive promises must be ordered after any
2061          * concrete stores from the same thread, or else they can be merged with
2062          * this store later
2063          */
2064         for (unsigned int i = 0; i < promises->size(); i++)
2065                 if ((*promises)[i]->is_compatible_exclusive(curr))
2066                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2067
2068         return added;
2069 }
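
/*
 * Illustrative sketch (assumed user program) of case (1) above: two stores
 * sequenced in the same thread,
 *
 *   x.store(1, std::memory_order_relaxed);   // W1
 *   x.store(2, std::memory_order_relaxed);   // W2
 *
 * W1 happens before W2, so when W2 is processed this function adds the edge
 * W1 --mo--> W2 (write-write coherence). Case (2) similarly orders the current
 * write after any write observed by a happens-before-earlier read of the same
 * location.
 */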
2070
2071 /** Arbitrary reads from the future are not allowed.  Section 29.3
2072  * part 9 places some constraints.  This method checks one consequence of that
2073  * constraint.  Others require compiler support. */
2074 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2075 {
2076         if (!writer->is_rmw())
2077                 return true;
2078
2079         if (!reader->is_rmw())
2080                 return true;
2081
2082         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2083                 if (search == reader)
2084                         return false;
2085                 if (search->get_tid() == reader->get_tid() &&
2086                                 search->happens_before(reader))
2087                         break;
2088         }
2089
2090         return true;
2091 }
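
/*
 * Illustrative sketch (assumed user program): the candidate executions this
 * check rules out involve RMWs feeding each other, e.g.
 *
 *   // Thread 1                                       // Thread 2
 *   r1 = x.fetch_add(1, std::memory_order_relaxed);   r2 = x.fetch_add(1, std::memory_order_relaxed);
 *
 * Letting Thread 1's RMW read from Thread 2's while Thread 2's RMW
 * (transitively) reads from Thread 1's would make the reads-from chain
 * circular, i.e. a value read out of thin air. Walking writer->get_reads_from()
 * and encountering the reader detects exactly that candidate.
 */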
2092
2093 /**
2094  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2095  * some constraints. This method checks the following constraint (others
2096  * require compiler support):
2097  *
2098  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2099  */
2100 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2101 {
2102         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2103         unsigned int i;
2104         /* Iterate over all threads */
2105         for (i = 0; i < thrd_lists->size(); i++) {
2106                 const ModelAction *write_after_read = NULL;
2107
2108                 /* Iterate over actions in thread, starting from most recent */
2109                 action_list_t *list = &(*thrd_lists)[i];
2110                 action_list_t::reverse_iterator rit;
2111                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2112                         ModelAction *act = *rit;
2113
2114                         /* Don't disallow due to act == reader */
2115                         if (!reader->happens_before(act) || reader == act)
2116                                 break;
2117                         else if (act->is_write())
2118                                 write_after_read = act;
2119                         else if (act->is_read() && act->get_reads_from() != NULL)
2120                                 write_after_read = act->get_reads_from();
2121                 }
2122
2123                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2124                         return false;
2125         }
2126         return true;
2127 }
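
/*
 * Illustrative sketch (assumed user program) of the constraint above, with all
 * operations on the same atomic x:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1                               // Thread 2
 *   r1 = x.load(std::memory_order_relaxed);   // X
 *   x.store(1, std::memory_order_relaxed);    // Y
 *                                             x.store(2, std::memory_order_relaxed);   // Z
 *
 * If Y --mo--> Z, then X (sequenced before Y) must not receive the future
 * value 2 from Z; doing so would violate read-write coherence, so the pending
 * future value is withheld.
 */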
2128
2129 /**
2130  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2131  * The ModelAction under consideration is expected to be taking part in
2132  * release/acquire synchronization as an object of the "reads from" relation.
2133  * Note that this can only provide release sequence support for RMW chains
2134  * which do not read from the future, as those actions cannot be traced until
2135  * their "promise" is fulfilled. Similarly, we may not even establish the
2136  * presence of a release sequence with certainty, as some modification order
2137  * constraints may be decided further in the future. Thus, this function
2138  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2139  * and a boolean representing certainty.
2140  *
2141  * @param rf The action that might be part of a release sequence. Must be a
2142  * write.
2143  * @param release_heads A pass-by-reference style return parameter. After
2144  * execution of this function, release_heads will contain the heads of all the
2145  * relevant release sequences, if any exist with certainty
2146  * @param pending A pass-by-reference style return parameter which is only used
2147  * when returning false (i.e., uncertain). Records information regarding the
2148  * uncertain release sequence, including any write operations that might
2149  * break the sequence.
2150  * @return true, if the ModelChecker is certain that release_heads is complete;
2151  * false otherwise
2152  */
2153 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2154                 rel_heads_list_t *release_heads,
2155                 struct release_seq *pending) const
2156 {
2157         /* Only check for release sequences if there are no cycles */
2158         if (mo_graph->checkForCycles())
2159                 return false;
2160
2161         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2162                 ASSERT(rf->is_write());
2163
2164                 if (rf->is_release())
2165                         release_heads->push_back(rf);
2166                 else if (rf->get_last_fence_release())
2167                         release_heads->push_back(rf->get_last_fence_release());
2168                 if (!rf->is_rmw())
2169                         break; /* End of RMW chain */
2170
2171                 /** @todo Need to be smarter here...  In the linux lock
2172                  * example, this will run to the beginning of the program for
2173                  * every acquire. */
2174                 /** @todo The way to be smarter here is to keep going until one
2175                  * thread has a release preceded by an acquire and you've seen
2176                  * both. */
2177
2178                 /* acq_rel RMW is a sufficient stopping condition */
2179                 if (rf->is_acquire() && rf->is_release())
2180                         return true; /* complete */
2181         }
2182         if (!rf) {
2183                 /* read from future: need to settle this later */
2184                 pending->rf = NULL;
2185                 return false; /* incomplete */
2186         }
2187
2188         if (rf->is_release())
2189                 return true; /* complete */
2190
2191         /* else relaxed write
2192          * - check for fence-release in the same thread (29.8, stmt. 3)
2193          * - check modification order for contiguous subsequence
2194          *   -> rf must be same thread as release */
2195
2196         const ModelAction *fence_release = rf->get_last_fence_release();
2197         /* Synchronize with a fence-release unconditionally; we don't need to
2198          * find any more "contiguous subsequence..." for it */
2199         if (fence_release)
2200                 release_heads->push_back(fence_release);
2201
2202         int tid = id_to_int(rf->get_tid());
2203         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2204         action_list_t *list = &(*thrd_lists)[tid];
2205         action_list_t::const_reverse_iterator rit;
2206
2207         /* Find rf in the thread list */
2208         rit = std::find(list->rbegin(), list->rend(), rf);
2209         ASSERT(rit != list->rend());
2210
2211         /* Find the last {write,fence}-release */
2212         for (; rit != list->rend(); rit++) {
2213                 if (fence_release && *(*rit) < *fence_release)
2214                         break;
2215                 if ((*rit)->is_release())
2216                         break;
2217         }
2218         if (rit == list->rend()) {
2219                 /* No write-release in this thread */
2220                 return true; /* complete */
2221         } else if (fence_release && *(*rit) < *fence_release) {
2222                 /* The fence-release is more recent (and so, "stronger") than
2223                  * the most recent write-release */
2224                 return true; /* complete */
2225         } /* else, need to establish contiguous release sequence */
2226         ModelAction *release = *rit;
2227
2228         ASSERT(rf->same_thread(release));
2229
2230         pending->writes.clear();
2231
2232         bool certain = true;
2233         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2234                 if (id_to_int(rf->get_tid()) == (int)i)
2235                         continue;
2236                 list = &(*thrd_lists)[i];
2237
2238                 /* Can we ensure no future writes from this thread may break
2239                  * the release seq? */
2240                 bool future_ordered = false;
2241
2242                 ModelAction *last = get_last_action(int_to_id(i));
2243                 Thread *th = get_thread(int_to_id(i));
2244                 if ((last && rf->happens_before(last)) ||
2245                                 !is_enabled(th) ||
2246                                 th->is_complete())
2247                         future_ordered = true;
2248
2249                 ASSERT(!th->is_model_thread() || future_ordered);
2250
2251                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2252                         const ModelAction *act = *rit;
2253                         /* Reach synchronization -> this thread is complete */
2254                         if (act->happens_before(release))
2255                                 break;
2256                         if (rf->happens_before(act)) {
2257                                 future_ordered = true;
2258                                 continue;
2259                         }
2260
2261                         /* Only non-RMW writes can break release sequences */
2262                         if (!act->is_write() || act->is_rmw())
2263                                 continue;
2264
2265                         /* Check modification order */
2266                         if (mo_graph->checkReachable(rf, act)) {
2267                                 /* rf --mo--> act */
2268                                 future_ordered = true;
2269                                 continue;
2270                         }
2271                         if (mo_graph->checkReachable(act, release))
2272                                 /* act --mo--> release */
2273                                 break;
2274                         if (mo_graph->checkReachable(release, act) &&
2275                                       mo_graph->checkReachable(act, rf)) {
2276                                 /* release --mo-> act --mo--> rf */
2277                                 return true; /* complete */
2278                         }
2279                         /* act may break release sequence */
2280                         pending->writes.push_back(act);
2281                         certain = false;
2282                 }
2283                 if (!future_ordered)
2284                         certain = false; /* This thread is uncertain */
2285         }
2286
2287         if (certain) {
2288                 release_heads->push_back(release);
2289                 pending->writes.clear();
2290         } else {
2291                 pending->release = release;
2292                 pending->rf = rf;
2293         }
2294         return certain;
2295 }
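
/*
 * Illustrative sketch (assumed user program) of the relaxed-write case handled
 * above:
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1                                // Thread 2
 *   x.store(1, std::memory_order_release);     // rel
 *   x.store(2, std::memory_order_relaxed);     // rf
 *                                              x.store(3, std::memory_order_relaxed);   // w
 *
 * An acquire load reading from 'rf' synchronizes with 'rel' only if no foreign
 * write such as 'w' falls between them in x's modification order. While that
 * ordering is still undecided, 'w' is recorded in pending->writes and the
 * question is revisited by resolve_release_sequences().
 */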
2296
2297 /**
2298  * An interface for getting the release sequence head(s) with which a
2299  * given ModelAction must synchronize. This function only returns a non-empty
2300  * result when it can locate a release sequence head with certainty. Otherwise,
2301  * it may mark the internal state of the ModelChecker so that it will handle
2302  * the release sequence at a later time, causing @a acquire to update its
2303  * synchronization at some later point in execution.
2304  *
2305  * @param acquire The 'acquire' action that may synchronize with a release
2306  * sequence
2307  * @param read The read action that may read from a release sequence; this may
2308  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2309  * when 'acquire' is a fence-acquire)
2310  * @param release_heads A pass-by-reference return parameter. Will be filled
2311  * with the head(s) of the release sequence(s), if they exist with certainty.
2312  * @see ModelChecker::release_seq_heads
2313  */
2314 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2315                 ModelAction *read, rel_heads_list_t *release_heads)
2316 {
2317         const ModelAction *rf = read->get_reads_from();
2318         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2319         sequence->acquire = acquire;
2320         sequence->read = read;
2321
2322         if (!release_seq_heads(rf, release_heads, sequence)) {
2323                 /* add act to 'lazy checking' list */
2324                 pending_rel_seqs->push_back(sequence);
2325         } else {
2326                 snapshot_free(sequence);
2327         }
2328 }
2329
2330 /**
2331  * Attempt to resolve all stashed operations that might synchronize with a
2332  * release sequence for a given location. This implements the "lazy" portion of
2333  * determining whether or not a release sequence was contiguous, since not all
2334  * modification order information is present at the time an action occurs.
2335  *
2336  * @param location The location/object that should be checked for release
2337  * sequence resolutions. A NULL value means to check all locations.
2338  * @param work_queue The work queue to which to add work items as they are
2339  * generated
2340  * @return True if any updates occurred (new synchronization, new mo_graph
2341  * edges)
2342  */
2343 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2344 {
2345         bool updated = false;
2346         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2347         while (it != pending_rel_seqs->end()) {
2348                 struct release_seq *pending = *it;
2349                 ModelAction *acquire = pending->acquire;
2350                 const ModelAction *read = pending->read;
2351
2352                 /* Only resolve sequences on the given location, if provided */
2353                 if (location && read->get_location() != location) {
2354                         it++;
2355                         continue;
2356                 }
2357
2358                 const ModelAction *rf = read->get_reads_from();
2359                 rel_heads_list_t release_heads;
2360                 bool complete;
2361                 complete = release_seq_heads(rf, &release_heads, pending);
2362                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2363                         if (!acquire->has_synchronized_with(release_heads[i])) {
2364                                 if (acquire->synchronize_with(release_heads[i]))
2365                                         updated = true;
2366                                 else
2367                                         set_bad_synchronization();
2368                         }
2369                 }
2370
2371                 if (updated) {
2372                         /* Re-check all pending release sequences */
2373                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2374                         /* Re-check read-acquire for mo_graph edges */
2375                         if (acquire->is_read())
2376                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2377
2378                         /* propagate synchronization to later actions */
2379                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2380                         for (; (*rit) != acquire; rit++) {
2381                                 ModelAction *propagate = *rit;
2382                                 if (acquire->happens_before(propagate)) {
2383                                         propagate->synchronize_with(acquire);
2384                                         /* Re-check 'propagate' for mo_graph edges */
2385                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2386                                 }
2387                         }
2388                 }
2389                 if (complete) {
2390                         it = pending_rel_seqs->erase(it);
2391                         snapshot_free(pending);
2392                 } else {
2393                         it++;
2394                 }
2395         }
2396
2397         // If we resolved any release sequences (added synchronization), see if we have realized a data race.
2398         checkDataRaces();
2399
2400         return updated;
2401 }
2402
2403 /**
2404  * Performs various bookkeeping operations for the current ModelAction. For
2405  * instance, adds action to the per-object, per-thread action vector and to the
2406  * action trace list of all thread actions.
2407  *
2408  * @param act is the ModelAction to add.
2409  */
2410 void ModelChecker::add_action_to_lists(ModelAction *act)
2411 {
2412         int tid = id_to_int(act->get_tid());
2413         ModelAction *uninit = NULL;
2414         int uninit_id = -1;
2415         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2416         if (list->empty() && act->is_atomic_var()) {
2417                 uninit = get_uninitialized_action(act);
2418                 uninit_id = id_to_int(uninit->get_tid());
2419                 list->push_front(uninit);
2420         }
2421         list->push_back(act);
2422
2423         action_trace->push_back(act);
2424         if (uninit)
2425                 action_trace->push_front(uninit);
2426
2427         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2428         if (tid >= (int)vec->size())
2429                 vec->resize(priv->next_thread_id);
2430         (*vec)[tid].push_back(act);
2431         if (uninit)
2432                 (*vec)[uninit_id].push_front(uninit);
2433
2434         if ((int)thrd_last_action->size() <= tid)
2435                 thrd_last_action->resize(get_num_threads());
2436         (*thrd_last_action)[tid] = act;
2437         if (uninit)
2438                 (*thrd_last_action)[uninit_id] = uninit;
2439
2440         if (act->is_fence() && act->is_release()) {
2441                 if ((int)thrd_last_fence_release->size() <= tid)
2442                         thrd_last_fence_release->resize(get_num_threads());
2443                 (*thrd_last_fence_release)[tid] = act;
2444         }
2445
2446         if (act->is_wait()) {
2447                 void *mutex_loc = (void *) act->get_value();
2448                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2449
2450                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2451                 if (tid >= (int)vec->size())
2452                         vec->resize(priv->next_thread_id);
2453                 (*vec)[tid].push_back(act);
2454         }
2455 }
2456
2457 /**
2458  * @brief Get the last action performed by a particular Thread
2459  * @param tid The thread ID of the Thread in question
2460  * @return The last action in the thread
2461  */
2462 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2463 {
2464         int threadid = id_to_int(tid);
2465         if (threadid < (int)thrd_last_action->size())
2466                 return (*thrd_last_action)[id_to_int(tid)];
2467         else
2468                 return NULL;
2469 }
2470
2471 /**
2472  * @brief Get the last fence release performed by a particular Thread
2473  * @param tid The thread ID of the Thread in question
2474  * @return The last fence release in the thread, if one exists; NULL otherwise
2475  */
2476 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2477 {
2478         int threadid = id_to_int(tid);
2479         if (threadid < (int)thrd_last_fence_release->size())
2480                 return (*thrd_last_fence_release)[id_to_int(tid)];
2481         else
2482                 return NULL;
2483 }
2484
2485 /**
2486  * Gets the last memory_order_seq_cst write (in the total global sequence)
2487  * performed on a particular object (i.e., memory location), not including the
2488  * current action.
2489  * @param curr The current ModelAction; also denotes the object location to
2490  * check
2491  * @return The last seq_cst write
2492  */
2493 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2494 {
2495         void *location = curr->get_location();
2496         action_list_t *list = get_safe_ptr_action(obj_map, location);
2497         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2498         action_list_t::reverse_iterator rit;
2499         for (rit = list->rbegin(); (*rit) != curr; rit++)
2500                 ;
2501         rit++; /* Skip past curr */
2502         for ( ; rit != list->rend(); rit++)
2503                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2504                         return *rit;
2505         return NULL;
2506 }
2507
2508 /**
2509  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2510  * performed in a particular thread, prior to a particular fence.
2511  * @param tid The ID of the thread to check
2512  * @param before_fence The fence from which to begin the search; if NULL, then
2513  * search for the most recent fence in the thread.
2514  * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
2515  */
2516 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2517 {
2518         /* All fences should have NULL location */
2519         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2520         action_list_t::reverse_iterator rit = list->rbegin();
2521
2522         if (before_fence) {
2523                 for (; rit != list->rend(); rit++)
2524                         if (*rit == before_fence)
2525                                 break;
2526
2527                 ASSERT(*rit == before_fence);
2528                 rit++;
2529         }
2530
2531         for (; rit != list->rend(); rit++)
2532                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2533                         return *rit;
2534         return NULL;
2535 }
2536
2537 /**
2538  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2539  * location). This function identifies the mutex according to the current
2540  * action, which is presumed to operate on the same mutex.
2541  * @param curr The current ModelAction; also denotes the object location to
2542  * check
2543  * @return The last unlock operation
2544  */
2545 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2546 {
2547         void *location = curr->get_location();
2548         action_list_t *list = get_safe_ptr_action(obj_map, location);
2549         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2550         action_list_t::reverse_iterator rit;
2551         for (rit = list->rbegin(); rit != list->rend(); rit++)
2552                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2553                         return *rit;
2554         return NULL;
2555 }
2556
2557 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2558 {
2559         ModelAction *parent = get_last_action(tid);
2560         if (!parent)
2561                 parent = get_thread(tid)->get_creation();
2562         return parent;
2563 }
2564
2565 /**
2566  * Returns the clock vector for a given thread.
2567  * @param tid The thread whose clock vector we want
2568  * @return Desired clock vector
2569  */
2570 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2571 {
2572         return get_parent_action(tid)->get_cv();
2573 }
2574
2575 /**
2576  * @brief Find the promise (if any) to resolve for the current action and
2577  * remove it from the pending promise vector
2578  * @param curr The current ModelAction. Should be a write.
2579  * @return The Promise to resolve, if any; otherwise NULL
2580  */
2581 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2582 {
2583         for (unsigned int i = 0; i < promises->size(); i++)
2584                 if (curr->get_node()->get_promise(i)) {
2585                         Promise *ret = (*promises)[i];
2586                         promises->erase(promises->begin() + i);
2587                         return ret;
2588                 }
2589         return NULL;
2590 }
2591
2592 /**
2593  * Resolve a Promise with a current write.
2594  * @param write The ModelAction that is fulfilling Promises
2595  * @param promise The Promise to resolve
2596  * @return True if the Promise was successfully resolved; false otherwise
2597  */
2598 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2599 {
2600         ModelVector<ModelAction *> actions_to_check;
2601
2602         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2603                 ModelAction *read = promise->get_reader(i);
2604                 read_from(read, write);
2605                 actions_to_check.push_back(read);
2606         }
2607         /* Make sure the promise's value matches the write's value */
2608         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2609         if (!mo_graph->resolvePromise(promise, write))
2610                 priv->failed_promise = true;
2611
2612         /**
2613          * @todo  It is possible to end up in an inconsistent state, where a
2614          * "resolved" promise may still be referenced if
2615          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2616          *
2617          * Note that the inconsistency only matters when dumping mo_graph to
2618          * file.
2619          *
2620          * delete promise;
2621          */
2622
2623         // Check whether reading these writes has made threads unable to
2624         // resolve promises
2625         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2626                 ModelAction *read = actions_to_check[i];
2627                 mo_check_promises(read, true);
2628         }
2629
2630         return true;
2631 }
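
/*
 * Expected pairing of the two routines above (sketch of a hypothetical
 * write-processing caller): pop the promise selected for this write, if any,
 * and resolve it against the write.
 *
 *         Promise *promise = pop_promise_to_resolve(curr);
 *         if (promise)
 *                 resolve_promise(curr, promise);
 */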
2632
2633 /**
2634  * Compute the set of promises that could potentially be satisfied by this
2635  * action. Note that the set computation actually appears in the Node, not in
2636  * ModelChecker.
2637  * @param curr The ModelAction that may satisfy promises
2638  */
2639 void ModelChecker::compute_promises(ModelAction *curr)
2640 {
2641         for (unsigned int i = 0; i < promises->size(); i++) {
2642                 Promise *promise = (*promises)[i];
2643                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2644                         continue;
2645
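                /* curr can satisfy this promise only if none of the promise's
                 * readers happens before curr or could synchronize with it */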
2646                 bool satisfy = true;
2647                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2648                         const ModelAction *act = promise->get_reader(j);
2649                         if (act->happens_before(curr) ||
2650                                         act->could_synchronize_with(curr)) {
2651                                 satisfy = false;
2652                                 break;
2653                         }
2654                 }
2655                 if (satisfy)
2656                         curr->get_node()->set_promise(i);
2657         }
2658 }
2659
2660 /** Checks promises in response to a change in a thread's ClockVector. */
2661 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2662 {
2663         for (unsigned int i = 0; i < promises->size(); i++) {
2664                 Promise *promise = (*promises)[i];
2665                 if (!promise->thread_is_available(tid))
2666                         continue;
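                /* If tid has newly synchronized with one of the promise's
                 * readers, tid can no longer satisfy the promise; eliminate it
                 * (and fail the promise once no threads remain) */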
2667                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2668                         const ModelAction *act = promise->get_reader(j);
2669                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2670                                         merge_cv->synchronized_since(act)) {
2671                                 if (promise->eliminate_thread(tid)) {
2672                                         /* Promise has failed */
2673                                         priv->failed_promise = true;
2674                                         return;
2675                                 }
2676                         }
2677                 }
2678         }
2679 }
2680
2681 void ModelChecker::check_promises_thread_disabled()
2682 {
2683         for (unsigned int i = 0; i < promises->size(); i++) {
2684                 Promise *promise = (*promises)[i];
2685                 if (promise->has_failed()) {
2686                         priv->failed_promise = true;
2687                         return;
2688                 }
2689         }
2690 }
2691
2692 /**
2693  * @brief Checks promises in response to addition to modification order for
2694  * threads.
2695  *
2696  * We test whether threads are still available for satisfying promises after an
2697  * addition to our modification order constraints. Those that are unavailable
2698  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2699  * that promise has failed.
2700  *
2701  * @param act The ModelAction which updated the modification order
2702  * @param is_read_check Should be true if act is a read and we must check for
2703  * updates to the store from which it read (there is a distinction here for
2704  * RMW's, which are both a load and a store)
2705  */
2706 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2707 {
2708         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2709
2710         for (unsigned int i = 0; i < promises->size(); i++) {
2711                 Promise *promise = (*promises)[i];
2712
2713                 // Is this promise on the same location?
2714                 if (!promise->same_location(write))
2715                         continue;
2716
2717                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2718                         const ModelAction *pread = promise->get_reader(j);
2719                         if (!pread->happens_before(act))
2720                                continue;
2721                         if (mo_graph->checkPromise(write, promise)) {
2722                                 priv->failed_promise = true;
2723                                 return;
2724                         }
2725                         break;
2726                 }
2727
2728                 // Don't do any lookups twice for the same thread
2729                 if (!promise->thread_is_available(act->get_tid()))
2730                         continue;
2731
2732                 if (mo_graph->checkReachable(promise, write)) {
2733                         if (mo_graph->checkPromise(write, promise)) {
2734                                 priv->failed_promise = true;
2735                                 return;
2736                         }
2737                 }
2738         }
2739 }
2740
2741 /**
2742  * Compute the set of writes that may break the current pending release
2743  * sequence. This information is extracted from previous release sequence
2744  * calculations.
2745  *
2746  * @param curr The current ModelAction. Must be a release sequence fixup
2747  * action.
2748  */
2749 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2750 {
2751         if (pending_rel_seqs->empty())
2752                 return;
2753
2754         struct release_seq *pending = pending_rel_seqs->back();
2755         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2756                 const ModelAction *write = pending->writes[i];
2757                 curr->get_node()->add_relseq_break(write);
2758         }
2759
2760         /* NULL means don't break the sequence; just synchronize */
2761         curr->get_node()->add_relseq_break(NULL);
2762 }
2763
2764 /**
2765  * Build up an initial set of all past writes that this 'read' action may read
2766  * from, as well as any previously-observed future values that must still be valid.
2767  *
2768  * @param curr is the current ModelAction that we are exploring; it must be a
2769  * 'read' operation.
2770  */
2771 void ModelChecker::build_may_read_from(ModelAction *curr)
2772 {
2773         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2774         unsigned int i;
2775         ASSERT(curr->is_read());
2776
2777         ModelAction *last_sc_write = NULL;
2778
2779         if (curr->is_seqcst())
2780                 last_sc_write = get_last_seq_cst_write(curr);
2781
2782         /* Iterate over all threads */
2783         for (i = 0; i < thrd_lists->size(); i++) {
2784                 /* Iterate over actions in thread, starting from most recent */
2785                 action_list_t *list = &(*thrd_lists)[i];
2786                 action_list_t::reverse_iterator rit;
2787                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2788                         ModelAction *act = *rit;
2789
2790                         /* Only consider 'write' actions */
2791                         if (!act->is_write() || act == curr)
2792                                 continue;
2793
2794                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
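                        /* Concretely: a seq_cst read may not read from a
                         * seq_cst write other than the most recent one, nor
                         * from any write happening before that most recent
                         * seq_cst write. */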
2795                         bool allow_read = true;
2796
2797                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2798                                 allow_read = false;
2799                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2800                                 allow_read = false;
2801
2802                         if (allow_read) {
2803                                 /* Only add feasible reads */
2804                                 mo_graph->startChanges();
2805                                 r_modification_order(curr, act);
2806                                 if (!is_infeasible())
2807                                         curr->get_node()->add_read_from_past(act);
2808                                 mo_graph->rollbackChanges();
2809                         }
2810
2811                         /* Include at most one act per-thread that "happens before" curr */
2812                         if (act->happens_before(curr))
2813                                 break;
2814                 }
2815         }
2816
2817         /* Inherit existing, promised future values */
2818         for (i = 0; i < promises->size(); i++) {
2819                 const Promise *promise = (*promises)[i];
2820                 const ModelAction *promise_read = promise->get_reader(0);
2821                 if (promise_read->same_var(curr)) {
2822                         /* Only add feasible future-values */
2823                         mo_graph->startChanges();
2824                         r_modification_order(curr, promise);
2825                         if (!is_infeasible())
2826                                 curr->get_node()->add_read_from_promise(promise_read);
2827                         mo_graph->rollbackChanges();
2828                 }
2829         }
2830
2831         /* We may find no valid may-read-from set only if the execution is doomed */
2832         if (!curr->get_node()->read_from_size()) {
2833                 priv->no_valid_reads = true;
2834                 set_assert();
2835         }
2836
2837         if (DBG_ENABLED()) {
2838                 model_print("Reached read action:\n");
2839                 curr->print();
2840                 model_print("Printing read_from_past\n");
2841                 curr->get_node()->print_read_from_past();
2842                 model_print("End printing read_from_past\n");
2843         }
2844 }
2845
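/**
 * @brief Check whether a thread in the sleep set may read from a given write
 *
 * Walks backward through the reads-from chain of RMW operations feeding
 * 'write'. The read is allowed if the chain reaches the uninitialized store,
 * or if some write along the chain is a release store that was performed
 * while the reading thread was in the sleep set.
 *
 * @param curr The read action, performed by a thread in the sleep set
 * @param write The candidate write to read from
 * @return True if the sleeping thread may read from this write
 */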
2846 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2847 {
2848         for ( ; write != NULL; write = write->get_reads_from()) {
2849                 /* UNINIT actions don't have a Node, and they never sleep */
2850                 if (write->is_uninitialized())
2851                         return true;
2852                 Node *prevnode = write->get_node()->get_parent();
2853
2854                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2855                 if (write->is_release() && thread_sleep)
2856                         return true;
2857                 if (!write->is_rmw())
2858                         return false;
2859         }
2860         return true;
2861 }
2862
2863 /**
2864  * @brief Get an action representing an uninitialized atomic
2865  *
2866  * This function may create a new one, or retrieve one cached on the current action's Node
2867  *
2868  * @param curr The current action, which prompts the creation of an UNINIT action
2869  * @return A pointer to the UNINIT ModelAction
2870  */
2871 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2872 {
2873         Node *node = curr->get_node();
2874         ModelAction *act = node->get_uninit_action();
2875         if (!act) {
2876                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2877                 node->set_uninit_action(act);
2878         }
2879         act->create_cv(NULL);
2880         return act;
2881 }
2882
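/** @brief Print an action list, followed by a hash summarizing its contents */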
2883 static void print_list(action_list_t *list)
2884 {
2885         action_list_t::iterator it;
2886
2887         model_print("---------------------------------------------------------------------\n");
2888
2889         unsigned int hash = 0;
2890
2891         for (it = list->begin(); it != list->end(); it++) {
2892                 const ModelAction *act = *it;
2893                 if (act->get_seq_number() > 0)
2894                         act->print();
2895                 hash = hash^(hash<<3)^((*it)->hash());
2896         }
2897         model_print("HASH %u\n", hash);
2898         model_print("---------------------------------------------------------------------\n");
2899 }
2900
2901 #if SUPPORT_MOD_ORDER_DUMP
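/**
 * @brief Dump the current execution as a Graphviz "dot" graph
 *
 * Writes the modification-order graph nodes along with reads-from ("rf") and
 * sequenced-before ("sb") edges for the recorded action trace.
 *
 * @param filename Base name for the output file; a ".dot" suffix is appended
 */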
2902 void ModelChecker::dumpGraph(char *filename) const
2903 {
2904         char buffer[200];
2905         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2906         FILE *file = fopen(buffer, "w");
2907         fprintf(file, "digraph %s {\n", filename);
2908         mo_graph->dumpNodes(file);
2909         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2910
2911         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2912                 ModelAction *act = *it;
2913                 if (act->is_read()) {
2914                         mo_graph->dot_print_node(file, act);
2915                         if (act->get_reads_from())
2916                                 mo_graph->dot_print_edge(file,
2917                                                 act->get_reads_from(),
2918                                                 act,
2919                                                 "label=\"rf\", color=red, weight=2");
2920                         else
2921                                 mo_graph->dot_print_edge(file,
2922                                                 act->get_reads_from_promise(),
2923                                                 act,
2924                                                 "label=\"rf\", color=red");
2925                 }
2926                 if (thread_array[id_to_int(act->get_tid())]) {
2927                         mo_graph->dot_print_edge(file,
2928                                         thread_array[id_to_int(act->get_tid())],
2929                                         act,
2930                                         "label=\"sb\", color=blue, weight=400");
2931                 }
2932
2933                 thread_array[id_to_int(act->get_tid())] = act;
2934         }
2935         fprintf(file, "}\n");
2936         model_free(thread_array);
2937         fclose(file);
2938 }
2939 #endif
2940
2941 /** @brief Prints an execution trace summary. */
2942 void ModelChecker::print_summary() const
2943 {
2944 #if SUPPORT_MOD_ORDER_DUMP
2945         char buffername[100];
2946         sprintf(buffername, "exec%04u", stats.num_total);
2947         mo_graph->dumpGraphToFile(buffername);
2948         sprintf(buffername, "graph%04u", stats.num_total);
2949         dumpGraph(buffername);
2950 #endif
2951
2952         model_print("Execution %d:", stats.num_total);
2953         if (isfeasibleprefix()) {
2954                 if (scheduler->all_threads_sleeping())
2955                         model_print(" SLEEP-SET REDUNDANT");
2956                 model_print("\n");
2957         } else
2958                 print_infeasibility(" INFEASIBLE");
2959         print_list(action_trace);
2960         model_print("\n");
2961         if (!promises->empty()) {
2962                 model_print("Pending promises:\n");
2963                 for (unsigned int i = 0; i < promises->size(); i++) {
2964                         model_print(" [P%u] ", i);
2965                         (*promises)[i]->print();
2966                 }
2967                 model_print("\n");
2968         }
2969 }
2970
2971 /**
2972  * Add a Thread to the system for the first time. Should only be called once
2973  * per thread.
2974  * @param t The Thread to add
2975  */
2976 void ModelChecker::add_thread(Thread *t)
2977 {
2978         thread_map->put(id_to_int(t->get_id()), t);
2979         scheduler->add_thread(t);
2980 }
2981
2982 /**
2983  * @brief Get a Thread reference by its ID
2984  * @param tid The Thread's ID
2985  * @return A Thread reference
2986  */
2987 Thread * ModelChecker::get_thread(thread_id_t tid) const
2988 {
2989         return thread_map->get(id_to_int(tid));
2990 }
2991
2992 /**
2993  * @brief Get a reference to the Thread in which a ModelAction was executed
2994  * @param act The ModelAction
2995  * @return A Thread reference
2996  */
2997 Thread * ModelChecker::get_thread(const ModelAction *act) const
2998 {
2999         return get_thread(act->get_tid());
3000 }
3001
3002 /**
3003  * @brief Get a Promise's "promise number"
3004  *
3005  * A "promise number" is an index number that is unique to a promise, valid
3006  * only for a specific snapshot of an execution trace. Promises may come and go
3007  * as they are generated and resolved, so an index only retains meaning for the
3008  * current snapshot.
3009  *
3010  * @param promise The Promise to check
3011  * @return The promise index, if the promise still is valid; otherwise -1
3012  */
3013 int ModelChecker::get_promise_number(const Promise *promise) const
3014 {
3015         for (unsigned int i = 0; i < promises->size(); i++)
3016                 if ((*promises)[i] == promise)
3017                         return i;
3018         /* Not found */
3019         return -1;
3020 }
3021
3022 /**
3023  * @brief Check if a Thread is currently enabled
3024  * @param t The Thread to check
3025  * @return True if the Thread is currently enabled
3026  */
3027 bool ModelChecker::is_enabled(Thread *t) const
3028 {
3029         return scheduler->is_enabled(t);
3030 }
3031
3032 /**
3033  * @brief Check if a Thread is currently enabled
3034  * @param tid The ID of the Thread to check
3035  * @return True if the Thread is currently enabled
3036  */
3037 bool ModelChecker::is_enabled(thread_id_t tid) const
3038 {
3039         return scheduler->is_enabled(tid);
3040 }
3041
3042 /**
3043  * Switch from a model-checker context to a user-thread context. This is the
3044  * complement of ModelChecker::switch_to_master and must be called from the
3045  * model-checker context
3046  *
3047  * @param thread The user-thread to switch to
3048  */
3049 void ModelChecker::switch_from_master(Thread *thread)
3050 {
3051         scheduler->set_current_thread(thread);
3052         Thread::swap(&system_context, thread);
3053 }
3054
3055 /**
3056  * Switch from a user-context to the "master thread" context (a.k.a. system
3057  * context). This switch is made with the intention of exploring a particular
3058  * model-checking action (described by a ModelAction object). Must be called
3059  * from a user-thread context.
3060  *
3061  * @param act The current action that will be explored. May be NULL only if
3062  * trace is exiting via an assertion (see ModelChecker::set_assert and
3063  * ModelChecker::has_asserted).
3064  * @return Return the value returned by the current action
3065  */
3066 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3067 {
3068         DBG();
3069         Thread *old = thread_current();
3070         scheduler->set_current_thread(NULL);
3071         ASSERT(!old->get_pending());
3072         old->set_pending(act);
3073         if (Thread::swap(old, &system_context) < 0) {
3074                 perror("swap threads");
3075                 exit(EXIT_FAILURE);
3076         }
3077         return old->get_return_value();
3078 }
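
/*
 * Minimal sketch of the intended call pattern (hypothetical wrapper for an
 * atomic operation, executing in a user thread; the constructor arguments
 * shown are assumptions, not taken from this file):
 *
 *         ModelAction *act = new ModelAction(ATOMIC_READ, order, location);
 *         uint64_t value = model->switch_to_master(act);
 *
 * The model checker explores 'act' and eventually resumes this thread with
 * the value the operation should return.
 */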
3079
3080 /**
3081  * Takes the next step in the execution, if possible.
3082  * @param curr The current step to take
3083  * @return Returns the next Thread to run, if any; NULL if this execution
3084  * should terminate
3085  */
3086 Thread * ModelChecker::take_step(ModelAction *curr)
3087 {
3088         Thread *curr_thrd = get_thread(curr);
3089         ASSERT(curr_thrd->get_state() == THREAD_READY);
3090
3091         ASSERT(check_action_enabled(curr)); /* May have side effects? */
3092         curr = check_current_action(curr);
3093         ASSERT(curr);
3094
3095         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3096                 scheduler->remove_thread(curr_thrd);
3097
3098         return action_select_next_thread(curr);
3099 }
3100
3101 /** Wrapper to run the user's main function, with appropriate arguments */
3102 void user_main_wrapper(void *)
3103 {
3104         user_main(model->params.argc, model->params.argv);
3105 }
3106
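/**
 * @brief Decide whether the current execution should stop taking steps
 *
 * Execution terminates when it has become infeasible, when a feasible prefix
 * already carries bug reports (in which case we assert to cut the trace
 * short), or when the user-specified step bound (params.bound) is exceeded.
 *
 * @return True if this execution should terminate
 */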
3107 bool ModelChecker::should_terminate_execution()
3108 {
3109         /* Infeasible -> don't take any more steps */
3110         if (is_infeasible())
3111                 return true;
3112         else if (isfeasibleprefix() && have_bug_reports()) {
3113                 set_assert();
3114                 return true;
3115         }
3116
3117         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3118                 return true;
3119         return false;
3120 }
3121
3122 /** @brief Run ModelChecker for the user program */
3123 void ModelChecker::run()
3124 {
3125         do {
3126                 thrd_t user_thread;
3127                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3128                 add_thread(t);
3129
3130                 do {
3131                         /*
3132                          * Stash next pending action(s) for thread(s). There
3133                          * should only need to stash one thread's action--the
3134                          * thread which just took a step--plus the first step
3135                          * for any newly-created thread
3136                          */
3137                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3138                                 thread_id_t tid = int_to_id(i);
3139                                 Thread *thr = get_thread(tid);
3140                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3141                                         switch_from_master(thr);
3142                                         if (thr->is_waiting_on(thr))
3143                                                 assert_bug("Deadlock detected (thread %u)", i);
3144                                 }
3145                         }
3146
3147                         /* Don't schedule threads which should be disabled */
3148                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3149                                 Thread *th = get_thread(int_to_id(i));
3150                                 ModelAction *act = th->get_pending();
3151                                 if (act && is_enabled(th) && !check_action_enabled(act)) {
3152                                         scheduler->sleep(th);
3153                                 }
3154                         }
3155
3156                         /* Catch assertions from prior take_step or from
3157                          * between-ModelAction bugs (e.g., data races) */
3158                         if (has_asserted())
3159                                 break;
3160
3161                         if (!t)
3162                                 t = get_next_thread();
3163                         if (!t || t->is_model_thread())
3164                                 break;
3165
3166                         /* Consume the next action for a Thread */
3167                         ModelAction *curr = t->get_pending();
3168                         t->set_pending(NULL);
3169                         t = take_step(curr);
3170                 } while (!should_terminate_execution());
3171
3172                 /*
3173                  * Launch end-of-execution release sequence fixups only when
3174                  * the execution is otherwise feasible AND there are:
3175                  *
3176                  * (1) pending release sequences
3177                  * (2) pending assertions that could be invalidated by a change
3178                  * in clock vectors (i.e., data races)
3179                  * (3) no pending promises
3180                  */
3181                 while (!pending_rel_seqs->empty() &&
3182                                 is_feasible_prefix_ignore_relseq() &&
3183                                 !unrealizedraces.empty()) {
3184                         model_print("*** WARNING: release sequence fixup action "
3185                                         "(%zu pending release sequence(s)) ***\n",
3186                                         pending_rel_seqs->size());
3187                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3188                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3189                                         model_thread);
3190                         take_step(fixup);
3191                 }
3192         } while (next_execution());
3193
3194         model_print("******* Model-checking complete: *******\n");
3195         print_stats();
3196 }