cyclegraph: clean up mo_graph dump
[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
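/**
 * Look up the action list for a given location, lazily creating an empty
 * list (and adding it to the hash table) if none exists yet.
 */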
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
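/**
 * Look up the vector of action lists for a given location, lazily creating
 * an empty vector (and adding it to the hash table) if none exists yet.
 */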
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
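/** @return the Node at the head of the NodeStack */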
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next chosen thread to run, if any exists; NULL if no threads
221  * remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be.  This method marks each such thread's pending action
300  * with the sleep flag so it can be tracked while the thread sleeps.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* All acquire/release fences and fence-acquire/store-release */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can wake up a load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
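/**
 * @brief Remove threads from the sleep set if the current action should
 * wake them up (see should_wake_up())
 */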
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else
487                 stats.num_redundant++;
488 }
489
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
492 {
493         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494         model_print("Number of redundant executions: %d\n", stats.num_redundant);
495         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497         model_print("Total executions: %d\n", stats.num_total);
498         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
499 }
500
501 /**
502  * @brief End-of-execution print
503  * @param printbugs Should any existing bugs be printed?
504  */
505 void ModelChecker::print_execution(bool printbugs) const
506 {
507         print_program_output();
508
509         if (DBG_ENABLED() || params.verbose) {
510                 model_print("Earliest divergence point since last feasible execution:\n");
511                 if (earliest_diverge)
512                         earliest_diverge->print();
513                 else
514                         model_print("(Not set)\n");
515
516                 model_print("\n");
517                 print_stats();
518         }
519
520         /* Don't print invalid bugs */
521         if (printbugs)
522                 print_bugs();
523
524         model_print("\n");
525         print_summary();
526 }
527
528 /**
529  * Queries the model-checker for more executions to explore and, if one
530  * exists, resets the model-checker state to execute a new execution.
531  *
532  * @return If there are more executions to explore, return true. Otherwise,
533  * return false.
534  */
535 bool ModelChecker::next_execution()
536 {
537         DBG();
538         /* Is this execution a feasible execution that's worth bug-checking? */
539         bool complete = isfeasibleprefix() && (is_complete_execution() ||
540                         have_bug_reports());
541
542         /* End-of-execution bug checks */
543         if (complete) {
544                 if (is_deadlocked())
545                         assert_bug("Deadlock detected");
546
547                 checkDataRaces();
548         }
549
550         record_stats();
551
552         /* Output */
553         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
554                 print_execution(complete);
555         else
556                 clear_program_output();
557
558         if (complete)
559                 earliest_diverge = NULL;
560
561         if ((diverge = get_next_backtrack()) == NULL)
562                 return false;
563
564         if (DBG_ENABLED()) {
565                 model_print("Next execution will diverge at:\n");
566                 diverge->print();
567         }
568
569         reset_to_initial_state();
570         return true;
571 }
572
573 /**
574  * @brief Find the last fence-related backtracking conflict for a ModelAction
575  *
576  * This function performs the search for the most recent conflicting action
577  * against which we should perform backtracking, as affected by fence
578  * operations. This includes pairs of potentially-synchronizing actions which
579  * occur due to fence-acquire or fence-release, and hence should be explored in
580  * the opposite execution order.
581  *
582  * @param act The current action
583  * @return The most recent action which conflicts with act due to fences
584  */
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
586 {
587         /* Only perform release/acquire fence backtracking for stores */
588         if (!act->is_write())
589                 return NULL;
590
591         /* Find a fence-release (or, act is a release) */
592         ModelAction *last_release;
593         if (act->is_release())
594                 last_release = act;
595         else
596                 last_release = get_last_fence_release(act->get_tid());
597         if (!last_release)
598                 return NULL;
599
600         /* Skip past the release */
601         action_list_t *list = action_trace;
602         action_list_t::reverse_iterator rit;
603         for (rit = list->rbegin(); rit != list->rend(); rit++)
604                 if (*rit == last_release)
605                         break;
606         ASSERT(rit != list->rend());
607
608         /* Find a prior:
609          *   load-acquire
610          * or
611          *   load --sb-> fence-acquire */
612         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
613         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
614         bool found_acquire_fences = false;
615         for ( ; rit != list->rend(); rit++) {
616                 ModelAction *prev = *rit;
617                 if (act->same_thread(prev))
618                         continue;
619
620                 int tid = id_to_int(prev->get_tid());
621
622                 if (prev->is_read() && act->same_var(prev)) {
623                         if (prev->is_acquire()) {
624                                 /* Found most recent load-acquire, don't need
625                                  * to search for more fences */
626                                 if (!found_acquire_fences)
627                                         return NULL;
628                         } else {
629                                 prior_loads[tid] = prev;
630                         }
631                 }
632                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633                         found_acquire_fences = true;
634                         acquire_fences[tid] = prev;
635                 }
636         }
637
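        /* Among threads with both an acquire fence and a prior load of the
         * same location, choose the latest acquire fence as the backtracking
         * candidate */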
638         ModelAction *latest_backtrack = NULL;
639         for (unsigned int i = 0; i < acquire_fences.size(); i++)
640                 if (acquire_fences[i] && prior_loads[i])
641                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642                                 latest_backtrack = acquire_fences[i];
643         return latest_backtrack;
644 }
645
646 /**
647  * @brief Find the last backtracking conflict for a ModelAction
648  *
649  * This function performs the search for the most recent conflicting action
650  * against which we should perform backtracking. This primarily includes pairs of
651  * synchronizing actions which should be explored in the opposite execution
652  * order.
653  *
654  * @param act The current action
655  * @return The most recent action which conflicts with act
656  */
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
658 {
659         switch (act->get_type()) {
660         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
661         case ATOMIC_READ:
662         case ATOMIC_WRITE:
663         case ATOMIC_RMW: {
664                 ModelAction *ret = NULL;
665
666                 /* linear search: from most recent to oldest */
667                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668                 action_list_t::reverse_iterator rit;
669                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670                         ModelAction *prev = *rit;
671                         if (prev->could_synchronize_with(act)) {
672                                 ret = prev;
673                                 break;
674                         }
675                 }
676
677                 ModelAction *ret2 = get_last_fence_conflict(act);
678                 if (!ret2)
679                         return ret;
680                 if (!ret)
681                         return ret2;
682                 if (*ret < *ret2)
683                         return ret2;
684                 return ret;
685         }
686         case ATOMIC_LOCK:
687         case ATOMIC_TRYLOCK: {
688                 /* linear search: from most recent to oldest */
689                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690                 action_list_t::reverse_iterator rit;
691                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692                         ModelAction *prev = *rit;
693                         if (act->is_conflicting_lock(prev))
694                                 return prev;
695                 }
696                 break;
697         }
698         case ATOMIC_UNLOCK: {
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (!act->same_thread(prev) && prev->is_failed_trylock())
705                                 return prev;
706                 }
707                 break;
708         }
709         case ATOMIC_WAIT: {
710                 /* linear search: from most recent to oldest */
711                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712                 action_list_t::reverse_iterator rit;
713                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714                         ModelAction *prev = *rit;
715                         if (!act->same_thread(prev) && prev->is_failed_trylock())
716                                 return prev;
717                         if (!act->same_thread(prev) && prev->is_notify())
718                                 return prev;
719                 }
720                 break;
721         }
722
723         case ATOMIC_NOTIFY_ALL:
724         case ATOMIC_NOTIFY_ONE: {
725                 /* linear search: from most recent to oldest */
726                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727                 action_list_t::reverse_iterator rit;
728                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729                         ModelAction *prev = *rit;
730                         if (!act->same_thread(prev) && prev->is_wait())
731                                 return prev;
732                 }
733                 break;
734         }
735         default:
736                 break;
737         }
738         return NULL;
739 }
740
741 /** This method finds backtracking points against which we should try
742  * to reorder the parameter ModelAction.
743  *
744  * @param act The ModelAction for which to find backtracking points.
745  */
746 void ModelChecker::set_backtracking(ModelAction *act)
747 {
748         Thread *t = get_thread(act);
749         ModelAction *prev = get_last_conflict(act);
750         if (prev == NULL)
751                 return;
752
753         Node *node = prev->get_node()->get_parent();
754
755         int low_tid, high_tid;
756         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
757                 low_tid = id_to_int(act->get_tid());
758                 high_tid = low_tid + 1;
759         } else {
760                 low_tid = 0;
761                 high_tid = get_num_threads();
762         }
763
764         for (int i = low_tid; i < high_tid; i++) {
765                 thread_id_t tid = int_to_id(i);
766
767                 /* Make sure this thread can be enabled here. */
768                 if (i >= node->get_num_threads())
769                         break;
770
771                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
772                 if (node->enabled_status(tid) != THREAD_ENABLED)
773                         continue;
774
775                 /* Check if this has been explored already */
776                 if (node->has_been_explored(tid))
777                         continue;
778
779                 /* See if fairness allows */
780                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
781                         bool unfair = false;
782                         for (int t = 0; t < node->get_num_threads(); t++) {
783                                 thread_id_t tother = int_to_id(t);
784                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
785                                         unfair = true;
786                                         break;
787                                 }
788                         }
789                         if (unfair)
790                                 continue;
791                 }
792                 /* Cache the latest backtracking point */
793                 set_latest_backtrack(prev);
794
795                 /* If this is a new backtracking point, mark the tree */
796                 if (!node->set_backtrack(tid))
797                         continue;
798                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
799                                         id_to_int(prev->get_tid()),
800                                         id_to_int(t->get_id()));
801                 if (DBG_ENABLED()) {
802                         prev->print();
803                         act->print();
804                 }
805         }
806 }
807
808 /**
809  * @brief Cache a backtracking point as the "most recent", if eligible
810  *
811  * Note that this does not prepare the NodeStack for this backtracking
812  * operation; it only caches the action on a per-execution basis.
813  *
814  * @param act The operation at which we should explore a different next action
815  * (i.e., backtracking point)
816  * @return True, if this action is now the most recent backtracking point;
817  * false otherwise
818  */
819 bool ModelChecker::set_latest_backtrack(ModelAction *act)
820 {
821         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
822                 priv->next_backtrack = act;
823                 return true;
824         }
825         return false;
826 }
827
828 /**
829  * Returns the last backtracking point. The model checker will explore a different
830  * path for this point in the next execution.
831  * @return The ModelAction at which the next execution should diverge.
832  */
833 ModelAction * ModelChecker::get_next_backtrack()
834 {
835         ModelAction *next = priv->next_backtrack;
836         priv->next_backtrack = NULL;
837         return next;
838 }
839
840 /**
841  * Processes a read model action.
842  * @param curr is the read model action to process.
843  * @return True if processing this read updates the mo_graph.
844  */
845 bool ModelChecker::process_read(ModelAction *curr)
846 {
847         Node *node = curr->get_node();
848         uint64_t value = VALUE_NONE;
849         bool updated = false;
850         while (true) {
851                 switch (node->get_read_from_status()) {
852                 case READ_FROM_PAST: {
853                         const ModelAction *rf = node->get_read_from_past();
854                         ASSERT(rf);
855
856                         mo_graph->startChanges();
857                         value = rf->get_value();
858                         check_recency(curr, rf);
859                         bool r_status = r_modification_order(curr, rf);
860
861                         if (is_infeasible() && node->increment_read_from()) {
862                                 mo_graph->rollbackChanges();
863                                 priv->too_many_reads = false;
864                                 continue;
865                         }
866
867                         read_from(curr, rf);
868                         mo_graph->commitChanges();
869                         mo_check_promises(curr, true);
870
871                         updated |= r_status;
872                         break;
873                 }
874                 case READ_FROM_PROMISE: {
875                         Promise *promise = curr->get_node()->get_read_from_promise();
876                         promise->add_reader(curr);
877                         value = promise->get_value();
878                         curr->set_read_from_promise(promise);
879                         mo_graph->startChanges();
880                         updated = r_modification_order(curr, promise);
881                         mo_graph->commitChanges();
882                         break;
883                 }
884                 case READ_FROM_FUTURE: {
885                         /* Read from future value */
886                         struct future_value fv = node->get_future_value();
887                         Promise *promise = new Promise(curr, fv);
888                         value = fv.value;
889                         curr->set_read_from_promise(promise);
890                         promises->push_back(promise);
891                         mo_graph->startChanges();
892                         updated = r_modification_order(curr, promise);
893                         mo_graph->commitChanges();
894                         break;
895                 }
896                 default:
897                         ASSERT(false);
898                 }
899                 get_thread(curr)->set_return_value(value);
900                 return updated;
901         }
902 }
903
904 /**
905  * Processes a lock, trylock, or unlock model action.  @param curr is
906  * the mutex model action to process.
907  *
908  * The trylock operation checks whether the lock is already held.  If
909  * not, it falls through to the normal lock case.  If so, it returns
910  * failure.
911  *
912  * The lock operation has already been checked that it is enabled, so
913  * it just grabs the lock and synchronizes with the previous unlock.
914  *
915  * The unlock operation has to re-enable all of the threads that are
916  * waiting on the lock.
917  *
918  * @return True if synchronization was updated; false otherwise
919  */
920 bool ModelChecker::process_mutex(ModelAction *curr)
921 {
922         std::mutex *mutex = NULL;
923         struct std::mutex_state *state = NULL;
924
925         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
926                 mutex = (std::mutex *)curr->get_location();
927                 state = mutex->get_state();
928         } else if (curr->is_wait()) {
929                 mutex = (std::mutex *)curr->get_value();
930                 state = mutex->get_state();
931         }
932
933         switch (curr->get_type()) {
934         case ATOMIC_TRYLOCK: {
935                 bool success = !state->islocked;
936                 curr->set_try_lock(success);
937                 if (!success) {
938                         get_thread(curr)->set_return_value(0);
939                         break;
940                 }
941                 get_thread(curr)->set_return_value(1);
942         }
943                 //otherwise fall into the lock case
944         case ATOMIC_LOCK: {
945                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
946                         assert_bug("Lock access before initialization");
947                 state->islocked = true;
948                 ModelAction *unlock = get_last_unlock(curr);
949                 //synchronize with the previous unlock statement
950                 if (unlock != NULL) {
951                         curr->synchronize_with(unlock);
952                         return true;
953                 }
954                 break;
955         }
956         case ATOMIC_UNLOCK: {
957                 //unlock the lock
958                 state->islocked = false;
959                 //wake up the other threads
960                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
961                 //activate all the waiting threads
962                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
963                         scheduler->wake(get_thread(*rit));
964                 }
965                 waiters->clear();
966                 break;
967         }
968         case ATOMIC_WAIT: {
969                 //unlock the lock
970                 state->islocked = false;
971                 //wake up the other threads
972                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
973                 //activate all the waiting threads
974                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
975                         scheduler->wake(get_thread(*rit));
976                 }
977                 waiters->clear();
978                 //check whether we should go to sleep or not...simulate spurious failures
979                 if (curr->get_node()->get_misc() == 0) {
980                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
981                         //disable us
982                         scheduler->sleep(get_thread(curr));
983                 }
984                 break;
985         }
986         case ATOMIC_NOTIFY_ALL: {
987                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
988                 //activate all the waiting threads
989                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
990                         scheduler->wake(get_thread(*rit));
991                 }
992                 waiters->clear();
993                 break;
994         }
995         case ATOMIC_NOTIFY_ONE: {
996                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
997                 int wakeupthread = curr->get_node()->get_misc();
998                 action_list_t::iterator it = waiters->begin();
999                 advance(it, wakeupthread);
1000                 scheduler->wake(get_thread(*it));
1001                 waiters->erase(it);
1002                 break;
1003         }
1004
1005         default:
1006                 ASSERT(0);
1007         }
1008         return false;
1009 }
1010
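/**
 * Add a future value from a writer to a reader's Node, provided the
 * modification order does not already rule it out; if the value is newly
 * added, cache the reader as a potential backtracking point.
 */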
1011 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1012 {
1013         /* Do more ambitious checks now that mo is more complete */
1014         if (mo_may_allow(writer, reader)) {
1015                 Node *node = reader->get_node();
1016
1017                 /* Find an ancestor thread which exists at the time of the reader */
1018                 Thread *write_thread = get_thread(writer);
1019                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1020                         write_thread = write_thread->get_parent();
1021
1022                 struct future_value fv = {
1023                         writer->get_value(),
1024                         writer->get_seq_number() + params.maxfuturedelay,
1025                         write_thread->get_id(),
1026                 };
1027                 if (node->add_future_value(fv))
1028                         set_latest_backtrack(reader);
1029         }
1030 }
1031
1032 /**
1033  * Process a write ModelAction
1034  * @param curr The ModelAction to process
1035  * @return True if the mo_graph was updated or promises were resolved
1036  */
1037 bool ModelChecker::process_write(ModelAction *curr)
1038 {
1039         bool updated_mod_order = w_modification_order(curr);
1040         bool updated_promises = resolve_promises(curr);
1041
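        /* Once no promises remain outstanding, flush deferred future values to
         * their prospective readers */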
1042         if (promises->size() == 0) {
1043                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1044                         struct PendingFutureValue pfv = (*futurevalues)[i];
1045                         add_future_value(pfv.writer, pfv.act);
1046                 }
1047                 futurevalues->clear();
1048         }
1049
1050         mo_graph->commitChanges();
1051         mo_check_promises(curr, false);
1052
1053         get_thread(curr)->set_return_value(VALUE_NONE);
1054         return updated_mod_order || updated_promises;
1055 }
1056
1057 /**
1058  * Process a fence ModelAction
1059  * @param curr The ModelAction to process
1060  * @return True if synchronization was updated
1061  */
1062 bool ModelChecker::process_fence(ModelAction *curr)
1063 {
1064         /*
1065          * fence-relaxed: no-op
1066  * fence-release: only log the occurrence (not in this function), for
1067          *   use in later synchronization
1068          * fence-acquire (this function): search for hypothetical release
1069          *   sequences
1070          */
1071         bool updated = false;
1072         if (curr->is_acquire()) {
1073                 action_list_t *list = action_trace;
1074                 action_list_t::reverse_iterator rit;
1075                 /* Find X : is_read(X) && X --sb-> curr */
1076                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1077                         ModelAction *act = *rit;
1078                         if (act == curr)
1079                                 continue;
1080                         if (act->get_tid() != curr->get_tid())
1081                                 continue;
1082                         /* Stop at the beginning of the thread */
1083                         if (act->is_thread_start())
1084                                 break;
1085                         /* Stop once we reach a prior fence-acquire */
1086                         if (act->is_fence() && act->is_acquire())
1087                                 break;
1088                         if (!act->is_read())
1089                                 continue;
1090                         /* read-acquire will find its own release sequences */
1091                         if (act->is_acquire())
1092                                 continue;
1093
1094                         /* Establish hypothetical release sequences */
1095                         rel_heads_list_t release_heads;
1096                         get_release_seq_heads(curr, act, &release_heads);
1097                         for (unsigned int i = 0; i < release_heads.size(); i++)
1098                                 if (!curr->synchronize_with(release_heads[i]))
1099                                         set_bad_synchronization();
1100                         if (release_heads.size() != 0)
1101                                 updated = true;
1102                 }
1103         }
1104         return updated;
1105 }
1106
1107 /**
1108  * @brief Process the current action for thread-related activity
1109  *
1110  * Performs current-action processing for a THREAD_* ModelAction. Processing
1111  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1112  * synchronization, etc.  This function is a no-op for non-THREAD actions
1113  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1114  *
1115  * @param curr The current action
1116  * @return True if synchronization was updated or a thread completed
1117  */
1118 bool ModelChecker::process_thread_action(ModelAction *curr)
1119 {
1120         bool updated = false;
1121
1122         switch (curr->get_type()) {
1123         case THREAD_CREATE: {
1124                 thrd_t *thrd = (thrd_t *)curr->get_location();
1125                 struct thread_params *params = (struct thread_params *)curr->get_value();
1126                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1127                 add_thread(th);
1128                 th->set_creation(curr);
1129                 /* Promises can be satisfied by children */
1130                 for (unsigned int i = 0; i < promises->size(); i++) {
1131                         Promise *promise = (*promises)[i];
1132                         if (promise->thread_is_available(curr->get_tid()))
1133                                 promise->add_thread(th->get_id());
1134                 }
1135                 break;
1136         }
1137         case THREAD_JOIN: {
1138                 Thread *blocking = curr->get_thread_operand();
1139                 ModelAction *act = get_last_action(blocking->get_id());
1140                 curr->synchronize_with(act);
1141                 updated = true; /* trigger rel-seq checks */
1142                 break;
1143         }
1144         case THREAD_FINISH: {
1145                 Thread *th = get_thread(curr);
1146                 while (!th->wait_list_empty()) {
1147                         ModelAction *act = th->pop_wait_list();
1148                         scheduler->wake(get_thread(act));
1149                 }
1150                 th->complete();
1151                 /* Completed thread can't satisfy promises */
1152                 for (unsigned int i = 0; i < promises->size(); i++) {
1153                         Promise *promise = (*promises)[i];
1154                         if (promise->thread_is_available(th->get_id()))
1155                                 if (promise->eliminate_thread(th->get_id()))
1156                                         priv->failed_promise = true;
1157                 }
1158                 updated = true; /* trigger rel-seq checks */
1159                 break;
1160         }
1161         case THREAD_START: {
1162                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1163                 break;
1164         }
1165         default:
1166                 break;
1167         }
1168
1169         return updated;
1170 }
1171
1172 /**
1173  * @brief Process the current action for release sequence fixup activity
1174  *
1175  * Performs model-checker release sequence fixups for the current action,
1176  * forcing a single pending release sequence to break (with a given, potential
1177  * "loose" write) or to complete (i.e., synchronize). If a pending release
1178  * sequence forms a complete release sequence, then we must perform the fixup
1179  * synchronization, mo_graph additions, etc.
1180  *
1181  * @param curr The current action; must be a release sequence fixup action
1182  * @param work_queue The work queue to which to add work items as they are
1183  * generated
1184  */
1185 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1186 {
1187         const ModelAction *write = curr->get_node()->get_relseq_break();
1188         struct release_seq *sequence = pending_rel_seqs->back();
1189         pending_rel_seqs->pop_back();
1190         ASSERT(sequence);
1191         ModelAction *acquire = sequence->acquire;
1192         const ModelAction *rf = sequence->rf;
1193         const ModelAction *release = sequence->release;
1194         ASSERT(acquire);
1195         ASSERT(release);
1196         ASSERT(rf);
1197         ASSERT(release->same_thread(rf));
1198
1199         if (write == NULL) {
1200                 /**
1201                  * @todo Forcing a synchronization requires that we set
1202                  * modification order constraints. For instance, we can't allow
1203                  * a fixup sequence in which two separate read-acquire
1204                  * operations read from the same sequence, where the first one
1205                  * synchronizes and the other doesn't. Essentially, we can't
1206                  * allow any writes to insert themselves between 'release' and
1207                  * 'rf'
1208                  */
1209
1210                 /* Must synchronize */
1211                 if (!acquire->synchronize_with(release)) {
1212                         set_bad_synchronization();
1213                         return;
1214                 }
1215                 /* Re-check all pending release sequences */
1216                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1217                 /* Re-check act for mo_graph edges */
1218                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1219
1220                 /* propagate synchronization to later actions */
1221                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1222                 for (; (*rit) != acquire; rit++) {
1223                         ModelAction *propagate = *rit;
1224                         if (acquire->happens_before(propagate)) {
1225                                 propagate->synchronize_with(acquire);
1226                                 /* Re-check 'propagate' for mo_graph edges */
1227                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1228                         }
1229                 }
1230         } else {
1231                 /* Break release sequence with new edges:
1232                  *   release --mo--> write --mo--> rf */
1233                 mo_graph->addEdge(release, write);
1234                 mo_graph->addEdge(write, rf);
1235         }
1236
1237         /* See if we have realized a data race */
1238         checkDataRaces();
1239 }
1240
1241 /**
1242  * Initialize the current action by performing one or more of the following
1243  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1244  * in the NodeStack, manipulating backtracking sets, allocating and
1245  * initializing clock vectors, and computing the promises to fulfill.
1246  *
1247  * @param curr The current action, as passed from the user context; may be
1248  * freed/invalidated after the execution of this function, with a different
1249  * action "returned" in its place (pass-by-reference)
1250  * @return True if curr is a newly-explored action; false otherwise
1251  */
1252 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1253 {
1254         ModelAction *newcurr;
1255
1256         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1257                 newcurr = process_rmw(*curr);
1258                 delete *curr;
1259
1260                 if (newcurr->is_rmw())
1261                         compute_promises(newcurr);
1262
1263                 *curr = newcurr;
1264                 return false;
1265         }
1266
1267         (*curr)->set_seq_number(get_next_seq_num());
1268
1269         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1270         if (newcurr) {
1271                 /* First restore type and order in case of RMW operation */
1272                 if ((*curr)->is_rmwr())
1273                         newcurr->copy_typeandorder(*curr);
1274
1275                 ASSERT((*curr)->get_location() == newcurr->get_location());
1276                 newcurr->copy_from_new(*curr);
1277
1278                 /* Discard duplicate ModelAction; use action from NodeStack */
1279                 delete *curr;
1280
1281                 /* Always compute new clock vector */
1282                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1283
1284                 *curr = newcurr;
1285                 return false; /* Action was explored previously */
1286         } else {
1287                 newcurr = *curr;
1288
1289                 /* Always compute new clock vector */
1290                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1291
1292                 /* Assign most recent release fence */
1293                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1294
1295                 /*
1296                  * Perform one-time actions when pushing new ModelAction onto
1297                  * NodeStack
1298                  */
1299                 if (newcurr->is_write())
1300                         compute_promises(newcurr);
1301                 else if (newcurr->is_relseq_fixup())
1302                         compute_relseq_breakwrites(newcurr);
1303                 else if (newcurr->is_wait())
1304                         newcurr->get_node()->set_misc_max(2);
1305                 else if (newcurr->is_notify_one()) {
1306                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1307                 }
1308                 return true; /* This was a new ModelAction */
1309         }
1310 }
1311
1312 /**
1313  * @brief Establish reads-from relation between two actions
1314  *
1315  * Perform basic operations involved with establishing a concrete rf relation,
1316  * including setting the ModelAction data and checking for release sequences.
1317  *
1318  * @param act The action that is reading (must be a read)
1319  * @param rf The action from which we are reading (must be a write)
1320  *
1321  * @return True if this read established synchronization
1322  */
1323 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1324 {
1325         act->set_read_from(rf);
1326         if (rf != NULL && act->is_acquire()) {
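                /* An acquire load synchronizes with the head(s) of any release
                 * sequence(s) ending at rf */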
1327                 rel_heads_list_t release_heads;
1328                 get_release_seq_heads(act, act, &release_heads);
1329                 int num_heads = release_heads.size();
1330                 for (unsigned int i = 0; i < release_heads.size(); i++)
1331                         if (!act->synchronize_with(release_heads[i])) {
1332                                 set_bad_synchronization();
1333                                 num_heads--;
1334                         }
1335                 return num_heads > 0;
1336         }
1337         return false;
1338 }
1339
1340 /**
1341  * Check promises and eliminate potentially-satisfying threads when a thread is
1342  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1343  * no longer satisfy a promise generated from that thread.
1344  *
1345  * @param blocker The thread on which a thread is waiting
1346  * @param waiting The waiting thread
1347  */
1348 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1349 {
1350         for (unsigned int i = 0; i < promises->size(); i++) {
1351                 Promise *promise = (*promises)[i];
1352                 if (!promise->thread_is_available(waiting->get_id()))
1353                         continue;
1354                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1355                         ModelAction *reader = promise->get_reader(j);
1356                         if (reader->get_tid() != blocker->get_id())
1357                                 continue;
1358                         if (promise->eliminate_thread(waiting->get_id())) {
1359                                 /* Promise has failed */
1360                                 priv->failed_promise = true;
1361                         } else {
1362                                 /* Only eliminate the 'waiting' thread once */
1363                                 return;
1364                         }
1365                 }
1366         }
1367 }
1368
1369 /**
1370  * @brief Check whether a model action is enabled.
1371  *
1372  * Checks whether a lock or join operation would be successful (i.e., is the
1373  * lock already locked, or is the joined thread already complete). If not, put
1374  * the action in a waiter list.
1375  *
1376  * @param curr The ModelAction to check.
1377  * @return True if the action is enabled; false if it was placed on a waiter list.
1378  */
1379 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1380         if (curr->is_lock()) {
1381                 std::mutex *lock = (std::mutex *)curr->get_location();
1382                 struct std::mutex_state *state = lock->get_state();
1383                 if (state->islocked) {
1384                         //Stick the action in the appropriate waiting queue
1385                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1386                         return false;
1387                 }
1388         } else if (curr->get_type() == THREAD_JOIN) {
1389                 Thread *blocking = (Thread *)curr->get_location();
1390                 if (!blocking->is_complete()) {
1391                         blocking->push_wait_list(curr);
1392                         thread_blocking_check_promises(blocking, get_thread(curr));
1393                         return false;
1394                 }
1395         }
1396
1397         return true;
1398 }
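
/*
 * Editorial example (not part of the original source): an action that is
 * not yet enabled.  Names are illustrative.
 *
 *   thrd_t child;
 *   thrd_create(&child, (thrd_start_t)&child_func, NULL);
 *   thrd_join(child);    // THREAD_JOIN
 *
 * If the scheduler reaches the thrd_join() before child_func has completed,
 * check_action_enabled() returns false, the THREAD_JOIN action is pushed
 * onto the child thread's wait list, and the joining thread is put to sleep
 * until the join can succeed.  The same pattern applies to locking an
 * already-locked mutex, except the action goes on lock_waiters_map instead.
 */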
1399
1400 /**
1401  * This is the heart of the model checker routine. It performs model-checking
1402  * actions corresponding to a given "current action." Among other processes, it
1403  * calculates reads-from relationships, updates synchronization clock vectors,
1404  * forms a memory_order constraints graph, and handles replay/backtrack
1405  * execution when running permutations of previously-observed executions.
1406  *
1407  * @param curr The current action to process
1408  * @return The ModelAction that is actually executed; may be different from
1409  * curr; may be NULL, if the current action is not enabled to run
1410  */
1411 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1412 {
1413         ASSERT(curr);
1414         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1415
1416         if (!check_action_enabled(curr)) {
1417                 /* Make the execution look like we chose to run this action
1418                  * much later, when a lock/join can succeed */
1419                 get_thread(curr)->set_pending(curr);
1420                 scheduler->sleep(get_thread(curr));
1421                 return NULL;
1422         }
1423
1424         bool newly_explored = initialize_curr_action(&curr);
1425
1426         DBG();
1427         if (DBG_ENABLED())
1428                 curr->print();
1429
1430         wake_up_sleeping_actions(curr);
1431
1432         /* Add the action to lists before any other model-checking tasks */
1433         if (!second_part_of_rmw)
1434                 add_action_to_lists(curr);
1435
1436         /* Build may_read_from set for newly-created actions */
1437         if (newly_explored && curr->is_read())
1438                 build_may_read_from(curr);
1439
1440         /* Initialize work_queue with the "current action" work */
1441         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1442         while (!work_queue.empty() && !has_asserted()) {
1443                 WorkQueueEntry work = work_queue.front();
1444                 work_queue.pop_front();
1445
1446                 switch (work.type) {
1447                 case WORK_CHECK_CURR_ACTION: {
1448                         ModelAction *act = work.action;
1449                         bool update = false; /* update this location's release seq's */
1450                         bool update_all = false; /* update all release seq's */
1451
1452                         if (process_thread_action(curr))
1453                                 update_all = true;
1454
1455                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1456                                 update = true;
1457
1458                         if (act->is_write() && process_write(act))
1459                                 update = true;
1460
1461                         if (act->is_fence() && process_fence(act))
1462                                 update_all = true;
1463
1464                         if (act->is_mutex_op() && process_mutex(act))
1465                                 update_all = true;
1466
1467                         if (act->is_relseq_fixup())
1468                                 process_relseq_fixup(curr, &work_queue);
1469
1470                         if (update_all)
1471                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1472                         else if (update)
1473                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1474                         break;
1475                 }
1476                 case WORK_CHECK_RELEASE_SEQ:
1477                         resolve_release_sequences(work.location, &work_queue);
1478                         break;
1479                 case WORK_CHECK_MO_EDGES: {
1480                         /** @todo Complete verification of work_queue */
1481                         ModelAction *act = work.action;
1482                         bool updated = false;
1483
1484                         if (act->is_read()) {
1485                                 const ModelAction *rf = act->get_reads_from();
1486                                 const Promise *promise = act->get_reads_from_promise();
1487                                 if (rf) {
1488                                         if (r_modification_order(act, rf))
1489                                                 updated = true;
1490                                 } else if (promise) {
1491                                         if (r_modification_order(act, promise))
1492                                                 updated = true;
1493                                 }
1494                         }
1495                         if (act->is_write()) {
1496                                 if (w_modification_order(act))
1497                                         updated = true;
1498                         }
1499                         mo_graph->commitChanges();
1500
1501                         if (updated)
1502                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1503                         break;
1504                 }
1505                 default:
1506                         ASSERT(false);
1507                         break;
1508                 }
1509         }
1510
1511         check_curr_backtracking(curr);
1512         set_backtracking(curr);
1513         return curr;
1514 }
1515
1516 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1517 {
1518         Node *currnode = curr->get_node();
1519         Node *parnode = currnode->get_parent();
1520
1521         if ((parnode && !parnode->backtrack_empty()) ||
1522                          !currnode->misc_empty() ||
1523                          !currnode->read_from_empty() ||
1524                          !currnode->promise_empty() ||
1525                          !currnode->relseq_break_empty()) {
1526                 set_latest_backtrack(curr);
1527         }
1528 }
1529
1530 bool ModelChecker::promises_expired() const
1531 {
1532         for (unsigned int i = 0; i < promises->size(); i++) {
1533                 Promise *promise = (*promises)[i];
1534                 if (promise->get_expiration() < priv->used_sequence_numbers)
1535                         return true;
1536         }
1537         return false;
1538 }
1539
1540 /**
1541  * This is the strongest feasibility check available.
1542  * @return whether the current trace (partial or complete) must be a prefix of
1543  * a feasible trace.
1544  */
1545 bool ModelChecker::isfeasibleprefix() const
1546 {
1547         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1548 }
1549
1550 /**
1551  * Print diagnostic information about an infeasible execution
1552  * @param prefix A string to prefix the output with; if NULL, then a default
1553  * message prefix will be provided
1554  */
1555 void ModelChecker::print_infeasibility(const char *prefix) const
1556 {
1557         char buf[100];
1558         char *ptr = buf;
1559         if (mo_graph->checkForCycles())
1560                 ptr += sprintf(ptr, "[mo cycle]");
1561         if (priv->failed_promise)
1562                 ptr += sprintf(ptr, "[failed promise]");
1563         if (priv->too_many_reads)
1564                 ptr += sprintf(ptr, "[too many reads]");
1565         if (priv->no_valid_reads)
1566                 ptr += sprintf(ptr, "[no valid reads-from]");
1567         if (priv->bad_synchronization)
1568                 ptr += sprintf(ptr, "[bad sw ordering]");
1569         if (promises_expired())
1570                 ptr += sprintf(ptr, "[promise expired]");
1571         if (promises->size() != 0)
1572                 ptr += sprintf(ptr, "[unresolved promise]");
1573         if (ptr != buf)
1574                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1575 }
1576
1577 /**
1578  * Returns whether the current completed trace is feasible, except for pending
1579  * release sequences.
1580  */
1581 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1582 {
1583         return !is_infeasible() && promises->size() == 0;
1584 }
1585
1586 /**
1587  * Check if the current partial trace is infeasible. Does not check any
1588  * end-of-execution flags, which might rule out the execution. Thus, this is
1589  * useful only for ruling an execution as infeasible.
1590  * @return whether the current partial trace is infeasible.
1591  */
1592 bool ModelChecker::is_infeasible() const
1593 {
1594         return mo_graph->checkForCycles() ||
1595                 priv->no_valid_reads ||
1596                 priv->failed_promise ||
1597                 priv->too_many_reads ||
1598                 priv->bad_synchronization ||
1599                 promises_expired();
1600 }
1601
1602 /** Close out a RMWR by converting the previous RMWR into a RMW or READ. */
1603 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1604         ModelAction *lastread = get_last_action(act->get_tid());
1605         lastread->process_rmw(act);
1606         if (act->is_rmw()) {
1607                 if (lastread->get_reads_from())
1608                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1609                 else
1610                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1611                 mo_graph->commitChanges();
1612         }
1613         return lastread;
1614 }
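
/*
 * Editorial illustration (not part of the original source); the variable
 * name is hypothetical.
 *
 *   std::atomic<int> x(0);
 *   x.store(1, std::memory_order_relaxed);                 // write W
 *   int v = x.fetch_add(1, std::memory_order_relaxed);     // RMW
 *
 * The fetch_add is modeled as a read part (RMWR) followed by a write part.
 * When the write part arrives, the earlier RMWR is converted into a full
 * RMW and addRMWEdge(W, RMW) records that the RMW must sit immediately
 * after the store it read from in modification order (RMW atomicity).
 */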
1615
1616 /**
1617  * Checks whether a thread has read from the same write too many times
1618  * without seeing the effects of a later write.
1619  *
1620  * Basic idea:
1621  * 1) there must be a different write that we could read from that would satisfy the modification order,
1622  * 2) we must have read from the same value in excess of maxreads times, and
1623  * 3) that other write must have been in the reads_from set for maxreads times.
1624  *
1625  * If so, we decide that the execution is no longer feasible.
1626  */
1627 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1628 {
1629         if (params.maxreads != 0) {
1630                 if (curr->get_node()->get_read_from_past_size() <= 1)
1631                         return;
1632                 //Must make sure that execution is currently feasible...  We could
1633                 //accidentally clear by rolling back
1634                 if (is_infeasible())
1635                         return;
1636                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1637                 int tid = id_to_int(curr->get_tid());
1638
1639                 /* Skip checks */
1640                 if ((int)thrd_lists->size() <= tid)
1641                         return;
1642                 action_list_t *list = &(*thrd_lists)[tid];
1643
1644                 action_list_t::reverse_iterator rit = list->rbegin();
1645                 /* Skip past curr */
1646                 for (; (*rit) != curr; rit++)
1647                         ;
1648                 /* go past curr now */
1649                 rit++;
1650
1651                 action_list_t::reverse_iterator ritcopy = rit;
1652                 //See if we have enough reads from the same value
1653                 int count = 0;
1654                 for (; count < params.maxreads; rit++, count++) {
1655                         if (rit == list->rend())
1656                                 return;
1657                         ModelAction *act = *rit;
1658                         if (!act->is_read())
1659                                 return;
1660
1661                         if (act->get_reads_from() != rf)
1662                                 return;
1663                         if (act->get_node()->get_read_from_past_size() <= 1)
1664                                 return;
1665                 }
1666                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1667                         /* Get write */
1668                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1669
1670                         /* Need a different write */
1671                         if (write == rf)
1672                                 continue;
1673
1674                         /* Test to see whether this is a feasible write to read from */
1675                         /** NOTE: all members of read-from set should be
1676                          *  feasible, so we no longer check it here **/
1677
1678                         rit = ritcopy;
1679
1680                         bool feasiblewrite = true;
1681                 //now we need to see if this write works for everyone
1682
1683                         for (int loop = count; loop > 0; loop--, rit++) {
1684                                 ModelAction *act = *rit;
1685                                 bool foundvalue = false;
1686                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1687                                         if (act->get_node()->get_read_from_past(j) == write) {
1688                                                 foundvalue = true;
1689                                                 break;
1690                                         }
1691                                 }
1692                                 if (!foundvalue) {
1693                                         feasiblewrite = false;
1694                                         break;
1695                                 }
1696                         }
1697                         if (feasiblewrite) {
1698                                 priv->too_many_reads = true;
1699                                 return;
1700                         }
1701                 }
1702         }
1703 }
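
/*
 * Editorial example (not part of the original source): the pattern this
 * check prunes.  Assume the maxreads parameter is set to some small bound
 * (declarations elided for brevity).
 *
 *   // Thread 1                         // Thread 2
 *   flag.store(1, relaxed);             while (!flag.load(relaxed))
 *                                               ;   // spin
 *
 * Thread 2 can read the initial 0 indefinitely.  Once it has done so more
 * than maxreads times while the store of 1 remains an alternative in every
 * read's read-from set, too_many_reads is set and this execution is treated
 * as infeasible instead of being unrolled forever.
 */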
1704
1705 /**
1706  * Updates the mo_graph with the constraints imposed from the current
1707  * read.
1708  *
1709  * Basic idea is the following: Go through each other thread and find
1710  * the last action that happened before our read.  Two cases:
1711  *
1712  * (1) The action is a write => that write must either occur before
1713  * the write we read from or be the write we read from.
1714  *
1715  * (2) The action is a read => the write that that action read from
1716  * must occur before the write we read from or be the same write.
1717  *
1718  * @param curr The current action. Must be a read.
1719  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1720  * @return True if modification order edges were added; false otherwise
1721  */
1722 template <typename rf_type>
1723 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1724 {
1725         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1726         unsigned int i;
1727         bool added = false;
1728         ASSERT(curr->is_read());
1729
1730         /* Last SC fence in the current thread */
1731         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1732
1733         /* Iterate over all threads */
1734         for (i = 0; i < thrd_lists->size(); i++) {
1735                 /* Last SC fence in thread i */
1736                 ModelAction *last_sc_fence_thread_local = NULL;
1737                 if (int_to_id((int)i) != curr->get_tid())
1738                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1739
1740                 /* Last SC fence in thread i, before last SC fence in current thread */
1741                 ModelAction *last_sc_fence_thread_before = NULL;
1742                 if (last_sc_fence_local)
1743                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1744
1745                 /* Iterate over actions in thread, starting from most recent */
1746                 action_list_t *list = &(*thrd_lists)[i];
1747                 action_list_t::reverse_iterator rit;
1748                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1749                         ModelAction *act = *rit;
1750
1751                         if (act->is_write() && !act->equals(rf) && act != curr) {
1752                                 /* C++, Section 29.3 statement 5 */
1753                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1754                                                 *act < *last_sc_fence_thread_local) {
1755                                         added = mo_graph->addEdge(act, rf) || added;
1756                                         break;
1757                                 }
1758                                 /* C++, Section 29.3 statement 4 */
1759                                 else if (act->is_seqcst() && last_sc_fence_local &&
1760                                                 *act < *last_sc_fence_local) {
1761                                         added = mo_graph->addEdge(act, rf) || added;
1762                                         break;
1763                                 }
1764                                 /* C++, Section 29.3 statement 6 */
1765                                 else if (last_sc_fence_thread_before &&
1766                                                 *act < *last_sc_fence_thread_before) {
1767                                         added = mo_graph->addEdge(act, rf) || added;
1768                                         break;
1769                                 }
1770                         }
1771
1772                         /*
1773                          * Include at most one act per-thread that "happens
1774                          * before" curr. Don't consider reflexively.
1775                          */
1776                         if (act->happens_before(curr) && act != curr) {
1777                                 if (act->is_write()) {
1778                                         if (!act->equals(rf)) {
1779                                                 added = mo_graph->addEdge(act, rf) || added;
1780                                         }
1781                                 } else {
1782                                         const ModelAction *prevrf = act->get_reads_from();
1783                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1784                                         if (prevrf) {
1785                                                 if (!prevrf->equals(rf))
1786                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1787                                         } else if (!prevrf_promise->equals(rf)) {
1788                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1789                                         }
1790                                 }
1791                                 break;
1792                         }
1793                 }
1794         }
1795
1796         /*
1797          * All compatible, thread-exclusive promises must be ordered after any
1798          * concrete loads from the same thread
1799          */
1800         for (unsigned int i = 0; i < promises->size(); i++)
1801                 if ((*promises)[i]->is_compatible_exclusive(curr))
1802                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1803
1804         return added;
1805 }
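
/*
 * Editorial example (not part of the original source): case (1) above on
 * the classic message-passing idiom (declarations elided for brevity).
 *
 *   // Thread 1                         // Thread 2
 *   d.store(1, relaxed);     (A)        while (!f.load(acquire)) ;   (C)
 *   f.store(1, release);     (B)        r = d.load(relaxed);         (D)
 *
 * Consider the candidate execution in which (D) reads the initial value of
 * d.  Because (B) synchronizes with (C), (A) happens before (D), and (A) is
 * a write to d different from the write (D) reads from, so case (1) adds
 * the edge A --mo--> init(d).  Together with the usual ordering of the
 * initialization before (A), this yields an mo cycle and the stale read is
 * ruled out as infeasible.
 */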
1806
1807 /**
1808  * Updates the mo_graph with the constraints imposed from the current write.
1809  *
1810  * Basic idea is the following: Go through each other thread and find
1811  * the latest action that happened before our write.  Two cases:
1812  *
1813  * (1) The action is a write => that write must occur before
1814  * the current write
1815  *
1816  * (2) The action is a read => the write that that action read from
1817  * must occur before the current write.
1818  *
1819  * This method also handles two other issues:
1820  *
1821  * (I) Sequential Consistency: Making sure that if the current write is
1822  * seq_cst, that it occurs after the previous seq_cst write.
1823  *
1824  * (II) Sending the write back to non-synchronizing reads.
1825  *
1826  * @param curr The current action. Must be a write.
1827  * @return True if modification order edges were added; false otherwise
1828  */
1829 bool ModelChecker::w_modification_order(ModelAction *curr)
1830 {
1831         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1832         unsigned int i;
1833         bool added = false;
1834         ASSERT(curr->is_write());
1835
1836         if (curr->is_seqcst()) {
1837                 /* We have to at least see the last sequentially consistent write,
1838                          so we are initialized. */
1839                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1840                 if (last_seq_cst != NULL) {
1841                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1842                 }
1843         }
1844
1845         /* Last SC fence in the current thread */
1846         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1847
1848         /* Iterate over all threads */
1849         for (i = 0; i < thrd_lists->size(); i++) {
1850                 /* Last SC fence in thread i, before last SC fence in current thread */
1851                 ModelAction *last_sc_fence_thread_before = NULL;
1852                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1853                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1854
1855                 /* Iterate over actions in thread, starting from most recent */
1856                 action_list_t *list = &(*thrd_lists)[i];
1857                 action_list_t::reverse_iterator rit;
1858                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1859                         ModelAction *act = *rit;
1860                         if (act == curr) {
1861                                 /*
1862                                  * 1) If RMW and it actually read from something, then we
1863                                  * already have all relevant edges, so just skip to next
1864                                  * thread.
1865                                  *
1866                                  * 2) If RMW and it didn't read from anything, we should add
1867                                  * whatever edge we can get to speed up convergence.
1868                                  *
1869                                  * 3) If normal write, we need to look at earlier actions, so
1870                                  * continue processing list.
1871                                  */
1872                                 if (curr->is_rmw()) {
1873                                         if (curr->get_reads_from() != NULL)
1874                                                 break;
1875                                         else
1876                                                 continue;
1877                                 } else
1878                                         continue;
1879                         }
1880
1881                         /* C++, Section 29.3 statement 7 */
1882                         if (last_sc_fence_thread_before && act->is_write() &&
1883                                         *act < *last_sc_fence_thread_before) {
1884                                 added = mo_graph->addEdge(act, curr) || added;
1885                                 break;
1886                         }
1887
1888                         /*
1889                          * Include at most one act per-thread that "happens
1890                          * before" curr
1891                          */
1892                         if (act->happens_before(curr)) {
1893                                 /*
1894                                  * Note: if act is RMW, just add edge:
1895                                  *   act --mo--> curr
1896                                  * The following edge should be handled elsewhere:
1897                                  *   readfrom(act) --mo--> act
1898                                  */
1899                                 if (act->is_write())
1900                                         added = mo_graph->addEdge(act, curr) || added;
1901                                 else if (act->is_read()) {
1902                                         //if previous read accessed a null, just keep going
1903                                         if (act->get_reads_from() == NULL)
1904                                                 continue;
1905                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1906                                 }
1907                                 break;
1908                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1909                                                      !act->same_thread(curr)) {
1910                                 /* We have an action that:
1911                                    (1) did not happen before us
1912                                    (2) is a read and we are a write
1913                                    (3) cannot synchronize with us
1914                                    (4) is in a different thread
1915                                    =>
1916                                    that read could potentially read from our write.  Note that
1917                                    these checks are overly conservative at this point, we'll
1918                                    do more checks before actually removing the
1919                                    pendingfuturevalue.
1920
1921                                  */
1922                                 if (thin_air_constraint_may_allow(curr, act)) {
1923                                         if (!is_infeasible())
1924                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1925                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1926                                                 add_future_value(curr, act);
1927                                 }
1928                         }
1929                 }
1930         }
1931
1932         /*
1933          * concrete stores from the same thread, or else they can be merged with
1934          * concrete stores to the same thread, or else they can be merged with
1935          * this store later
1936          */
1937         for (unsigned int i = 0; i < promises->size(); i++)
1938                 if ((*promises)[i]->is_compatible_exclusive(curr))
1939                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1940
1941         return added;
1942 }
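
/*
 * Editorial example (not part of the original source): case (2) above
 * (declarations elided).
 *
 *   // Thread 1                         // Thread 2
 *   x.store(1, release);     (A)        r = x.load(acquire);    (B)
 *                                       x.store(2, relaxed);    (C)
 *
 * Suppose (B) reads from (A).  When (C) is processed, (B) is a prior read
 * that happens before (C) and that read from (A), so case (2) adds the edge
 * A --mo--> C: the new store cannot be ordered before the write its own
 * thread has already observed.
 */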
1943
1944 /** Arbitrary reads from the future are not allowed.  Section 29.3
1945  * part 9 places some constraints.  This method checks one result of that
1946  * constraint.  Others require compiler support. */
1947 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1948 {
1949         if (!writer->is_rmw())
1950                 return true;
1951
1952         if (!reader->is_rmw())
1953                 return true;
1954
1955         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1956                 if (search == reader)
1957                         return false;
1958                 if (search->get_tid() == reader->get_tid() &&
1959                                 search->happens_before(reader))
1960                         break;
1961         }
1962
1963         return true;
1964 }
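
/*
 * Editorial example (not part of the original source): the RMW cycle this
 * check rejects (declarations elided).
 *
 *   // Thread 1                         // Thread 2
 *   x.fetch_add(1, relaxed);   (P)      x.fetch_add(1, relaxed);   (Q)
 *
 * Suppose (Q) already reads from (P) and we are considering sending (Q)'s
 * value back to (P) as a future value.  Walking writer (Q)'s reads-from
 * chain reaches reader (P), so this function returns false: the two RMWs
 * would otherwise read from each other, an out-of-thin-air-style result.
 */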
1965
1966 /**
1967  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1968  * some constraints. This method checks the following constraint (others
1969  * require compiler support):
1970  *
1971  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1972  */
1973 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1974 {
1975         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1976         unsigned int i;
1977         /* Iterate over all threads */
1978         for (i = 0; i < thrd_lists->size(); i++) {
1979                 const ModelAction *write_after_read = NULL;
1980
1981                 /* Iterate over actions in thread, starting from most recent */
1982                 action_list_t *list = &(*thrd_lists)[i];
1983                 action_list_t::reverse_iterator rit;
1984                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1985                         ModelAction *act = *rit;
1986
1987                         /* Don't disallow due to act == reader */
1988                         if (!reader->happens_before(act) || reader == act)
1989                                 break;
1990                         else if (act->is_write())
1991                                 write_after_read = act;
1992                         else if (act->is_read() && act->get_reads_from() != NULL)
1993                                 write_after_read = act->get_reads_from();
1994                 }
1995
1996                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1997                         return false;
1998         }
1999         return true;
2000 }
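
/*
 * Editorial example (not part of the original source), matching the
 * constraint stated above (declarations elided).
 *
 *   // Thread 1                         // Thread 2
 *   r = x.load(relaxed);     (X)        x.store(2, relaxed);     (Z)
 *   x.store(1, relaxed);     (Y)
 *
 * Here X --hb--> Y by sequencing.  If mo_graph already orders Y --mo--> Z,
 * then letting (X) read the future value written by (Z) would violate
 * "X --hb-> Y --mo-> Z implies X does not read from Z", so mo_may_allow()
 * returns false for writer (Z) and reader (X).
 */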
2001
2002 /**
2003  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2004  * The ModelAction under consideration is expected to be taking part in
2005  * release/acquire synchronization as an object of the "reads from" relation.
2006  * Note that this can only provide release sequence support for RMW chains
2007  * which do not read from the future, as those actions cannot be traced until
2008  * their "promise" is fulfilled. Similarly, we may not even establish the
2009  * presence of a release sequence with certainty, as some modification order
2010  * constraints may be decided further in the future. Thus, this function
2011  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2012  * and a boolean representing certainty.
2013  *
2014  * @param rf The action that might be part of a release sequence. Must be a
2015  * write.
2016  * @param release_heads A pass-by-reference style return parameter. After
2017  * execution of this function, release_heads will contain the heads of all the
2018  * relevant release sequences, if any exist with certainty
2019  * @param pending A pass-by-reference style return parameter which is only used
2020  * when returning false (i.e., uncertain). Returns most information regarding
2021  * an uncertain release sequence, including any write operations that might
2022  * break the sequence.
2023  * @return true, if the ModelChecker is certain that release_heads is complete;
2024  * false otherwise
2025  */
2026 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2027                 rel_heads_list_t *release_heads,
2028                 struct release_seq *pending) const
2029 {
2030         /* Only check for release sequences if there are no cycles */
2031         if (mo_graph->checkForCycles())
2032                 return false;
2033
2034         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2035                 ASSERT(rf->is_write());
2036
2037                 if (rf->is_release())
2038                         release_heads->push_back(rf);
2039                 else if (rf->get_last_fence_release())
2040                         release_heads->push_back(rf->get_last_fence_release());
2041                 if (!rf->is_rmw())
2042                         break; /* End of RMW chain */
2043
2044                 /** @todo Need to be smarter here...  In the linux lock
2045                  * example, this will run to the beginning of the program for
2046                  * every acquire. */
2047                 /** @todo The way to be smarter here is to keep going until 1
2048                  * thread has a release preceded by an acquire and you've seen
2049                  * both. */
2050
2051                 /* acq_rel RMW is a sufficient stopping condition */
2052                 if (rf->is_acquire() && rf->is_release())
2053                         return true; /* complete */
2054         };
2055         if (!rf) {
2056                 /* read from future: need to settle this later */
2057                 pending->rf = NULL;
2058                 return false; /* incomplete */
2059         }
2060
2061         if (rf->is_release())
2062                 return true; /* complete */
2063
2064         /* else relaxed write
2065          * - check for fence-release in the same thread (29.8, stmt. 3)
2066          * - check modification order for contiguous subsequence
2067          *   -> rf must be same thread as release */
2068
2069         const ModelAction *fence_release = rf->get_last_fence_release();
2070         /* Synchronize with a fence-release unconditionally; we don't need to
2071          * find any more "contiguous subsequence..." for it */
2072         if (fence_release)
2073                 release_heads->push_back(fence_release);
2074
2075         int tid = id_to_int(rf->get_tid());
2076         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2077         action_list_t *list = &(*thrd_lists)[tid];
2078         action_list_t::const_reverse_iterator rit;
2079
2080         /* Find rf in the thread list */
2081         rit = std::find(list->rbegin(), list->rend(), rf);
2082         ASSERT(rit != list->rend());
2083
2084         /* Find the last {write,fence}-release */
2085         for (; rit != list->rend(); rit++) {
2086                 if (fence_release && *(*rit) < *fence_release)
2087                         break;
2088                 if ((*rit)->is_release())
2089                         break;
2090         }
2091         if (rit == list->rend()) {
2092                 /* No write-release in this thread */
2093                 return true; /* complete */
2094         } else if (fence_release && *(*rit) < *fence_release) {
2095                 /* The fence-release is more recent (and so, "stronger") than
2096                  * the most recent write-release */
2097                 return true; /* complete */
2098         } /* else, need to establish contiguous release sequence */
2099         ModelAction *release = *rit;
2100
2101         ASSERT(rf->same_thread(release));
2102
2103         pending->writes.clear();
2104
2105         bool certain = true;
2106         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2107                 if (id_to_int(rf->get_tid()) == (int)i)
2108                         continue;
2109                 list = &(*thrd_lists)[i];
2110
2111                 /* Can we ensure no future writes from this thread may break
2112                  * the release seq? */
2113                 bool future_ordered = false;
2114
2115                 ModelAction *last = get_last_action(int_to_id(i));
2116                 Thread *th = get_thread(int_to_id(i));
2117                 if ((last && rf->happens_before(last)) ||
2118                                 !is_enabled(th) ||
2119                                 th->is_complete())
2120                         future_ordered = true;
2121
2122                 ASSERT(!th->is_model_thread() || future_ordered);
2123
2124                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2125                         const ModelAction *act = *rit;
2126                         /* Reach synchronization -> this thread is complete */
2127                         if (act->happens_before(release))
2128                                 break;
2129                         if (rf->happens_before(act)) {
2130                                 future_ordered = true;
2131                                 continue;
2132                         }
2133
2134                         /* Only non-RMW writes can break release sequences */
2135                         if (!act->is_write() || act->is_rmw())
2136                                 continue;
2137
2138                         /* Check modification order */
2139                         if (mo_graph->checkReachable(rf, act)) {
2140                                 /* rf --mo--> act */
2141                                 future_ordered = true;
2142                                 continue;
2143                         }
2144                         if (mo_graph->checkReachable(act, release))
2145                                 /* act --mo--> release */
2146                                 break;
2147                         if (mo_graph->checkReachable(release, act) &&
2148                                       mo_graph->checkReachable(act, rf)) {
2149                                 /* release --mo-> act --mo--> rf */
2150                                 return true; /* complete */
2151                         }
2152                         /* act may break release sequence */
2153                         pending->writes.push_back(act);
2154                         certain = false;
2155                 }
2156                 if (!future_ordered)
2157                         certain = false; /* This thread is uncertain */
2158         }
2159
2160         if (certain) {
2161                 release_heads->push_back(release);
2162                 pending->writes.clear();
2163         } else {
2164                 pending->release = release;
2165                 pending->rf = rf;
2166         }
2167         return certain;
2168 }
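
/*
 * Editorial example (not part of the original source): an RMW chain that
 * extends a release sequence (declarations elided).
 *
 *   // Thread 1                                  // Thread 2
 *   x.store(1, release);          (R)            r = x.load(acquire);   (A)
 *   x.fetch_add(1, relaxed);      (M)
 *
 * Suppose (A) reads from (M).  Walking the reads-from chain from (M): it is
 * not a release, but it is an RMW, so the walk continues to (R), which is a
 * release and is reported as a release head with certainty; (A) then
 * synchronizes with (R).  If instead (A) read from a plain relaxed store
 * with no RMW chain back to a release, the code after the chain walk must
 * establish a contiguous modification-order subsequence from the last
 * release in that thread; other-thread writes whose mo position is still
 * undecided are recorded in pending->writes and the result is uncertain.
 */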
2169
2170 /**
2171  * An interface for getting the release sequence head(s) with which a
2172  * given ModelAction must synchronize. This function only returns a non-empty
2173  * result when it can locate a release sequence head with certainty. Otherwise,
2174  * it may mark the internal state of the ModelChecker so that it will handle
2175  * the release sequence at a later time, causing @a acquire to update its
2176  * synchronization at some later point in execution.
2177  *
2178  * @param acquire The 'acquire' action that may synchronize with a release
2179  * sequence
2180  * @param read The read action that may read from a release sequence; this may
2181  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2182  * when 'acquire' is a fence-acquire)
2183  * @param release_heads A pass-by-reference return parameter. Will be filled
2184  * with the head(s) of the release sequence(s), if they exist with certainty.
2185  * @see ModelChecker::release_seq_heads
2186  */
2187 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2188                 ModelAction *read, rel_heads_list_t *release_heads)
2189 {
2190         const ModelAction *rf = read->get_reads_from();
2191         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2192         sequence->acquire = acquire;
2193         sequence->read = read;
2194
2195         if (!release_seq_heads(rf, release_heads, sequence)) {
2196                 /* add act to 'lazy checking' list */
2197                 pending_rel_seqs->push_back(sequence);
2198         } else {
2199                 snapshot_free(sequence);
2200         }
2201 }
2202
2203 /**
2204  * Attempt to resolve all stashed operations that might synchronize with a
2205  * release sequence for a given location. This implements the "lazy" portion of
2206  * determining whether or not a release sequence was contiguous, since not all
2207  * modification order information is present at the time an action occurs.
2208  *
2209  * @param location The location/object that should be checked for release
2210  * sequence resolutions. A NULL value means to check all locations.
2211  * @param work_queue The work queue to which to add work items as they are
2212  * generated
2213  * @return True if any updates occurred (new synchronization, new mo_graph
2214  * edges)
2215  */
2216 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2217 {
2218         bool updated = false;
2219         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2220         while (it != pending_rel_seqs->end()) {
2221                 struct release_seq *pending = *it;
2222                 ModelAction *acquire = pending->acquire;
2223                 const ModelAction *read = pending->read;
2224
2225                 /* Only resolve sequences on the given location, if provided */
2226                 if (location && read->get_location() != location) {
2227                         it++;
2228                         continue;
2229                 }
2230
2231                 const ModelAction *rf = read->get_reads_from();
2232                 rel_heads_list_t release_heads;
2233                 bool complete;
2234                 complete = release_seq_heads(rf, &release_heads, pending);
2235                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2236                         if (!acquire->has_synchronized_with(release_heads[i])) {
2237                                 if (acquire->synchronize_with(release_heads[i]))
2238                                         updated = true;
2239                                 else
2240                                         set_bad_synchronization();
2241                         }
2242                 }
2243
2244                 if (updated) {
2245                         /* Re-check all pending release sequences */
2246                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2247                         /* Re-check read-acquire for mo_graph edges */
2248                         if (acquire->is_read())
2249                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2250
2251                         /* propagate synchronization to later actions */
2252                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2253                         for (; (*rit) != acquire; rit++) {
2254                                 ModelAction *propagate = *rit;
2255                                 if (acquire->happens_before(propagate)) {
2256                                         propagate->synchronize_with(acquire);
2257                                         /* Re-check 'propagate' for mo_graph edges */
2258                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2259                                 }
2260                         }
2261                 }
2262                 if (complete) {
2263                         it = pending_rel_seqs->erase(it);
2264                         snapshot_free(pending);
2265                 } else {
2266                         it++;
2267                 }
2268         }
2269
2270         // If we resolved promises or data races, see if we have realized a data race.
2271         checkDataRaces();
2272
2273         return updated;
2274 }
2275
2276 /**
2277  * Performs various bookkeeping operations for the current ModelAction. For
2278  * instance, adds action to the per-object, per-thread action vector and to the
2279  * action trace list of all thread actions.
2280  *
2281  * @param act is the ModelAction to add.
2282  */
2283 void ModelChecker::add_action_to_lists(ModelAction *act)
2284 {
2285         int tid = id_to_int(act->get_tid());
2286         ModelAction *uninit = NULL;
2287         int uninit_id = -1;
2288         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2289         if (list->empty() && act->is_atomic_var()) {
2290                 uninit = new_uninitialized_action(act->get_location());
2291                 uninit_id = id_to_int(uninit->get_tid());
2292                 list->push_back(uninit);
2293         }
2294         list->push_back(act);
2295
2296         action_trace->push_back(act);
2297         if (uninit)
2298                 action_trace->push_front(uninit);
2299
2300         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2301         if (tid >= (int)vec->size())
2302                 vec->resize(priv->next_thread_id);
2303         (*vec)[tid].push_back(act);
2304         if (uninit)
2305                 (*vec)[uninit_id].push_front(uninit);
2306
2307         if ((int)thrd_last_action->size() <= tid)
2308                 thrd_last_action->resize(get_num_threads());
2309         (*thrd_last_action)[tid] = act;
2310         if (uninit)
2311                 (*thrd_last_action)[uninit_id] = uninit;
2312
2313         if (act->is_fence() && act->is_release()) {
2314                 if ((int)thrd_last_fence_release->size() <= tid)
2315                         thrd_last_fence_release->resize(get_num_threads());
2316                 (*thrd_last_fence_release)[tid] = act;
2317         }
2318
2319         if (act->is_wait()) {
2320                 void *mutex_loc = (void *) act->get_value();
2321                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2322
2323                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2324                 if (tid >= (int)vec->size())
2325                         vec->resize(priv->next_thread_id);
2326                 (*vec)[tid].push_back(act);
2327         }
2328 }
2329
2330 /**
2331  * @brief Get the last action performed by a particular Thread
2332  * @param tid The thread ID of the Thread in question
2333  * @return The last action in the thread
2334  */
2335 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2336 {
2337         int threadid = id_to_int(tid);
2338         if (threadid < (int)thrd_last_action->size())
2339                 return (*thrd_last_action)[id_to_int(tid)];
2340         else
2341                 return NULL;
2342 }
2343
2344 /**
2345  * @brief Get the last fence release performed by a particular Thread
2346  * @param tid The thread ID of the Thread in question
2347  * @return The last fence release in the thread, if one exists; NULL otherwise
2348  */
2349 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2350 {
2351         int threadid = id_to_int(tid);
2352         if (threadid < (int)thrd_last_fence_release->size())
2353                 return (*thrd_last_fence_release)[id_to_int(tid)];
2354         else
2355                 return NULL;
2356 }
2357
2358 /**
2359  * Gets the last memory_order_seq_cst write (in the total global sequence)
2360  * performed on a particular object (i.e., memory location), not including the
2361  * current action.
2362  * @param curr The current ModelAction; also denotes the object location to
2363  * check
2364  * @return The last seq_cst write
2365  */
2366 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2367 {
2368         void *location = curr->get_location();
2369         action_list_t *list = get_safe_ptr_action(obj_map, location);
2370         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2371         action_list_t::reverse_iterator rit;
2372         for (rit = list->rbegin(); rit != list->rend(); rit++)
2373                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2374                         return *rit;
2375         return NULL;
2376 }
2377
2378 /**
2379  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2380  * performed in a particular thread, prior to a particular fence.
2381  * @param tid The ID of the thread to check
2382  * @param before_fence The fence from which to begin the search; if NULL, then
2383  * search for the most recent fence in the thread.
2384  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2385  */
2386 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2387 {
2388         /* All fences should have NULL location */
2389         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2390         action_list_t::reverse_iterator rit = list->rbegin();
2391
2392         if (before_fence) {
2393                 for (; rit != list->rend(); rit++)
2394                         if (*rit == before_fence)
2395                                 break;
2396
2397                 ASSERT(*rit == before_fence);
2398                 rit++;
2399         }
2400
2401         for (; rit != list->rend(); rit++)
2402                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2403                         return *rit;
2404         return NULL;
2405 }
2406
2407 /**
2408  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2409  * location). This function identifies the mutex according to the current
2410  * action, which is presumed to perform on the same mutex.
2411  * @param curr The current ModelAction; also denotes the object location to
2412  * check
2413  * @return The last unlock operation
2414  */
2415 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2416 {
2417         void *location = curr->get_location();
2418         action_list_t *list = get_safe_ptr_action(obj_map, location);
2419         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2420         action_list_t::reverse_iterator rit;
2421         for (rit = list->rbegin(); rit != list->rend(); rit++)
2422                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2423                         return *rit;
2424         return NULL;
2425 }
2426
2427 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2428 {
2429         ModelAction *parent = get_last_action(tid);
2430         if (!parent)
2431                 parent = get_thread(tid)->get_creation();
2432         return parent;
2433 }
2434
2435 /**
2436  * Returns the clock vector for a given thread.
2437  * @param tid The thread whose clock vector we want
2438  * @return Desired clock vector
2439  */
2440 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2441 {
2442         return get_parent_action(tid)->get_cv();
2443 }
2444
2445 /**
2446  * Resolve a set of Promises with a current write. The set is provided in the
2447  * Node corresponding to @a write.
2448  * @param write The ModelAction that is fulfilling Promises
2449  * @return True if promises were resolved; false otherwise
2450  */
2451 bool ModelChecker::resolve_promises(ModelAction *write)
2452 {
2453         bool haveResolved = false;
2454         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2455         promise_list_t mustResolve, resolved;
2456
2457         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2458                 Promise *promise = (*promises)[promise_index];
2459                 if (write->get_node()->get_promise(i)) {
2460                         for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2461                                 ModelAction *read = promise->get_reader(j);
2462                                 read_from(read, write);
2463                                 actions_to_check.push_back(read);
2464                         }
2465                         //Make sure the promise's value matches the write's value
2466                         ASSERT(promise->is_compatible(write));
2467                         mo_graph->resolvePromise(promise, write, &mustResolve);
2468
2469                         resolved.push_back(promise);
2470                         promises->erase(promises->begin() + promise_index);
2471
2472                         haveResolved = true;
2473                 } else
2474                         promise_index++;
2475         }
2476
2477         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2478                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2479                                 == resolved.end())
2480                         priv->failed_promise = true;
2481         }
2482         for (unsigned int i = 0; i < resolved.size(); i++)
2483                 delete resolved[i];
2484         //Check whether reading these writes has made threads unable to
2485         //resolve promises
2486
2487         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2488                 ModelAction *read = actions_to_check[i];
2489                 mo_check_promises(read, true);
2490         }
2491
2492         return haveResolved;
2493 }
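
/*
 * Editorial example (not part of the original source): where promises come
 * from, using the classic load-buffering idiom (declarations elided).
 *
 *   // Thread 1                         // Thread 2
 *   r1 = y.load(relaxed);    (A)        r2 = x.load(relaxed);   (C)
 *   x.store(1, relaxed);     (B)        y.store(1, relaxed);    (D)
 *
 * To explore the outcome r1 == 1, (A) is initially bound to a Promise for
 * the value 1 rather than to any concrete store.  When (D) is later
 * processed, compute_promises() marks the promise as satisfiable in (D)'s
 * Node; if that branch is chosen, resolve_promises() rebinds (A) to read
 * from (D), resolves the promise in mo_graph, and removes it from the
 * promises list.
 */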
2494
2495 /**
2496  * Compute the set of promises that could potentially be satisfied by this
2497  * action. Note that the set computation actually appears in the Node, not in
2498  * ModelChecker.
2499  * @param curr The ModelAction that may satisfy promises
2500  */
2501 void ModelChecker::compute_promises(ModelAction *curr)
2502 {
2503         for (unsigned int i = 0; i < promises->size(); i++) {
2504                 Promise *promise = (*promises)[i];
2505                 if (!promise->is_compatible(curr) || promise->get_value() != curr->get_value())
2506                         continue;
2507
2508                 bool satisfy = true;
2509                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2510                         const ModelAction *act = promise->get_reader(j);
2511                         if (act->happens_before(curr) ||
2512                                         act->could_synchronize_with(curr)) {
2513                                 satisfy = false;
2514                                 break;
2515                         }
2516                 }
2517                 if (satisfy)
2518                         curr->get_node()->set_promise(i);
2519         }
2520 }
2521
2522 /** Checks promises in response to a change in a thread's ClockVector. */
2523 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2524 {
2525         for (unsigned int i = 0; i < promises->size(); i++) {
2526                 Promise *promise = (*promises)[i];
2527                 if (!promise->thread_is_available(tid))
2528                         continue;
2529                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2530                         const ModelAction *act = promise->get_reader(j);
2531                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2532                                         merge_cv->synchronized_since(act)) {
2533                                 if (promise->eliminate_thread(tid)) {
2534                                         /* Promise has failed */
2535                                         priv->failed_promise = true;
2536                                         return;
2537                                 }
2538                         }
2539                 }
2540         }
2541 }
2542
2543 void ModelChecker::check_promises_thread_disabled()
2544 {
2545         for (unsigned int i = 0; i < promises->size(); i++) {
2546                 Promise *promise = (*promises)[i];
2547                 if (promise->has_failed()) {
2548                         priv->failed_promise = true;
2549                         return;
2550                 }
2551         }
2552 }
2553
2554 /**
2555  * @brief Checks promises in response to addition to modification order for
2556  * threads.
2557  *
2558  * We test whether threads are still available for satisfying promises after an
2559  * addition to our modification order constraints. Those that are unavailable
2560  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2561  * that promise has failed.
2562  *
2563  * @param act The ModelAction which updated the modification order
2564  * @param is_read_check Should be true if act is a read and we must check for
2565  * updates to the store from which it read (there is a distinction here for
2566  * RMW's, which are both a load and a store)
2567  */
2568 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2569 {
2570         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2571
2572         for (unsigned int i = 0; i < promises->size(); i++) {
2573                 Promise *promise = (*promises)[i];
2574
2575                 // Is this promise for the same value as this write?
2576                 if (promise->get_value() != write->get_value())
2577                         continue;
2578
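                /* If any promised read happens before 'act', check whether this
                 * write's modification-order position eliminates the promise */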
2579                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2580                         const ModelAction *pread = promise->get_reader(j);
2581                         if (!pread->happens_before(act))
2582                                continue;
2583                         if (mo_graph->checkPromise(write, promise)) {
2584                                 priv->failed_promise = true;
2585                                 return;
2586                         }
2587                         break;
2588                 }
2589
2590                 // Don't do any lookups twice for the same thread
2591                 if (!promise->thread_is_available(act->get_tid()))
2592                         continue;
2593
2594                 if (mo_graph->checkReachable(promise, write)) {
2595                         if (mo_graph->checkPromise(write, promise)) {
2596                                 priv->failed_promise = true;
2597                                 return;
2598                         }
2599                 }
2600         }
2601 }
2602
2603 /**
2604  * Compute the set of writes that may break the current pending release
2605  * sequence. This information is extracted from previous release sequence
2606  * calculations.
2607  *
2608  * @param curr The current ModelAction. Must be a release sequence fixup
2609  * action.
2610  */
2611 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2612 {
2613         if (pending_rel_seqs->empty())
2614                 return;
2615
2616         struct release_seq *pending = pending_rel_seqs->back();
2617         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2618                 const ModelAction *write = pending->writes[i];
2619                 curr->get_node()->add_relseq_break(write);
2620         }
2621
2622         /* NULL means don't break the sequence; just synchronize */
2623         curr->get_node()->add_relseq_break(NULL);
2624 }
2625
2626 /**
2627  * Build up an initial set of all past writes that this 'read' action may read
2628  * from, as well as any previously-observed future values that must still be valid.
2629  *
2630  * @param curr is the current ModelAction that we are exploring; it must be a
2631  * 'read' operation.
2632  */
2633 void ModelChecker::build_may_read_from(ModelAction *curr)
2634 {
2635         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2636         unsigned int i;
2637         ASSERT(curr->is_read());
2638
2639         ModelAction *last_sc_write = NULL;
2640
2641         if (curr->is_seqcst())
2642                 last_sc_write = get_last_seq_cst_write(curr);
2643
2644         /* Iterate over all threads */
2645         for (i = 0; i < thrd_lists->size(); i++) {
2646                 /* Iterate over actions in thread, starting from most recent */
2647                 action_list_t *list = &(*thrd_lists)[i];
2648                 action_list_t::reverse_iterator rit;
2649                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2650                         ModelAction *act = *rit;
2651
2652                         /* Only consider 'write' actions */
2653                         if (!act->is_write() || act == curr)
2654                                 continue;
2655
2656                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2657                         bool allow_read = true;
2658
2659                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2660                                 allow_read = false;
2661                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2662                                 allow_read = false;
2663
2664                         if (allow_read) {
2665                                 /* Only add feasible reads */
2666                                 mo_graph->startChanges();
2667                                 r_modification_order(curr, act);
2668                                 if (!is_infeasible())
2669                                         curr->get_node()->add_read_from_past(act);
2670                                 mo_graph->rollbackChanges();
2671                         }
2672
2673                         /* Include at most one act per-thread that "happens before" curr */
2674                         if (act->happens_before(curr))
2675                                 break;
2676                 }
2677         }
2678
2679         /* Inherit existing, promised future values */
2680         for (i = 0; i < promises->size(); i++) {
2681                 const Promise *promise = (*promises)[i];
2682                 const ModelAction *promise_read = promise->get_reader(0);
2683                 if (promise_read->same_var(curr)) {
2684                         /* Only add feasible future-values */
2685                         mo_graph->startChanges();
2686                         r_modification_order(curr, promise);
2687                         if (!is_infeasible())
2688                                 curr->get_node()->add_read_from_promise(promise_read);
2689                         mo_graph->rollbackChanges();
2690                 }
2691         }
2692
2693         /* We can be left with an empty may-read-from set only if the execution is doomed */
2694         if (!curr->get_node()->read_from_size()) {
2695                 priv->no_valid_reads = true;
2696                 set_assert();
2697         }
2698
2699         if (DBG_ENABLED()) {
2700                 model_print("Reached read action:\n");
2701                 curr->print();
2702                 model_print("Printing read_from_past\n");
2703                 curr->get_node()->print_read_from_past();
2704                 model_print("End printing read_from_past\n");
2705         }
2706 }
2707
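/**
 * @brief Check whether a sleeping thread may read from a given write
 *
 * Walks backward along the chain of RMWs ending at 'write': the read is
 * allowed if the chain reaches an uninitialized store or a release performed
 * while the reading thread appears on the sleep set; a plain (non-RMW) write
 * that satisfies neither condition rejects it.
 * @param curr The read action in question
 * @param write The candidate write to read from
 * @return True if the read may be allowed
 */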
2708 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2709 {
2710         for ( ; write != NULL; write = write->get_reads_from()) {
2711                 /* UNINIT actions don't have a Node, and they never sleep */
2712                 if (write->is_uninitialized())
2713                         return true;
2714                 Node *prevnode = write->get_node()->get_parent();
2715
2716                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2717                 if (write->is_release() && thread_sleep)
2718                         return true;
2719                 if (!write->is_rmw())
2720                         return false;
2721         }
2722         return true;
2723 }
2724
2725 /**
2726  * @brief Create a new action representing an uninitialized atomic
2727  * @param location The memory location of the atomic object
2728  * @return A pointer to a new ModelAction
2729  */
2730 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2731 {
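        /* Allocate from the snapshotting heap, then construct in place */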
2732         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2733         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2734         act->create_cv(NULL);
2735         return act;
2736 }
2737
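/**
 * @brief Print each action in a list, followed by a simple hash of the list
 * @param list The action list to print
 */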
2738 static void print_list(action_list_t *list)
2739 {
2740         action_list_t::iterator it;
2741
2742         model_print("---------------------------------------------------------------------\n");
2743
2744         unsigned int hash = 0;
2745
2746         for (it = list->begin(); it != list->end(); it++) {
2747                 (*it)->print();
2748                 hash = hash^(hash<<3)^((*it)->hash());
2749         }
2750         model_print("HASH %u\n", hash);
2751         model_print("---------------------------------------------------------------------\n");
2752 }
2753
2754 #if SUPPORT_MOD_ORDER_DUMP
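/**
 * @brief Dump the current execution to a Graphviz "dot" file
 *
 * Emits the modification-order graph nodes plus reads-from ("rf", red) and
 * sequenced-before ("sb", blue) edges for the actions in the trace.
 * @param filename Base name of the output file; ".dot" is appended
 */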
2755 void ModelChecker::dumpGraph(char *filename) const
2756 {
2757         char buffer[200];
2758         sprintf(buffer, "%s.dot", filename);
2759         FILE *file = fopen(buffer, "w");
2760         fprintf(file, "digraph %s {\n", filename);
2761         mo_graph->dumpNodes(file);
2762         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2763
2764         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2765                 ModelAction *action = *it;
2766                 if (action->is_read()) {
2767                         fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2768                         if (action->get_reads_from() != NULL)
2769                                 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2770                 }
2771                 if (thread_array[action->get_tid()] != NULL) {
2772                         fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2773                 }
2774
2775                 thread_array[action->get_tid()] = action;
2776         }
2777         fprintf(file, "}\n");
2778         model_free(thread_array);
2779         fclose(file);
2780 }
2781 #endif
2782
2783 /** @brief Prints an execution trace summary. */
2784 void ModelChecker::print_summary() const
2785 {
2786 #if SUPPORT_MOD_ORDER_DUMP
2787         char buffername[100];
2788         sprintf(buffername, "exec%04u", stats.num_total);
2789         mo_graph->dumpGraphToFile(buffername);
2790         sprintf(buffername, "graph%04u", stats.num_total);
2791         dumpGraph(buffername);
2792 #endif
2793
2794         model_print("Execution %d:", stats.num_total);
2795         if (isfeasibleprefix())
2796                 model_print("\n");
2797         else
2798                 print_infeasibility(" INFEASIBLE");
2799         print_list(action_trace);
2800         model_print("\n");
2801 }
2802
2803 /**
2804  * Add a Thread to the system for the first time. Should only be called once
2805  * per thread.
2806  * @param t The Thread to add
2807  */
2808 void ModelChecker::add_thread(Thread *t)
2809 {
2810         thread_map->put(id_to_int(t->get_id()), t);
2811         scheduler->add_thread(t);
2812 }
2813
2814 /**
2815  * Removes a thread from the scheduler.
2816  * @param t The Thread to remove.
2817  */
2818 void ModelChecker::remove_thread(Thread *t)
2819 {
2820         scheduler->remove_thread(t);
2821 }
2822
2823 /**
2824  * @brief Get a Thread reference by its ID
2825  * @param tid The Thread's ID
2826  * @return A Thread reference
2827  */
2828 Thread * ModelChecker::get_thread(thread_id_t tid) const
2829 {
2830         return thread_map->get(id_to_int(tid));
2831 }
2832
2833 /**
2834  * @brief Get a reference to the Thread in which a ModelAction was executed
2835  * @param act The ModelAction
2836  * @return A Thread reference
2837  */
2838 Thread * ModelChecker::get_thread(const ModelAction *act) const
2839 {
2840         return get_thread(act->get_tid());
2841 }
2842
2843 /**
2844  * @brief Get a Promise's "promise number"
2845  *
2846  * A "promise number" is an index number that is unique to a promise, valid
2847  * only for a specific snapshot of an execution trace. Promises may come and go
2848  * as they are generated and resolved, so an index only retains meaning for the
2849  * current snapshot.
2850  *
2851  * @param promise The Promise to check
2852  * @return The promise index, if the promise still is valid; otherwise -1
2853  */
2854 int ModelChecker::get_promise_number(const Promise *promise) const
2855 {
2856         for (unsigned int i = 0; i < promises->size(); i++)
2857                 if ((*promises)[i] == promise)
2858                         return i;
2859         /* Not found */
2860         return -1;
2861 }
2862
2863 /**
2864  * @brief Check if a Thread is currently enabled
2865  * @param t The Thread to check
2866  * @return True if the Thread is currently enabled
2867  */
2868 bool ModelChecker::is_enabled(Thread *t) const
2869 {
2870         return scheduler->is_enabled(t);
2871 }
2872
2873 /**
2874  * @brief Check if a Thread is currently enabled
2875  * @param tid The ID of the Thread to check
2876  * @return True if the Thread is currently enabled
2877  */
2878 bool ModelChecker::is_enabled(thread_id_t tid) const
2879 {
2880         return scheduler->is_enabled(tid);
2881 }
2882
2883 /**
2884  * Switch from a model-checker context to a user-thread context. This is the
2885  * complement of ModelChecker::switch_to_master and must be called from the
2886  * model-checker context
2887  *
2888  * @param thread The user-thread to switch to
2889  */
2890 void ModelChecker::switch_from_master(Thread *thread)
2891 {
2892         scheduler->set_current_thread(thread);
2893         Thread::swap(&system_context, thread);
2894 }
2895
2896 /**
2897  * Switch from a user-context to the "master thread" context (a.k.a. system
2898  * context). This switch is made with the intention of exploring a particular
2899  * model-checking action (described by a ModelAction object). Must be called
2900  * from a user-thread context.
2901  *
2902  * @param act The current action that will be explored. May be NULL only if
2903  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2904  * ModelChecker::has_asserted).
2905  * @return The value returned by the current action
2906  */
2907 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2908 {
2909         DBG();
2910         Thread *old = thread_current();
2911         ASSERT(!old->get_pending());
2912         old->set_pending(act);
2913         if (Thread::swap(old, &system_context) < 0) {
2914                 perror("swap threads");
2915                 exit(EXIT_FAILURE);
2916         }
2917         return old->get_return_value();
2918 }
2919
2920 /**
2921  * Takes the next step in the execution, if possible.
2922  * @param curr The current step to take
2923  * @return Returns the next Thread to run, if any; NULL if this execution
2924  * should terminate
2925  */
2926 Thread * ModelChecker::take_step(ModelAction *curr)
2927 {
2928         Thread *curr_thrd = get_thread(curr);
2929         ASSERT(curr_thrd->get_state() == THREAD_READY);
2930
2931         curr = check_current_action(curr);
2932
2933         /* Infeasible -> don't take any more steps */
2934         if (is_infeasible())
2935                 return NULL;
2936         else if (isfeasibleprefix() && have_bug_reports()) {
2937                 set_assert();
2938                 return NULL;
2939         }
2940
2941         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2942                 return NULL;
2943
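        /* A blocked or completed thread can take no further steps */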
2944         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2945                 scheduler->remove_thread(curr_thrd);
2946
2947         Thread *next_thrd = get_next_thread(curr);
2948
2949         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2950                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2951
2952         return next_thrd;
2953 }
2954
2955 /** Wrapper to run the user's main function, with appropriate arguments */
2956 void user_main_wrapper(void *)
2957 {
2958         user_main(model->params.argc, model->params.argv);
2959 }
2960
2961 /** @brief Run ModelChecker for the user program */
2962 void ModelChecker::run()
2963 {
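        /* Each iteration of the outer loop explores one complete execution
         * of the user program */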
2964         do {
2965                 thrd_t user_thread;
2966                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2967                 add_thread(t);
2968
2969                 do {
2970                         /*
2971                          * Stash next pending action(s) for thread(s). There
2972                          * should only need to stash one thread's action--the
2973                          * thread which just took a step--plus the first step
2974                          * for any newly-created thread
2975                          */
2976                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2977                                 thread_id_t tid = int_to_id(i);
2978                                 Thread *thr = get_thread(tid);
2979                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2980                                         switch_from_master(thr);
2981                                 }
2982                         }
2983
2984                         /* Catch assertions from prior take_step or from
2985                          * between-ModelAction bugs (e.g., data races) */
2986                         if (has_asserted())
2987                                 break;
2988
2989                         /* Consume the next action for a Thread */
2990                         ModelAction *curr = t->get_pending();
2991                         t->set_pending(NULL);
2992                         t = take_step(curr);
2993                 } while (t && !t->is_model_thread());
2994
2995                 /*
2996                  * Launch end-of-execution release sequence fixups only when
2997                  * the execution is otherwise feasible AND there are:
2998                  *
2999                  * (1) pending release sequences
3000                  * (2) pending assertions that could be invalidated by a change
3001                  * in clock vectors (i.e., data races)
3002                  * (3) no pending promises
3003                  */
3004                 while (!pending_rel_seqs->empty() &&
3005                                 is_feasible_prefix_ignore_relseq() &&
3006                                 !unrealizedraces.empty()) {
3007                         model_print("*** WARNING: release sequence fixup action "
3008                                         "(%zu pending release sequence(s)) ***\n",
3009                                         pending_rel_seqs->size());
3010                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3011                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3012                                         model_thread);
3013                         take_step(fixup);
3014                 }
3015         } while (next_execution());
3016
3017         model_print("******* Model-checking complete: *******\n");
3018         print_stats();
3019 }