promise: add 'same_value' helper, force value-checking in CycleGraph
[model-checker.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of the read/write portions of an RMW action, force THREAD_CREATE
212  * to be followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: the current ModelAction. Only used if non-NULL; it may
218  * guide the choice of the next thread (i.e., THREAD_CREATE should be followed
219  * by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; NULL if no threads
221  * remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be.  These actions are stored as the corresponding threads'
300  * pending actions; this method marks each of them with the sleep flag.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* A sleeping acquire fence may be woken by any release operation or fence */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can wake a load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
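/*
 * Illustrative sketch of the last rule above (hypothetical user code, not
 * part of this file): a plain store preceded by a release fence in one
 * thread may wake a thread sleeping on a load-acquire of the same location:
 *
 *   std::atomic<int> x(0);
 *   // Sleeping thread's pending action:
 *   int r = x.load(std::memory_order_acquire);
 *   // Other thread:
 *   std::atomic_thread_fence(std::memory_order_release);
 *   x.store(1, std::memory_order_relaxed);
 */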
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
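/*
 * Illustrative sketch of a deadlock this check reports (hypothetical user
 * code, not part of this file): neither thread is enabled, yet both still
 * have pending actions.
 *
 *   std::mutex a, b;
 *   // Thread 1:              // Thread 2:
 *   a.lock();                 b.lock();
 *   b.lock();  // blocks      a.lock();  // blocks -> deadlock
 */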
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution).
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
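/*
 * Note: user programs normally reach assert_user_bug() indirectly, e.g.
 * through an assertion macro in the model checker's user-facing headers
 * (such as MODEL_ASSERT()), rather than by calling it directly.
 */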
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else if (scheduler->all_threads_sleeping())
487                 stats.num_redundant++;
488         else
489                 ASSERT(false);
490 }
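/*
 * Note: the if/else chain above places every execution in exactly one
 * category, so num_total == num_infeasible + num_buggy_executions +
 * num_complete + num_redundant.
 */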
491
492 /** @brief Print execution stats */
493 void ModelChecker::print_stats() const
494 {
495         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
496         model_print("Number of redundant executions: %d\n", stats.num_redundant);
497         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
498         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
499         model_print("Total executions: %d\n", stats.num_total);
500         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
501 }
502
503 /**
504  * @brief End-of-execution print
505  * @param printbugs Should any existing bugs be printed?
506  */
507 void ModelChecker::print_execution(bool printbugs) const
508 {
509         print_program_output();
510
511         if (DBG_ENABLED() || params.verbose) {
512                 model_print("Earliest divergence point since last feasible execution:\n");
513                 if (earliest_diverge)
514                         earliest_diverge->print();
515                 else
516                         model_print("(Not set)\n");
517
518                 model_print("\n");
519                 print_stats();
520         }
521
522         /* Don't print invalid bugs */
523         if (printbugs)
524                 print_bugs();
525
526         model_print("\n");
527         print_summary();
528 }
529
530 /**
531  * Queries the model-checker for more executions to explore and, if one
532  * exists, resets the model-checker state to execute a new execution.
533  *
534  * @return If there are more executions to explore, return true. Otherwise,
535  * return false.
536  */
537 bool ModelChecker::next_execution()
538 {
539         DBG();
540         /* Is this execution a feasible execution that's worth bug-checking? */
541         bool complete = isfeasibleprefix() && (is_complete_execution() ||
542                         have_bug_reports());
543
544         /* End-of-execution bug checks */
545         if (complete) {
546                 if (is_deadlocked())
547                         assert_bug("Deadlock detected");
548
549                 checkDataRaces();
550         }
551
552         record_stats();
553
554         /* Output */
555         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
556                 print_execution(complete);
557         else
558                 clear_program_output();
559
560         if (complete)
561                 earliest_diverge = NULL;
562
563         if ((diverge = get_next_backtrack()) == NULL)
564                 return false;
565
566         if (DBG_ENABLED()) {
567                 model_print("Next execution will diverge at:\n");
568                 diverge->print();
569         }
570
571         reset_to_initial_state();
572         return true;
573 }
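/*
 * Sketch of how this method is typically driven (hypothetical driver code,
 * not part of this file):
 *
 *   do {
 *           // ... run the user program to completion under the scheduler ...
 *   } while (model->next_execution());
 */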
574
575 /**
576  * @brief Find the last fence-related backtracking conflict for a ModelAction
577  *
578  * This function performs the search for the most recent conflicting action
579  * against which we should perform backtracking, as affected by fence
580  * operations. This includes pairs of potentially-synchronizing actions which
581  * occur due to fence-acquire or fence-release, and hence should be explored in
582  * the opposite execution order.
583  *
584  * @param act The current action
585  * @return The most recent action which conflicts with act due to fences
586  */
587 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
588 {
589         /* Only perform release/acquire fence backtracking for stores */
590         if (!act->is_write())
591                 return NULL;
592
593         /* Find a fence-release (or, act is a release) */
594         ModelAction *last_release;
595         if (act->is_release())
596                 last_release = act;
597         else
598                 last_release = get_last_fence_release(act->get_tid());
599         if (!last_release)
600                 return NULL;
601
602         /* Skip past the release */
603         action_list_t *list = action_trace;
604         action_list_t::reverse_iterator rit;
605         for (rit = list->rbegin(); rit != list->rend(); rit++)
606                 if (*rit == last_release)
607                         break;
608         ASSERT(rit != list->rend());
609
610         /* Find a prior:
611          *   load-acquire
612          * or
613          *   load --sb-> fence-acquire */
614         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
615         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
616         bool found_acquire_fences = false;
617         for ( ; rit != list->rend(); rit++) {
618                 ModelAction *prev = *rit;
619                 if (act->same_thread(prev))
620                         continue;
621
622                 int tid = id_to_int(prev->get_tid());
623
624                 if (prev->is_read() && act->same_var(prev)) {
625                         if (prev->is_acquire()) {
626                                 /* Found most recent load-acquire, don't need
627                                  * to search for more fences */
628                                 if (!found_acquire_fences)
629                                         return NULL;
630                         } else {
631                                 prior_loads[tid] = prev;
632                         }
633                 }
634                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
635                         found_acquire_fences = true;
636                         acquire_fences[tid] = prev;
637                 }
638         }
639
640         ModelAction *latest_backtrack = NULL;
641         for (unsigned int i = 0; i < acquire_fences.size(); i++)
642                 if (acquire_fences[i] && prior_loads[i])
643                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
644                                 latest_backtrack = acquire_fences[i];
645         return latest_backtrack;
646 }
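/*
 * Illustrative sketch of the conflict pattern searched for above
 * (hypothetical user code, not part of this file): the relaxed store and the
 * relaxed load potentially synchronize only via the fences, so both
 * execution orders of the (store, load) pair should be explored:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1 (act is the store):
 *   std::atomic_thread_fence(std::memory_order_release);
 *   x.store(1, std::memory_order_relaxed);
 *   // Thread 2:
 *   int r = x.load(std::memory_order_relaxed);
 *   std::atomic_thread_fence(std::memory_order_acquire);
 */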
647
648 /**
649  * @brief Find the last backtracking conflict for a ModelAction
650  *
651  * This function performs the search for the most recent conflicting action
652  * against which we should perform backtracking. This primarily includes pairs of
653  * synchronizing actions which should be explored in the opposite execution
654  * order.
655  *
656  * @param act The current action
657  * @return The most recent action which conflicts with act
658  */
659 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
660 {
661         switch (act->get_type()) {
662         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
663         case ATOMIC_READ:
664         case ATOMIC_WRITE:
665         case ATOMIC_RMW: {
666                 ModelAction *ret = NULL;
667
668                 /* linear search: from most recent to oldest */
669                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
670                 action_list_t::reverse_iterator rit;
671                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
672                         ModelAction *prev = *rit;
673                         if (prev->could_synchronize_with(act)) {
674                                 ret = prev;
675                                 break;
676                         }
677                 }
678
679                 ModelAction *ret2 = get_last_fence_conflict(act);
680                 if (!ret2)
681                         return ret;
682                 if (!ret)
683                         return ret2;
684                 if (*ret < *ret2)
685                         return ret2;
686                 return ret;
687         }
688         case ATOMIC_LOCK:
689         case ATOMIC_TRYLOCK: {
690                 /* linear search: from most recent to oldest */
691                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
692                 action_list_t::reverse_iterator rit;
693                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
694                         ModelAction *prev = *rit;
695                         if (act->is_conflicting_lock(prev))
696                                 return prev;
697                 }
698                 break;
699         }
700         case ATOMIC_UNLOCK: {
701                 /* linear search: from most recent to oldest */
702                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
703                 action_list_t::reverse_iterator rit;
704                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
705                         ModelAction *prev = *rit;
706                         if (!act->same_thread(prev) && prev->is_failed_trylock())
707                                 return prev;
708                 }
709                 break;
710         }
711         case ATOMIC_WAIT: {
712                 /* linear search: from most recent to oldest */
713                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
714                 action_list_t::reverse_iterator rit;
715                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
716                         ModelAction *prev = *rit;
717                         if (!act->same_thread(prev) && prev->is_failed_trylock())
718                                 return prev;
719                         if (!act->same_thread(prev) && prev->is_notify())
720                                 return prev;
721                 }
722                 break;
723         }
724
725         case ATOMIC_NOTIFY_ALL:
726         case ATOMIC_NOTIFY_ONE: {
727                 /* linear search: from most recent to oldest */
728                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
729                 action_list_t::reverse_iterator rit;
730                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
731                         ModelAction *prev = *rit;
732                         if (!act->same_thread(prev) && prev->is_wait())
733                                 return prev;
734                 }
735                 break;
736         }
737         default:
738                 break;
739         }
740         return NULL;
741 }
742
743 /** This method finds backtracking points: prior conflicting actions against
744  * which we should try to reorder the parameter ModelAction.
745  *
746  * @param act The ModelAction for which to find backtracking points.
747  */
748 void ModelChecker::set_backtracking(ModelAction *act)
749 {
750         Thread *t = get_thread(act);
751         ModelAction *prev = get_last_conflict(act);
752         if (prev == NULL)
753                 return;
754
755         Node *node = prev->get_node()->get_parent();
756
757         int low_tid, high_tid;
758         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
759                 low_tid = id_to_int(act->get_tid());
760                 high_tid = low_tid + 1;
761         } else {
762                 low_tid = 0;
763                 high_tid = get_num_threads();
764         }
765
766         for (int i = low_tid; i < high_tid; i++) {
767                 thread_id_t tid = int_to_id(i);
768
769                 /* Make sure this thread can be enabled here. */
770                 if (i >= node->get_num_threads())
771                         break;
772
773                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774                 if (node->enabled_status(tid) != THREAD_ENABLED)
775                         continue;
776
777                 /* Check if this has been explored already */
778                 if (node->has_been_explored(tid))
779                         continue;
780
781                 /* See if fairness allows */
782                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
783                         bool unfair = false;
784                         for (int t = 0; t < node->get_num_threads(); t++) {
785                                 thread_id_t tother = int_to_id(t);
786                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
787                                         unfair = true;
788                                         break;
789                                 }
790                         }
791                         if (unfair)
792                                 continue;
793                 }
794                 /* Cache the latest backtracking point */
795                 set_latest_backtrack(prev);
796
797                 /* If this is a new backtracking point, mark the tree */
798                 if (!node->set_backtrack(tid))
799                         continue;
800                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
801                                         id_to_int(prev->get_tid()),
802                                         id_to_int(t->get_id()));
803                 if (DBG_ENABLED()) {
804                         prev->print();
805                         act->print();
806                 }
807         }
808 }
809
810 /**
811  * @brief Cache a backtracking point as the "most recent", if eligible
812  *
813  * Note that this does not prepare the NodeStack for this backtracking
814  * operation; it only caches the action on a per-execution basis.
815  *
816  * @param act The operation at which we should explore a different next action
817  * (i.e., backtracking point)
818  * @return True, if this action is now the most recent backtracking point;
819  * false otherwise
820  */
821 bool ModelChecker::set_latest_backtrack(ModelAction *act)
822 {
823         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
824                 priv->next_backtrack = act;
825                 return true;
826         }
827         return false;
828 }
829
830 /**
831  * Returns the last backtracking point. The model checker will explore a
832  * different path from this point in the next execution.
833  * @return The ModelAction at which the next execution should diverge.
834  */
835 ModelAction * ModelChecker::get_next_backtrack()
836 {
837         ModelAction *next = priv->next_backtrack;
838         priv->next_backtrack = NULL;
839         return next;
840 }
841
842 /**
843  * Processes a read model action.
844  * @param curr is the read model action to process.
845  * @return True if processing this read updates the mo_graph.
846  */
847 bool ModelChecker::process_read(ModelAction *curr)
848 {
849         Node *node = curr->get_node();
850         uint64_t value = VALUE_NONE;
851         bool updated = false;
852         while (true) {
853                 switch (node->get_read_from_status()) {
854                 case READ_FROM_PAST: {
855                         const ModelAction *rf = node->get_read_from_past();
856                         ASSERT(rf);
857
858                         mo_graph->startChanges();
859                         value = rf->get_value();
860                         check_recency(curr, rf);
861                         bool r_status = r_modification_order(curr, rf);
862
863                         if (is_infeasible() && node->increment_read_from()) {
864                                 mo_graph->rollbackChanges();
865                                 priv->too_many_reads = false;
866                                 continue;
867                         }
868
869                         read_from(curr, rf);
870                         mo_graph->commitChanges();
871                         mo_check_promises(curr, true);
872
873                         updated |= r_status;
874                         break;
875                 }
876                 case READ_FROM_PROMISE: {
877                         Promise *promise = curr->get_node()->get_read_from_promise();
878                         promise->add_reader(curr);
879                         value = promise->get_value();
880                         curr->set_read_from_promise(promise);
881                         mo_graph->startChanges();
882                         updated = r_modification_order(curr, promise);
883                         mo_graph->commitChanges();
884                         break;
885                 }
886                 case READ_FROM_FUTURE: {
887                         /* Read from future value */
888                         struct future_value fv = node->get_future_value();
889                         Promise *promise = new Promise(curr, fv);
890                         value = fv.value;
891                         curr->set_read_from_promise(promise);
892                         promises->push_back(promise);
893                         mo_graph->startChanges();
894                         updated = r_modification_order(curr, promise);
895                         mo_graph->commitChanges();
896                         break;
897                 }
898                 default:
899                         ASSERT(false);
900                 }
901                 get_thread(curr)->set_return_value(value);
902                 return updated;
903         }
904 }
905
906 /**
907  * Processes a lock, trylock, unlock, wait, or notify model action.
908  * @param curr is the mutex operation to process.
909  *
910  * The trylock operation checks whether the lock is taken.  If not, it
911  * falls through to the normal lock case.  If so, it fails, setting the
912  * thread's return value to 0.
913  *
914  * The lock operation has already been checked that it is enabled, so
915  * it just grabs the lock and synchronizes with the previous unlock.
916  *
917  * The unlock operation has to re-enable all of the threads that are
918  * waiting on the lock.
919  *
920  * @return True if synchronization was updated; false otherwise
921  */
922 bool ModelChecker::process_mutex(ModelAction *curr)
923 {
924         std::mutex *mutex = NULL;
925         struct std::mutex_state *state = NULL;
926
927         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
928                 mutex = (std::mutex *)curr->get_location();
929                 state = mutex->get_state();
930         } else if (curr->is_wait()) {
931                 mutex = (std::mutex *)curr->get_value();
932                 state = mutex->get_state();
933         }
934
935         switch (curr->get_type()) {
936         case ATOMIC_TRYLOCK: {
937                 bool success = !state->islocked;
938                 curr->set_try_lock(success);
939                 if (!success) {
940                         get_thread(curr)->set_return_value(0);
941                         break;
942                 }
943                 get_thread(curr)->set_return_value(1);
944         }
945                 //otherwise fall through to the lock case
946         case ATOMIC_LOCK: {
947                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
948                         assert_bug("Lock access before initialization");
949                 state->islocked = true;
950                 ModelAction *unlock = get_last_unlock(curr);
951                 //synchronize with the previous unlock statement
952                 if (unlock != NULL) {
953                         curr->synchronize_with(unlock);
954                         return true;
955                 }
956                 break;
957         }
958         case ATOMIC_UNLOCK: {
959                 //unlock the lock
960                 state->islocked = false;
961                 //wake up the other threads
962                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
963                 //activate all the waiting threads
964                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
965                         scheduler->wake(get_thread(*rit));
966                 }
967                 waiters->clear();
968                 break;
969         }
970         case ATOMIC_WAIT: {
971                 //unlock the lock
972                 state->islocked = false;
973                 //wake up the other threads
974                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
975                 //activate all the waiting threads
976                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
977                         scheduler->wake(get_thread(*rit));
978                 }
979                 waiters->clear();
980                 //check whether we should go to sleep or not... simulate spurious wake-ups
981                 if (curr->get_node()->get_misc() == 0) {
982                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
983                         //disable us
984                         scheduler->sleep(get_thread(curr));
985                 }
986                 break;
987         }
988         case ATOMIC_NOTIFY_ALL: {
989                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
990                 //activate all the waiting threads
991                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
992                         scheduler->wake(get_thread(*rit));
993                 }
994                 waiters->clear();
995                 break;
996         }
997         case ATOMIC_NOTIFY_ONE: {
998                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
999                 int wakeupthread = curr->get_node()->get_misc();
1000                 action_list_t::iterator it = waiters->begin();
1001                 advance(it, wakeupthread);
1002                 scheduler->wake(get_thread(*it));
1003                 waiters->erase(it);
1004                 break;
1005         }
1006
1007         default:
1008                 ASSERT(0);
1009         }
1010         return false;
1011 }
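/*
 * Illustrative sketch of the wait/notify cases above (hypothetical user code,
 * not part of this file; assumes the simplified std::condition_variable
 * interface modeled by this checker, where wait() takes the mutex directly):
 *
 *   std::mutex m;
 *   std::condition_variable cv;
 *   // Waiting thread:
 *   m.lock();
 *   cv.wait(m);        // ATOMIC_WAIT: releases m, may sleep or wake spuriously
 *   m.unlock();
 *   // Notifying thread:
 *   cv.notify_one();   // ATOMIC_NOTIFY_ONE: wakes one waiter chosen by the node
 */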
1012
1013 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1014 {
1015         /* Do more ambitious checks now that mo is more complete */
1016         if (mo_may_allow(writer, reader)) {
1017                 Node *node = reader->get_node();
1018
1019                 /* Find an ancestor thread which exists at the time of the reader */
1020                 Thread *write_thread = get_thread(writer);
1021                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1022                         write_thread = write_thread->get_parent();
1023
1024                 struct future_value fv = {
1025                         writer->get_write_value(),
1026                         writer->get_seq_number() + params.maxfuturedelay,
1027                         write_thread->get_id(),
1028                 };
1029                 if (node->add_future_value(fv))
1030                         set_latest_backtrack(reader);
1031         }
1032 }
1033
1034 /**
1035  * Process a write ModelAction
1036  * @param curr The ModelAction to process
1037  * @return True if the mo_graph was updated or promises were resolved
1038  */
1039 bool ModelChecker::process_write(ModelAction *curr)
1040 {
1041         bool updated_mod_order = w_modification_order(curr);
1042         bool updated_promises = resolve_promises(curr);
1043
1044         if (promises->size() == 0) {
1045                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1046                         struct PendingFutureValue pfv = (*futurevalues)[i];
1047                         add_future_value(pfv.writer, pfv.act);
1048                 }
1049                 futurevalues->clear();
1050         }
1051
1052         mo_graph->commitChanges();
1053         mo_check_promises(curr, false);
1054
1055         get_thread(curr)->set_return_value(VALUE_NONE);
1056         return updated_mod_order || updated_promises;
1057 }
1058
1059 /**
1060  * Process a fence ModelAction
1061  * @param curr The ModelAction to process
1062  * @return True if synchronization was updated
1063  */
1064 bool ModelChecker::process_fence(ModelAction *curr)
1065 {
1066         /*
1067          * fence-relaxed: no-op
1068  * fence-release: only log the occurrence (not in this function), for
1069          *   use in later synchronization
1070          * fence-acquire (this function): search for hypothetical release
1071          *   sequences
1072          */
1073         bool updated = false;
1074         if (curr->is_acquire()) {
1075                 action_list_t *list = action_trace;
1076                 action_list_t::reverse_iterator rit;
1077                 /* Find X : is_read(X) && X --sb-> curr */
1078                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1079                         ModelAction *act = *rit;
1080                         if (act == curr)
1081                                 continue;
1082                         if (act->get_tid() != curr->get_tid())
1083                                 continue;
1084                         /* Stop at the beginning of the thread */
1085                         if (act->is_thread_start())
1086                                 break;
1087                         /* Stop once we reach a prior fence-acquire */
1088                         if (act->is_fence() && act->is_acquire())
1089                                 break;
1090                         if (!act->is_read())
1091                                 continue;
1092                         /* read-acquire will find its own release sequences */
1093                         if (act->is_acquire())
1094                                 continue;
1095
1096                         /* Establish hypothetical release sequences */
1097                         rel_heads_list_t release_heads;
1098                         get_release_seq_heads(curr, act, &release_heads);
1099                         for (unsigned int i = 0; i < release_heads.size(); i++)
1100                                 if (!curr->synchronize_with(release_heads[i]))
1101                                         set_bad_synchronization();
1102                         if (release_heads.size() != 0)
1103                                 updated = true;
1104                 }
1105         }
1106         return updated;
1107 }
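/*
 * Illustrative sketch of the fence-acquire case handled above (hypothetical
 * user code, not part of this file): a relaxed load sequenced before an
 * acquire fence can synchronize with the release store it reads from:
 *
 *   std::atomic<int> flag(0);
 *   int data;
 *   // Thread 1:
 *   data = 42;
 *   flag.store(1, std::memory_order_release);
 *   // Thread 2:
 *   if (flag.load(std::memory_order_relaxed)) {
 *           std::atomic_thread_fence(std::memory_order_acquire);
 *           // data == 42 is guaranteed here
 *   }
 */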
1108
1109 /**
1110  * @brief Process the current action for thread-related activity
1111  *
1112  * Performs current-action processing for a THREAD_* ModelAction. Processing
1113  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1114  * synchronization, etc.  This function is a no-op for non-THREAD actions
1115  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1116  *
1117  * @param curr The current action
1118  * @return True if synchronization was updated or a thread completed
1119  */
1120 bool ModelChecker::process_thread_action(ModelAction *curr)
1121 {
1122         bool updated = false;
1123
1124         switch (curr->get_type()) {
1125         case THREAD_CREATE: {
1126                 thrd_t *thrd = (thrd_t *)curr->get_location();
1127                 struct thread_params *params = (struct thread_params *)curr->get_value();
1128                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1129                 add_thread(th);
1130                 th->set_creation(curr);
1131                 /* Promises can be satisfied by children */
1132                 for (unsigned int i = 0; i < promises->size(); i++) {
1133                         Promise *promise = (*promises)[i];
1134                         if (promise->thread_is_available(curr->get_tid()))
1135                                 promise->add_thread(th->get_id());
1136                 }
1137                 break;
1138         }
1139         case THREAD_JOIN: {
1140                 Thread *blocking = curr->get_thread_operand();
1141                 ModelAction *act = get_last_action(blocking->get_id());
1142                 curr->synchronize_with(act);
1143                 updated = true; /* trigger rel-seq checks */
1144                 break;
1145         }
1146         case THREAD_FINISH: {
1147                 Thread *th = get_thread(curr);
1148                 while (!th->wait_list_empty()) {
1149                         ModelAction *act = th->pop_wait_list();
1150                         scheduler->wake(get_thread(act));
1151                 }
1152                 th->complete();
1153                 /* Completed thread can't satisfy promises */
1154                 for (unsigned int i = 0; i < promises->size(); i++) {
1155                         Promise *promise = (*promises)[i];
1156                         if (promise->thread_is_available(th->get_id()))
1157                                 if (promise->eliminate_thread(th->get_id()))
1158                                         priv->failed_promise = true;
1159                 }
1160                 updated = true; /* trigger rel-seq checks */
1161                 break;
1162         }
1163         case THREAD_START: {
1164                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1165                 break;
1166         }
1167         default:
1168                 break;
1169         }
1170
1171         return updated;
1172 }
1173
1174 /**
1175  * @brief Process the current action for release sequence fixup activity
1176  *
1177  * Performs model-checker release sequence fixups for the current action,
1178  * forcing a single pending release sequence to break (with a given, potential
1179  * "loose" write) or to complete (i.e., synchronize). If a pending release
1180  * sequence forms a complete release sequence, then we must perform the fixup
1181  * synchronization, mo_graph additions, etc.
1182  *
1183  * @param curr The current action; must be a release sequence fixup action
1184  * @param work_queue The work queue to which to add work items as they are
1185  * generated
1186  */
1187 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1188 {
1189         const ModelAction *write = curr->get_node()->get_relseq_break();
1190         struct release_seq *sequence = pending_rel_seqs->back();
1191         pending_rel_seqs->pop_back();
1192         ASSERT(sequence);
1193         ModelAction *acquire = sequence->acquire;
1194         const ModelAction *rf = sequence->rf;
1195         const ModelAction *release = sequence->release;
1196         ASSERT(acquire);
1197         ASSERT(release);
1198         ASSERT(rf);
1199         ASSERT(release->same_thread(rf));
1200
1201         if (write == NULL) {
1202                 /**
1203                  * @todo Forcing a synchronization requires that we set
1204                  * modification order constraints. For instance, we can't allow
1205                  * a fixup sequence in which two separate read-acquire
1206                  * operations read from the same sequence, where the first one
1207                  * synchronizes and the other doesn't. Essentially, we can't
1208                  * allow any writes to insert themselves between 'release' and
1209                  * 'rf'
1210                  */
1211
1212                 /* Must synchronize */
1213                 if (!acquire->synchronize_with(release)) {
1214                         set_bad_synchronization();
1215                         return;
1216                 }
1217                 /* Re-check all pending release sequences */
1218                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1219                 /* Re-check act for mo_graph edges */
1220                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1221
1222                 /* propagate synchronization to later actions */
1223                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1224                 for (; (*rit) != acquire; rit++) {
1225                         ModelAction *propagate = *rit;
1226                         if (acquire->happens_before(propagate)) {
1227                                 propagate->synchronize_with(acquire);
1228                                 /* Re-check 'propagate' for mo_graph edges */
1229                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1230                         }
1231                 }
1232         } else {
1233                 /* Break release sequence with new edges:
1234                  *   release --mo--> write --mo--> rf */
1235                 mo_graph->addEdge(release, write);
1236                 mo_graph->addEdge(write, rf);
1237         }
1238
1239         /* See if we have realized a data race */
1240         checkDataRaces();
1241 }
1242
1243 /**
1244  * Initialize the current action by performing one or more of the following
1245  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1246  * in the NodeStack, manipulating backtracking sets, allocating and
1247  * initializing clock vectors, and computing the promises to fulfill.
1248  *
1249  * @param curr The current action, as passed from the user context; may be
1250  * freed/invalidated after the execution of this function, with a different
1251  * action "returned" in its place (pass-by-reference)
1252  * @return True if curr is a newly-explored action; false otherwise
1253  */
1254 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1255 {
1256         ModelAction *newcurr;
1257
1258         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1259                 newcurr = process_rmw(*curr);
1260                 delete *curr;
1261
1262                 if (newcurr->is_rmw())
1263                         compute_promises(newcurr);
1264
1265                 *curr = newcurr;
1266                 return false;
1267         }
1268
1269         (*curr)->set_seq_number(get_next_seq_num());
1270
1271         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1272         if (newcurr) {
1273                 /* First restore type and order in case of RMW operation */
1274                 if ((*curr)->is_rmwr())
1275                         newcurr->copy_typeandorder(*curr);
1276
1277                 ASSERT((*curr)->get_location() == newcurr->get_location());
1278                 newcurr->copy_from_new(*curr);
1279
1280                 /* Discard duplicate ModelAction; use action from NodeStack */
1281                 delete *curr;
1282
1283                 /* Always compute new clock vector */
1284                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1285
1286                 *curr = newcurr;
1287                 return false; /* Action was explored previously */
1288         } else {
1289                 newcurr = *curr;
1290
1291                 /* Always compute new clock vector */
1292                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1293
1294                 /* Assign most recent release fence */
1295                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1296
1297                 /*
1298                  * Perform one-time actions when pushing new ModelAction onto
1299                  * NodeStack
1300                  */
1301                 if (newcurr->is_write())
1302                         compute_promises(newcurr);
1303                 else if (newcurr->is_relseq_fixup())
1304                         compute_relseq_breakwrites(newcurr);
1305                 else if (newcurr->is_wait())
1306                         newcurr->get_node()->set_misc_max(2);
1307                 else if (newcurr->is_notify_one()) {
1308                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1309                 }
1310                 return true; /* This was a new ModelAction */
1311         }
1312 }
1313
1314 /**
1315  * @brief Establish reads-from relation between two actions
1316  *
1317  * Perform basic operations involved with establishing a concrete rf relation,
1318  * including setting the ModelAction data and checking for release sequences.
1319  *
1320  * @param act The action that is reading (must be a read)
1321  * @param rf The action from which we are reading (must be a write)
1322  *
1323  * @return True if this read established synchronization
1324  */
1325 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1326 {
1327         ASSERT(rf);
1328         act->set_read_from(rf);
1329         if (act->is_acquire()) {
1330                 rel_heads_list_t release_heads;
1331                 get_release_seq_heads(act, act, &release_heads);
1332                 int num_heads = release_heads.size();
1333                 for (unsigned int i = 0; i < release_heads.size(); i++)
1334                         if (!act->synchronize_with(release_heads[i])) {
1335                                 set_bad_synchronization();
1336                                 num_heads--;
1337                         }
1338                 return num_heads > 0;
1339         }
1340         return false;
1341 }
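/*
 * Illustrative sketch of a release sequence found above (hypothetical user
 * code, not part of this file): under the C++11 rules this checker models, a
 * later relaxed store by the same thread remains part of the release
 * sequence headed by the release store, so an acquire load reading from it
 * still synchronizes with the release:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);   // head of release sequence
 *   x.store(2, std::memory_order_relaxed);   // same thread: still in sequence
 *   // Thread 2:
 *   int r = x.load(std::memory_order_acquire);   // reading 2 still synchronizes
 */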
1342
1343 /**
1344  * Check promises and eliminate potentially-satisfying threads when a thread is
1345  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1346  * no longer satisfy a promise generated from that thread.
1347  *
1348  * @param blocker The thread on which a thread is waiting
1349  * @param waiting The waiting thread
1350  */
1351 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1352 {
1353         for (unsigned int i = 0; i < promises->size(); i++) {
1354                 Promise *promise = (*promises)[i];
1355                 if (!promise->thread_is_available(waiting->get_id()))
1356                         continue;
1357                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1358                         ModelAction *reader = promise->get_reader(j);
1359                         if (reader->get_tid() != blocker->get_id())
1360                                 continue;
1361                         if (promise->eliminate_thread(waiting->get_id())) {
1362                                 /* Promise has failed */
1363                                 priv->failed_promise = true;
1364                         } else {
1365                                 /* Only eliminate the 'waiting' thread once */
1366                                 return;
1367                         }
1368                 }
1369         }
1370 }
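/*
 * Illustrative sketch only: if thread A blocks by joining thread B, A is
 * eliminated from any promise whose speculative reader is in B, because every
 * store A performs afterwards necessarily comes after B has finished and thus
 * after that reader:
 *
 *   // Thread A:
 *   // ... join thread B ...                  (blocker == B, waiting == A)
 *   x.store(1, std::memory_order_relaxed);    // too late to satisfy a promise
 *                                             // whose reader ran in B
 */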
1371
1372 /**
1373  * @brief Check whether a model action is enabled.
1374  *
1375  * Checks whether a lock or join operation would be successful (i.e., is the
1376  * lock already locked, or is the joined thread already complete). If not, put
1377  * Checks whether a lock or join operation would be successful (i.e., the lock
1378  * is not already held, or the joined thread has already completed). If not, put
1379  * @param curr is the ModelAction to check whether it is enabled.
1380  * @return a bool that indicates whether the action is enabled.
1381  */
1382 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1383         if (curr->is_lock()) {
1384                 std::mutex *lock = (std::mutex *)curr->get_location();
1385                 struct std::mutex_state *state = lock->get_state();
1386                 if (state->islocked) {
1387                         //Stick the action in the appropriate waiting queue
1388                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1389                         return false;
1390                 }
1391         } else if (curr->get_type() == THREAD_JOIN) {
1392                 Thread *blocking = (Thread *)curr->get_location();
1393                 if (!blocking->is_complete()) {
1394                         blocking->push_wait_list(curr);
1395                         thread_blocking_check_promises(blocking, get_thread(curr));
1396                         return false;
1397                 }
1398         }
1399
1400         return true;
1401 }
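/*
 * Illustrative sketch only: a lock on a mutex currently held by another thread
 * is "not enabled"; the action is parked on lock_waiters_map and its thread is
 * put to sleep until the corresponding unlock wakes it:
 *
 *   std::mutex m;
 *   // Thread 1:                  // Thread 2:
 *   m.lock();                     m.lock();     // not enabled while Thread 1 holds m
 *   // ...critical section...     // ...runs only after Thread 1's m.unlock()
 *   m.unlock();                   m.unlock();
 */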
1402
1403 /**
1404  * This is the heart of the model checker routine. It performs model-checking
1405  * actions corresponding to a given "current action." Among other processes, it
1406  * calculates reads-from relationships, updates synchronization clock vectors,
1407  * forms a memory_order constraints graph, and handles replay/backtrack
1408  * execution when running permutations of previously-observed executions.
1409  *
1410  * @param curr The current action to process
1411  * @return The ModelAction that is actually executed; may be different than
1412  * curr; may be NULL, if the current action is not enabled to run
1413  */
1414 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1415 {
1416         ASSERT(curr);
1417         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1418
1419         if (!check_action_enabled(curr)) {
1420                 /* Make the execution look like we chose to run this action
1421                  * much later, when a lock/join can succeed */
1422                 get_thread(curr)->set_pending(curr);
1423                 scheduler->sleep(get_thread(curr));
1424                 return NULL;
1425         }
1426
1427         bool newly_explored = initialize_curr_action(&curr);
1428
1429         DBG();
1430         if (DBG_ENABLED())
1431                 curr->print();
1432
1433         wake_up_sleeping_actions(curr);
1434
1435         /* Add the action to lists before any other model-checking tasks */
1436         if (!second_part_of_rmw)
1437                 add_action_to_lists(curr);
1438
1439         /* Build may_read_from set for newly-created actions */
1440         if (newly_explored && curr->is_read())
1441                 build_may_read_from(curr);
1442
1443         /* Initialize work_queue with the "current action" work */
1444         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1445         while (!work_queue.empty() && !has_asserted()) {
1446                 WorkQueueEntry work = work_queue.front();
1447                 work_queue.pop_front();
1448
1449                 switch (work.type) {
1450                 case WORK_CHECK_CURR_ACTION: {
1451                         ModelAction *act = work.action;
1452                         bool update = false; /* update this location's release seq's */
1453                         bool update_all = false; /* update all release seq's */
1454
1455                         if (process_thread_action(curr))
1456                                 update_all = true;
1457
1458                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1459                                 update = true;
1460
1461                         if (act->is_write() && process_write(act))
1462                                 update = true;
1463
1464                         if (act->is_fence() && process_fence(act))
1465                                 update_all = true;
1466
1467                         if (act->is_mutex_op() && process_mutex(act))
1468                                 update_all = true;
1469
1470                         if (act->is_relseq_fixup())
1471                                 process_relseq_fixup(curr, &work_queue);
1472
1473                         if (update_all)
1474                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1475                         else if (update)
1476                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1477                         break;
1478                 }
1479                 case WORK_CHECK_RELEASE_SEQ:
1480                         resolve_release_sequences(work.location, &work_queue);
1481                         break;
1482                 case WORK_CHECK_MO_EDGES: {
1483                         /** @todo Complete verification of work_queue */
1484                         ModelAction *act = work.action;
1485                         bool updated = false;
1486
1487                         if (act->is_read()) {
1488                                 const ModelAction *rf = act->get_reads_from();
1489                                 const Promise *promise = act->get_reads_from_promise();
1490                                 if (rf) {
1491                                         if (r_modification_order(act, rf))
1492                                                 updated = true;
1493                                 } else if (promise) {
1494                                         if (r_modification_order(act, promise))
1495                                                 updated = true;
1496                                 }
1497                         }
1498                         if (act->is_write()) {
1499                                 if (w_modification_order(act))
1500                                         updated = true;
1501                         }
1502                         mo_graph->commitChanges();
1503
1504                         if (updated)
1505                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1506                         break;
1507                 }
1508                 default:
1509                         ASSERT(false);
1510                         break;
1511                 }
1512         }
1513
1514         check_curr_backtracking(curr);
1515         set_backtracking(curr);
1516         return curr;
1517 }
1518
1519 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1520 {
1521         Node *currnode = curr->get_node();
1522         Node *parnode = currnode->get_parent();
1523
1524         if ((parnode && !parnode->backtrack_empty()) ||
1525                          !currnode->misc_empty() ||
1526                          !currnode->read_from_empty() ||
1527                          !currnode->promise_empty() ||
1528                          !currnode->relseq_break_empty()) {
1529                 set_latest_backtrack(curr);
1530         }
1531 }
1532
1533 bool ModelChecker::promises_expired() const
1534 {
1535         for (unsigned int i = 0; i < promises->size(); i++) {
1536                 Promise *promise = (*promises)[i];
1537                 if (promise->get_expiration() < priv->used_sequence_numbers)
1538                         return true;
1539         }
1540         return false;
1541 }
1542
1543 /**
1544  * This is the strongest feasibility check available.
1545  * @return whether the current trace (partial or complete) must be a prefix of
1546  * a feasible trace.
1547  */
1548 bool ModelChecker::isfeasibleprefix() const
1549 {
1550         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1551 }
1552
1553 /**
1554  * Print diagnostic information about an infeasible execution
1555  * @param prefix A string to prefix the output with; if NULL, then a default
1556  * message prefix will be provided
1557  */
1558 void ModelChecker::print_infeasibility(const char *prefix) const
1559 {
1560         char buf[100];
1561         char *ptr = buf;
1562         if (mo_graph->checkForCycles())
1563                 ptr += sprintf(ptr, "[mo cycle]");
1564         if (priv->failed_promise)
1565                 ptr += sprintf(ptr, "[failed promise]");
1566         if (priv->too_many_reads)
1567                 ptr += sprintf(ptr, "[too many reads]");
1568         if (priv->no_valid_reads)
1569                 ptr += sprintf(ptr, "[no valid reads-from]");
1570         if (priv->bad_synchronization)
1571                 ptr += sprintf(ptr, "[bad sw ordering]");
1572         if (promises_expired())
1573                 ptr += sprintf(ptr, "[promise expired]");
1574         if (promises->size() != 0)
1575                 ptr += sprintf(ptr, "[unresolved promise]");
1576         if (ptr != buf)
1577                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1578 }
1579
1580 /**
1581  * Returns whether the current completed trace is feasible, except for pending
1582  * release sequences.
1583  */
1584 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1585 {
1586         return !is_infeasible() && promises->size() == 0;
1587 }
1588
1589 /**
1590  * Check if the current partial trace is infeasible. Does not check any
1591  * end-of-execution flags, which might rule out the execution. Thus, this is
1592  * useful only for ruling an execution as infeasible.
1593  * @return whether the current partial trace is infeasible.
1594  */
1595 bool ModelChecker::is_infeasible() const
1596 {
1597         return mo_graph->checkForCycles() ||
1598                 priv->no_valid_reads ||
1599                 priv->failed_promise ||
1600                 priv->too_many_reads ||
1601                 priv->bad_synchronization ||
1602                 promises_expired();
1603 }
1604
1605 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1606 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1607         ModelAction *lastread = get_last_action(act->get_tid());
1608         lastread->process_rmw(act);
1609         if (act->is_rmw()) {
1610                 if (lastread->get_reads_from())
1611                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1612                 else
1613                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1614                 mo_graph->commitChanges();
1615         }
1616         return lastread;
1617 }
1618
1619 /**
1620  * Checks whether a thread has read from the same write too many times
1621  * without seeing the effects of a later write.
1622  *
1623  * Basic idea:
1624  * 1) there must be a different write that we could read from that would satisfy the modification order,
1625  * 2) we must have read from the same value in excess of maxreads times, and
1626  * 3) that other write must have been in the reads_from set for maxreads times.
1627  *
1628  * If so, we decide that the execution is no longer feasible.
1629  */
1630 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1631 {
1632         if (params.maxreads != 0) {
1633                 if (curr->get_node()->get_read_from_past_size() <= 1)
1634                         return;
1635                 //Must make sure that execution is currently feasible...  We could
1636                 //accidentally clear by rolling back
1637                 if (is_infeasible())
1638                         return;
1639                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1640                 int tid = id_to_int(curr->get_tid());
1641
1642                 /* Skip checks */
1643                 if ((int)thrd_lists->size() <= tid)
1644                         return;
1645                 action_list_t *list = &(*thrd_lists)[tid];
1646
1647                 action_list_t::reverse_iterator rit = list->rbegin();
1648                 /* Skip past curr */
1649                 for (; (*rit) != curr; rit++)
1650                         ;
1651                 /* go past curr now */
1652                 rit++;
1653
1654                 action_list_t::reverse_iterator ritcopy = rit;
1655                 //See if we have enough reads from the same value
1656                 int count = 0;
1657                 for (; count < params.maxreads; rit++, count++) {
1658                         if (rit == list->rend())
1659                                 return;
1660                         ModelAction *act = *rit;
1661                         if (!act->is_read())
1662                                 return;
1663
1664                         if (act->get_reads_from() != rf)
1665                                 return;
1666                         if (act->get_node()->get_read_from_past_size() <= 1)
1667                                 return;
1668                 }
1669                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1670                         /* Get write */
1671                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1672
1673                         /* Need a different write */
1674                         if (write == rf)
1675                                 continue;
1676
1677                         /* Test to see whether this is a feasible write to read from */
1678                         /** NOTE: all members of read-from set should be
1679                          *  feasible, so we no longer check it here **/
1680
1681                         rit = ritcopy;
1682
1683                         bool feasiblewrite = true;
1684                         //now we need to see if this write works for everyone
1685
1686                         for (int loop = count; loop > 0; loop--, rit++) {
1687                                 ModelAction *act = *rit;
1688                                 bool foundvalue = false;
1689                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1690                                         if (act->get_node()->get_read_from_past(j) == write) {
1691                                                 foundvalue = true;
1692                                                 break;
1693                                         }
1694                                 }
1695                                 if (!foundvalue) {
1696                                         feasiblewrite = false;
1697                                         break;
1698                                 }
1699                         }
1700                         if (feasiblewrite) {
1701                                 priv->too_many_reads = true;
1702                                 return;
1703                         }
1704                 }
1705         }
1706 }
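/*
 * Illustrative sketch only: the pattern check_recency() prunes (when
 * params.maxreads is nonzero) is a spin loop that keeps reading the same stale
 * store even though a newer store is feasible at every iteration:
 *
 *   std::atomic<int> flag(0);
 *   // Thread 1:
 *   flag.store(1, std::memory_order_relaxed);
 *   // Thread 2:
 *   while (!flag.load(std::memory_order_relaxed))
 *       ;   // after maxreads consecutive loads of the initial 0, with the
 *           // store of 1 available to each of them, too_many_reads is set
 */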
1707
1708 /**
1709  * Updates the mo_graph with the constraints imposed from the current
1710  * read.
1711  *
1712  * Basic idea is the following: Go through each other thread and find
1713  * the last action that happened before our read.  Two cases:
1714  *
1715  * (1) The action is a write => that write must either occur before
1716  * the write we read from or be the write we read from.
1717  *
1718  * (2) The action is a read => the write that that action read from
1719  * must occur before the write we read from or be the same write.
1720  *
1721  * @param curr The current action. Must be a read.
1722  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1723  * @return True if modification order edges were added; false otherwise
1724  */
1725 template <typename rf_type>
1726 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1727 {
1728         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1729         unsigned int i;
1730         bool added = false;
1731         ASSERT(curr->is_read());
1732
1733         /* Last SC fence in the current thread */
1734         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1735
1736         /* Iterate over all threads */
1737         for (i = 0; i < thrd_lists->size(); i++) {
1738                 /* Last SC fence in thread i */
1739                 ModelAction *last_sc_fence_thread_local = NULL;
1740                 if (int_to_id((int)i) != curr->get_tid())
1741                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1742
1743                 /* Last SC fence in thread i, before last SC fence in current thread */
1744                 ModelAction *last_sc_fence_thread_before = NULL;
1745                 if (last_sc_fence_local)
1746                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1747
1748                 /* Iterate over actions in thread, starting from most recent */
1749                 action_list_t *list = &(*thrd_lists)[i];
1750                 action_list_t::reverse_iterator rit;
1751                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1752                         ModelAction *act = *rit;
1753
1754                         if (act->is_write() && !act->equals(rf) && act != curr) {
1755                                 /* C++, Section 29.3 statement 5 */
1756                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1757                                                 *act < *last_sc_fence_thread_local) {
1758                                         added = mo_graph->addEdge(act, rf) || added;
1759                                         break;
1760                                 }
1761                                 /* C++, Section 29.3 statement 4 */
1762                                 else if (act->is_seqcst() && last_sc_fence_local &&
1763                                                 *act < *last_sc_fence_local) {
1764                                         added = mo_graph->addEdge(act, rf) || added;
1765                                         break;
1766                                 }
1767                                 /* C++, Section 29.3 statement 6 */
1768                                 else if (last_sc_fence_thread_before &&
1769                                                 *act < *last_sc_fence_thread_before) {
1770                                         added = mo_graph->addEdge(act, rf) || added;
1771                                         break;
1772                                 }
1773                         }
1774
1775                         /*
1776                          * Include at most one act per-thread that "happens
1777                          * before" curr. Don't consider reflexively.
1778                          */
1779                         if (act->happens_before(curr) && act != curr) {
1780                                 if (act->is_write()) {
1781                                         if (!act->equals(rf)) {
1782                                                 added = mo_graph->addEdge(act, rf) || added;
1783                                         }
1784                                 } else {
1785                                         const ModelAction *prevrf = act->get_reads_from();
1786                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1787                                         if (prevrf) {
1788                                                 if (!prevrf->equals(rf))
1789                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1790                                         } else if (!prevrf_promise->equals(rf)) {
1791                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1792                                         }
1793                                 }
1794                                 break;
1795                         }
1796                 }
1797         }
1798
1799         /*
1800          * All compatible, thread-exclusive promises must be ordered after any
1801          * concrete loads from the same thread
1802          */
1803         for (unsigned int i = 0; i < promises->size(); i++)
1804                 if ((*promises)[i]->is_compatible_exclusive(curr))
1805                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1806
1807         return added;
1808 }
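/*
 * Illustrative sketch only, matching case (1) above. Suppose curr is the
 * relaxed load of x in Thread 2 and it reads from W2:
 *
 *   std::atomic<int> x(0), y(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);        // W1
 *   y.store(1, std::memory_order_release);
 *   // Thread 2:
 *   if (y.load(std::memory_order_acquire) == 1)
 *       r = x.load(std::memory_order_relaxed);    // curr; suppose it reads W2
 *   // Thread 3:
 *   x.store(2, std::memory_order_relaxed);        // W2
 *
 * W1 happens before curr (release/acquire on y), so the edge W1 --mo--> W2 is
 * added. Case (2) is analogous: had the happens-before predecessor been a load
 * that itself read from W1, the same W1 --mo--> W2 edge would result.
 */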
1809
1810 /**
1811  * Updates the mo_graph with the constraints imposed from the current write.
1812  *
1813  * Basic idea is the following: Go through each other thread and find
1814  * the latest action that happened before our write.  Two cases:
1815  *
1816  * (1) The action is a write => that write must occur before
1817  * the current write
1818  *
1819  * (2) The action is a read => the write that that action read from
1820  * must occur before the current write.
1821  *
1822  * This method also handles two other issues:
1823  *
1824  * (I) Sequential Consistency: Making sure that if the current write is
1825  * seq_cst, it occurs after the previous seq_cst write.
1826  *
1827  * (II) Sending the write back to non-synchronizing reads.
1828  *
1829  * @param curr The current action. Must be a write.
1830  * @return True if modification order edges were added; false otherwise
1831  */
1832 bool ModelChecker::w_modification_order(ModelAction *curr)
1833 {
1834         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1835         unsigned int i;
1836         bool added = false;
1837         ASSERT(curr->is_write());
1838
1839         if (curr->is_seqcst()) {
1840                 /* We have to at least see the last sequentially consistent write,
1841                          so we are initialized. */
1842                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1843                 if (last_seq_cst != NULL) {
1844                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1845                 }
1846         }
1847
1848         /* Last SC fence in the current thread */
1849         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1850
1851         /* Iterate over all threads */
1852         for (i = 0; i < thrd_lists->size(); i++) {
1853                 /* Last SC fence in thread i, before last SC fence in current thread */
1854                 ModelAction *last_sc_fence_thread_before = NULL;
1855                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1856                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1857
1858                 /* Iterate over actions in thread, starting from most recent */
1859                 action_list_t *list = &(*thrd_lists)[i];
1860                 action_list_t::reverse_iterator rit;
1861                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1862                         ModelAction *act = *rit;
1863                         if (act == curr) {
1864                                 /*
1865                                  * 1) If RMW and it actually read from something, then we
1866                                  * already have all relevant edges, so just skip to next
1867                                  * thread.
1868                                  *
1869                                  * 2) If RMW and it didn't read from anything, we should add
1870                                  * whatever edge we can get to speed up convergence.
1871                                  *
1872                                  * 3) If normal write, we need to look at earlier actions, so
1873                                  * continue processing list.
1874                                  */
1875                                 if (curr->is_rmw()) {
1876                                         if (curr->get_reads_from() != NULL)
1877                                                 break;
1878                                         else
1879                                                 continue;
1880                                 } else
1881                                         continue;
1882                         }
1883
1884                         /* C++, Section 29.3 statement 7 */
1885                         if (last_sc_fence_thread_before && act->is_write() &&
1886                                         *act < *last_sc_fence_thread_before) {
1887                                 added = mo_graph->addEdge(act, curr) || added;
1888                                 break;
1889                         }
1890
1891                         /*
1892                          * Include at most one act per-thread that "happens
1893                          * before" curr
1894                          */
1895                         if (act->happens_before(curr)) {
1896                                 /*
1897                                  * Note: if act is RMW, just add edge:
1898                                  *   act --mo--> curr
1899                                  * The following edge should be handled elsewhere:
1900                                  *   readfrom(act) --mo--> act
1901                                  */
1902                                 if (act->is_write())
1903                                         added = mo_graph->addEdge(act, curr) || added;
1904                                 else if (act->is_read()) {
1905                                         //if previous read accessed a null, just keep going
1906                                         if (act->get_reads_from() == NULL)
1907                                                 continue;
1908                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1909                                 }
1910                                 break;
1911                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1912                                                      !act->same_thread(curr)) {
1913                                 /* We have an action that:
1914                                    (1) did not happen before us
1915                                    (2) is a read and we are a write
1916                                    (3) cannot synchronize with us
1917                                    (4) is in a different thread
1918                                    =>
1919                                    that read could potentially read from our write.  Note that
1920                                    these checks are overly conservative at this point, we'll
1921                                    do more checks before actually removing the
1922                                    pendingfuturevalue.
1923
1924                                  */
1925                                 if (thin_air_constraint_may_allow(curr, act)) {
1926                                         if (!is_infeasible())
1927                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1928                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1929                                                 add_future_value(curr, act);
1930                                 }
1931                         }
1932                 }
1933         }
1934
1935         /*
1936          * All compatible, thread-exclusive promises must be ordered after any
1937          * concrete stores from the same thread, or else they can be merged with
1938          * this store later
1939          */
1940         for (unsigned int i = 0; i < promises->size(); i++)
1941                 if ((*promises)[i]->is_compatible_exclusive(curr))
1942                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1943
1944         return added;
1945 }
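/*
 * Illustrative sketch only, matching the cases above. Suppose curr is W2:
 *
 *   std::atomic<int> x(0);
 *   // Thread 2:
 *   x.store(1, std::memory_order_relaxed);       // W1
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // suppose it reads W1
 *   x.store(2, std::memory_order_relaxed);       // W2 == curr
 *
 * The load is sequenced (hence happens) before W2 and read from W1, so the
 * edge W1 --mo--> W2 is added; had the predecessor been a write, that write
 * itself would be ordered before W2.
 */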
1946
1947 /** Arbitrary reads from the future are not allowed.  Section 29.3
1948  * part 9 places some constraints.  This method checks one result of that
1949  * constraint.  Others require compiler support. */
1950 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1951 {
1952         if (!writer->is_rmw())
1953                 return true;
1954
1955         if (!reader->is_rmw())
1956                 return true;
1957
1958         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1959                 if (search == reader)
1960                         return false;
1961                 if (search->get_tid() == reader->get_tid() &&
1962                                 search->happens_before(reader))
1963                         break;
1964         }
1965
1966         return true;
1967 }
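/*
 * Illustrative sketch only: the shape ruled out here is a pair of RMWs whose
 * reads-from relation would form a cycle, i.e. each reading the other's
 * result "from the future":
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.fetch_add(1, std::memory_order_relaxed);   // writer
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // reader
 *
 * If the writer's reads-from chain already contains the reader, then letting
 * the reader read from the writer would close a cycle of RMWs, and
 * thin_air_constraint_may_allow() returns false.
 */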
1968
1969 /**
1970  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1971  * some constraints. This method checks the following constraint (others
1972  * require compiler support):
1973  *
1974  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1975  */
1976 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1977 {
1978         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1979         unsigned int i;
1980         /* Iterate over all threads */
1981         for (i = 0; i < thrd_lists->size(); i++) {
1982                 const ModelAction *write_after_read = NULL;
1983
1984                 /* Iterate over actions in thread, starting from most recent */
1985                 action_list_t *list = &(*thrd_lists)[i];
1986                 action_list_t::reverse_iterator rit;
1987                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1988                         ModelAction *act = *rit;
1989
1990                         /* Don't disallow due to act == reader */
1991                         if (!reader->happens_before(act) || reader == act)
1992                                 break;
1993                         else if (act->is_write())
1994                                 write_after_read = act;
1995                         else if (act->is_read() && act->get_reads_from() != NULL)
1996                                 write_after_read = act->get_reads_from();
1997                 }
1998
1999                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2000                         return false;
2001         }
2002         return true;
2003 }
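/*
 * Illustrative sketch only, for the constraint above (X --hb--> Y --mo--> Z
 * means X may not read from Z):
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // X, the reader
 *   x.store(1, std::memory_order_relaxed);       // Y
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);       // Z, the candidate writer
 *
 * If Y --mo--> Z is already in mo_graph, then letting X read from Z would make
 * a load observe a store ordered after one of its own later stores, so
 * mo_may_allow(Z, X) returns false.
 */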
2004
2005 /**
2006  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2007  * The ModelAction under consideration is expected to be taking part in
2008  * release/acquire synchronization as an object of the "reads from" relation.
2009  * Note that this can only provide release sequence support for RMW chains
2010  * which do not read from the future, as those actions cannot be traced until
2011  * their "promise" is fulfilled. Similarly, we may not even establish the
2012  * presence of a release sequence with certainty, as some modification order
2013  * constraints may be decided further in the future. Thus, this function
2014  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2015  * and a boolean representing certainty.
2016  *
2017  * @param rf The action that might be part of a release sequence. Must be a
2018  * write.
2019  * @param release_heads A pass-by-reference style return parameter. After
2020  * execution of this function, release_heads will contain the heads of all the
2021  * relevant release sequences, if any exists with certainty
2022  * @param pending A pass-by-reference style return parameter which is only used
2023  * when returning false (i.e., uncertain). Returns most information regarding
2024  * an uncertain release sequence, including any write operations that might
2025  * break the sequence.
2026  * @return true, if the ModelChecker is certain that release_heads is complete;
2027  * false otherwise
2028  */
2029 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2030                 rel_heads_list_t *release_heads,
2031                 struct release_seq *pending) const
2032 {
2033         /* Only check for release sequences if there are no cycles */
2034         if (mo_graph->checkForCycles())
2035                 return false;
2036
2037         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2038                 ASSERT(rf->is_write());
2039
2040                 if (rf->is_release())
2041                         release_heads->push_back(rf);
2042                 else if (rf->get_last_fence_release())
2043                         release_heads->push_back(rf->get_last_fence_release());
2044                 if (!rf->is_rmw())
2045                         break; /* End of RMW chain */
2046
2047                 /** @todo Need to be smarter here...  In the linux lock
2048                  * example, this will run to the beginning of the program for
2049                  * every acquire. */
2050                 /** @todo The way to be smarter here is to keep going until 1
2051                  * thread has a release preceded by an acquire and you've seen
2052                  * both. */
2053
2054                 /* acq_rel RMW is a sufficient stopping condition */
2055                 if (rf->is_acquire() && rf->is_release())
2056                         return true; /* complete */
2057         };
2058         if (!rf) {
2059                 /* read from future: need to settle this later */
2060                 pending->rf = NULL;
2061                 return false; /* incomplete */
2062         }
2063
2064         if (rf->is_release())
2065                 return true; /* complete */
2066
2067         /* else relaxed write
2068          * - check for fence-release in the same thread (29.8, stmt. 3)
2069          * - check modification order for contiguous subsequence
2070          *   -> rf must be same thread as release */
2071
2072         const ModelAction *fence_release = rf->get_last_fence_release();
2073         /* Synchronize with a fence-release unconditionally; we don't need to
2074          * find any more "contiguous subsequence..." for it */
2075         if (fence_release)
2076                 release_heads->push_back(fence_release);
2077
2078         int tid = id_to_int(rf->get_tid());
2079         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2080         action_list_t *list = &(*thrd_lists)[tid];
2081         action_list_t::const_reverse_iterator rit;
2082
2083         /* Find rf in the thread list */
2084         rit = std::find(list->rbegin(), list->rend(), rf);
2085         ASSERT(rit != list->rend());
2086
2087         /* Find the last {write,fence}-release */
2088         for (; rit != list->rend(); rit++) {
2089                 if (fence_release && *(*rit) < *fence_release)
2090                         break;
2091                 if ((*rit)->is_release())
2092                         break;
2093         }
2094         if (rit == list->rend()) {
2095                 /* No write-release in this thread */
2096                 return true; /* complete */
2097         } else if (fence_release && *(*rit) < *fence_release) {
2098                 /* The fence-release is more recent (and so, "stronger") than
2099                  * the most recent write-release */
2100                 return true; /* complete */
2101         } /* else, need to establish contiguous release sequence */
2102         ModelAction *release = *rit;
2103
2104         ASSERT(rf->same_thread(release));
2105
2106         pending->writes.clear();
2107
2108         bool certain = true;
2109         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2110                 if (id_to_int(rf->get_tid()) == (int)i)
2111                         continue;
2112                 list = &(*thrd_lists)[i];
2113
2114                 /* Can we ensure no future writes from this thread may break
2115                  * the release seq? */
2116                 bool future_ordered = false;
2117
2118                 ModelAction *last = get_last_action(int_to_id(i));
2119                 Thread *th = get_thread(int_to_id(i));
2120                 if ((last && rf->happens_before(last)) ||
2121                                 !is_enabled(th) ||
2122                                 th->is_complete())
2123                         future_ordered = true;
2124
2125                 ASSERT(!th->is_model_thread() || future_ordered);
2126
2127                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2128                         const ModelAction *act = *rit;
2129                         /* Reach synchronization -> this thread is complete */
2130                         if (act->happens_before(release))
2131                                 break;
2132                         if (rf->happens_before(act)) {
2133                                 future_ordered = true;
2134                                 continue;
2135                         }
2136
2137                         /* Only non-RMW writes can break release sequences */
2138                         if (!act->is_write() || act->is_rmw())
2139                                 continue;
2140
2141                         /* Check modification order */
2142                         if (mo_graph->checkReachable(rf, act)) {
2143                                 /* rf --mo--> act */
2144                                 future_ordered = true;
2145                                 continue;
2146                         }
2147                         if (mo_graph->checkReachable(act, release))
2148                                 /* act --mo--> release */
2149                                 break;
2150                         if (mo_graph->checkReachable(release, act) &&
2151                                       mo_graph->checkReachable(act, rf)) {
2152                                 /* release --mo-> act --mo--> rf */
2153                                 return true; /* complete */
2154                         }
2155                         /* act may break release sequence */
2156                         pending->writes.push_back(act);
2157                         certain = false;
2158                 }
2159                 if (!future_ordered)
2160                         certain = false; /* This thread is uncertain */
2161         }
2162
2163         if (certain) {
2164                 release_heads->push_back(release);
2165                 pending->writes.clear();
2166         } else {
2167                 pending->release = release;
2168                 pending->rf = rf;
2169         }
2170         return certain;
2171 }
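/*
 * Illustrative sketch only: a release store followed by RMWs forms a release
 * sequence (C++11 rules), so an acquire load that reads from the tail still
 * synchronizes with the release head:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);       // release head
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW, continues the sequence
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);   // reading the RMW's result
 *                                                // synchronizes with Thread 1
 *
 * When rf is instead a plain relaxed store, a contiguous modification-order
 * subsequence back to a release in rf's thread must be established, which is
 * where the uncertain ("pending") case below arises.
 */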
2172
2173 /**
2174  * An interface for getting the release sequence head(s) with which a
2175  * given ModelAction must synchronize. This function only returns a non-empty
2176  * result when it can locate a release sequence head with certainty. Otherwise,
2177  * it may mark the internal state of the ModelChecker so that it will handle
2178  * the release sequence at a later time, causing @a acquire to update its
2179  * synchronization at some later point in execution.
2180  *
2181  * @param acquire The 'acquire' action that may synchronize with a release
2182  * sequence
2183  * @param read The read action that may read from a release sequence; this may
2184  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2185  * when 'acquire' is a fence-acquire)
2186  * @param release_heads A pass-by-reference return parameter. Will be filled
2187  * with the head(s) of the release sequence(s), if they exist with certainty.
2188  * @see ModelChecker::release_seq_heads
2189  */
2190 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2191                 ModelAction *read, rel_heads_list_t *release_heads)
2192 {
2193         const ModelAction *rf = read->get_reads_from();
2194         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2195         sequence->acquire = acquire;
2196         sequence->read = read;
2197
2198         if (!release_seq_heads(rf, release_heads, sequence)) {
2199                 /* add act to 'lazy checking' list */
2200                 pending_rel_seqs->push_back(sequence);
2201         } else {
2202                 snapshot_free(sequence);
2203         }
2204 }
2205
2206 /**
2207  * Attempt to resolve all stashed operations that might synchronize with a
2208  * release sequence for a given location. This implements the "lazy" portion of
2209  * determining whether or not a release sequence was contiguous, since not all
2210  * modification order information is present at the time an action occurs.
2211  *
2212  * @param location The location/object that should be checked for release
2213  * sequence resolutions. A NULL value means to check all locations.
2214  * @param work_queue The work queue to which to add work items as they are
2215  * generated
2216  * @return True if any updates occurred (new synchronization, new mo_graph
2217  * edges)
2218  */
2219 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2220 {
2221         bool updated = false;
2222         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2223         while (it != pending_rel_seqs->end()) {
2224                 struct release_seq *pending = *it;
2225                 ModelAction *acquire = pending->acquire;
2226                 const ModelAction *read = pending->read;
2227
2228                 /* Only resolve sequences on the given location, if provided */
2229                 if (location && read->get_location() != location) {
2230                         it++;
2231                         continue;
2232                 }
2233
2234                 const ModelAction *rf = read->get_reads_from();
2235                 rel_heads_list_t release_heads;
2236                 bool complete;
2237                 complete = release_seq_heads(rf, &release_heads, pending);
2238                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2239                         if (!acquire->has_synchronized_with(release_heads[i])) {
2240                                 if (acquire->synchronize_with(release_heads[i]))
2241                                         updated = true;
2242                                 else
2243                                         set_bad_synchronization();
2244                         }
2245                 }
2246
2247                 if (updated) {
2248                         /* Re-check all pending release sequences */
2249                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2250                         /* Re-check read-acquire for mo_graph edges */
2251                         if (acquire->is_read())
2252                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2253
2254                         /* propagate synchronization to later actions */
2255                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2256                         for (; (*rit) != acquire; rit++) {
2257                                 ModelAction *propagate = *rit;
2258                                 if (acquire->happens_before(propagate)) {
2259                                         propagate->synchronize_with(acquire);
2260                                         /* Re-check 'propagate' for mo_graph edges */
2261                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2262                                 }
2263                         }
2264                 }
2265                 if (complete) {
2266                         it = pending_rel_seqs->erase(it);
2267                         snapshot_free(pending);
2268                 } else {
2269                         it++;
2270                 }
2271         }
2272
2273         // If we resolved promises or data races, see if we have realized a data race.
2274         checkDataRaces();
2275
2276         return updated;
2277 }
2278
2279 /**
2280  * Performs various bookkeeping operations for the current ModelAction. For
2281  * instance, adds action to the per-object, per-thread action vector and to the
2282  * action trace list of all thread actions.
2283  *
2284  * @param act is the ModelAction to add.
2285  */
2286 void ModelChecker::add_action_to_lists(ModelAction *act)
2287 {
2288         int tid = id_to_int(act->get_tid());
2289         ModelAction *uninit = NULL;
2290         int uninit_id = -1;
2291         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2292         if (list->empty() && act->is_atomic_var()) {
2293                 uninit = new_uninitialized_action(act->get_location());
2294                 uninit_id = id_to_int(uninit->get_tid());
2295                 list->push_back(uninit);
2296         }
2297         list->push_back(act);
2298
2299         action_trace->push_back(act);
2300         if (uninit)
2301                 action_trace->push_front(uninit);
2302
2303         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2304         if (tid >= (int)vec->size())
2305                 vec->resize(priv->next_thread_id);
2306         (*vec)[tid].push_back(act);
2307         if (uninit)
2308                 (*vec)[uninit_id].push_front(uninit);
2309
2310         if ((int)thrd_last_action->size() <= tid)
2311                 thrd_last_action->resize(get_num_threads());
2312         (*thrd_last_action)[tid] = act;
2313         if (uninit)
2314                 (*thrd_last_action)[uninit_id] = uninit;
2315
2316         if (act->is_fence() && act->is_release()) {
2317                 if ((int)thrd_last_fence_release->size() <= tid)
2318                         thrd_last_fence_release->resize(get_num_threads());
2319                 (*thrd_last_fence_release)[tid] = act;
2320         }
2321
2322         if (act->is_wait()) {
2323                 void *mutex_loc = (void *) act->get_value();
2324                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2325
2326                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2327                 if (tid >= (int)vec->size())
2328                         vec->resize(priv->next_thread_id);
2329                 (*vec)[tid].push_back(act);
2330         }
2331 }
2332
2333 /**
2334  * @brief Get the last action performed by a particular Thread
2335  * @param tid The thread ID of the Thread in question
2336  * @return The last action in the thread
2337  */
2338 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2339 {
2340         int threadid = id_to_int(tid);
2341         if (threadid < (int)thrd_last_action->size())
2342                 return (*thrd_last_action)[id_to_int(tid)];
2343         else
2344                 return NULL;
2345 }
2346
2347 /**
2348  * @brief Get the last fence release performed by a particular Thread
2349  * @param tid The thread ID of the Thread in question
2350  * @return The last fence release in the thread, if one exists; NULL otherwise
2351  */
2352 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2353 {
2354         int threadid = id_to_int(tid);
2355         if (threadid < (int)thrd_last_fence_release->size())
2356                 return (*thrd_last_fence_release)[id_to_int(tid)];
2357         else
2358                 return NULL;
2359 }
2360
2361 /**
2362  * Gets the last memory_order_seq_cst write (in the total global sequence)
2363  * performed on a particular object (i.e., memory location), not including the
2364  * current action.
2365  * @param curr The current ModelAction; also denotes the object location to
2366  * check
2367  * @return The last seq_cst write
2368  */
2369 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2370 {
2371         void *location = curr->get_location();
2372         action_list_t *list = get_safe_ptr_action(obj_map, location);
2373         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2374         action_list_t::reverse_iterator rit;
2375         for (rit = list->rbegin(); rit != list->rend(); rit++)
2376                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2377                         return *rit;
2378         return NULL;
2379 }
2380
2381 /**
2382  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2383  * performed in a particular thread, prior to a particular fence.
2384  * @param tid The ID of the thread to check
2385  * @param before_fence The fence from which to begin the search; if NULL, then
2386  * search for the most recent fence in the thread.
2387  * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
2388  */
2389 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2390 {
2391         /* All fences should have NULL location */
2392         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2393         action_list_t::reverse_iterator rit = list->rbegin();
2394
2395         if (before_fence) {
2396                 for (; rit != list->rend(); rit++)
2397                         if (*rit == before_fence)
2398                                 break;
2399
2400                 ASSERT(*rit == before_fence);
2401                 rit++;
2402         }
2403
2404         for (; rit != list->rend(); rit++)
2405                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2406                         return *rit;
2407         return NULL;
2408 }
2409
2410 /**
2411  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2412  * location). This function identifies the mutex according to the current
2413  * action, which is presumed to perform on the same mutex.
2414  * @param curr The current ModelAction; also denotes the object location to
2415  * check
2416  * @return The last unlock operation
2417  */
2418 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2419 {
2420         void *location = curr->get_location();
2421         action_list_t *list = get_safe_ptr_action(obj_map, location);
2422         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2423         action_list_t::reverse_iterator rit;
2424         for (rit = list->rbegin(); rit != list->rend(); rit++)
2425                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2426                         return *rit;
2427         return NULL;
2428 }
2429
2430 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2431 {
2432         ModelAction *parent = get_last_action(tid);
2433         if (!parent)
2434                 parent = get_thread(tid)->get_creation();
2435         return parent;
2436 }
2437
2438 /**
2439  * Returns the clock vector for a given thread.
2440  * @param tid The thread whose clock vector we want
2441  * @return Desired clock vector
2442  */
2443 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2444 {
2445         return get_parent_action(tid)->get_cv();
2446 }
2447
2448 /**
2449  * Resolve a set of Promises with a current write. The set is provided in the
2450  * Node corresponding to @a write.
2451  * @param write The ModelAction that is fulfilling Promises
2452  * @return True if promises were resolved; false otherwise
2453  */
2454 bool ModelChecker::resolve_promises(ModelAction *write)
2455 {
2456         bool haveResolved = false;
2457         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2458         promise_list_t mustResolve, resolved;
2459
2460         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2461                 Promise *promise = (*promises)[promise_index];
2462                 if (write->get_node()->get_promise(i)) {
2463                         for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2464                                 ModelAction *read = promise->get_reader(j);
2465                                 read_from(read, write);
2466                                 actions_to_check.push_back(read);
2467                         }
2468                         //Make sure the promise's value matches the write's value
2469                         ASSERT(promise->is_compatible(write));
2470                         mo_graph->resolvePromise(promise, write, &mustResolve);
2471
2472                         resolved.push_back(promise);
2473                         promises->erase(promises->begin() + promise_index);
2474
2475                         haveResolved = true;
2476                 } else
2477                         promise_index++;
2478         }
2479
2480         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2481                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2482                                 == resolved.end())
2483                         priv->failed_promise = true;
2484         }
2485         for (unsigned int i = 0; i < resolved.size(); i++)
2486                 delete resolved[i];
2487         //Check whether reading these writes has made threads unable to
2488         //resolve promises
2489
2490         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2491                 ModelAction *read = actions_to_check[i];
2492                 mo_check_promises(read, true);
2493         }
2494
2495         return haveResolved;
2496 }
2497
2498 /**
2499  * Compute the set of promises that could potentially be satisfied by this
2500  * action. Note that the set computation actually appears in the Node, not in
2501  * ModelChecker.
2502  * @param curr The ModelAction that may satisfy promises
2503  */
2504 void ModelChecker::compute_promises(ModelAction *curr)
2505 {
2506         for (unsigned int i = 0; i < promises->size(); i++) {
2507                 Promise *promise = (*promises)[i];
2508                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2509                         continue;
2510
2511                 bool satisfy = true;
2512                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2513                         const ModelAction *act = promise->get_reader(j);
2514                         if (act->happens_before(curr) ||
2515                                         act->could_synchronize_with(curr)) {
2516                                 satisfy = false;
2517                                 break;
2518                         }
2519                 }
2520                 if (satisfy)
2521                         curr->get_node()->set_promise(i);
2522         }
2523 }
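/*
 * Illustrative sketch only: a promise exists when a load has speculatively
 * read a value no current store provides; a later store of the same value to
 * the same location that neither happens after the load nor could synchronize
 * with it is flagged as a potential satisfier:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // speculatively reads 1 (promise)
 *   // Thread 2:
 *   x.store(1, std::memory_order_relaxed);       // same location and value, no
 *                                                // hb/sync conflict: may satisfy it
 */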
2524
2525 /** Checks promises in response to change in ClockVector Threads. */
2526 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2527 {
2528         for (unsigned int i = 0; i < promises->size(); i++) {
2529                 Promise *promise = (*promises)[i];
2530                 if (!promise->thread_is_available(tid))
2531                         continue;
2532                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2533                         const ModelAction *act = promise->get_reader(j);
2534                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2535                                         merge_cv->synchronized_since(act)) {
2536                                 if (promise->eliminate_thread(tid)) {
2537                                         /* Promise has failed */
2538                                         priv->failed_promise = true;
2539                                         return;
2540                                 }
2541                         }
2542                 }
2543         }
2544 }
2545
2546 void ModelChecker::check_promises_thread_disabled()
2547 {
2548         for (unsigned int i = 0; i < promises->size(); i++) {
2549                 Promise *promise = (*promises)[i];
2550                 if (promise->has_failed()) {
2551                         priv->failed_promise = true;
2552                         return;
2553                 }
2554         }
2555 }
2556
2557 /**
2558  * @brief Check promise satisfiability after an addition to the modification
2559  * order.
2560  *
2561  * We test whether threads are still available for satisfying promises after an
2562  * addition to our modification order constraints. Those that are unavailable
2563  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2564  * that promise has failed.
2565  *
2566  * @param act The ModelAction which updated the modification order
2567  * @param is_read_check Should be true if act is a read and we must check for
2568  * updates to the store from which it read (there is a distinction here for
2569  * RMWs, which are both a load and a store)
2570  */
2571 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2572 {
2573         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2574
2575         for (unsigned int i = 0; i < promises->size(); i++) {
2576                 Promise *promise = (*promises)[i];
2577
2578                 // Is this promise on the same location?
2579                 if (!promise->same_location(write))
2580                         continue;
2581
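                /*
                 * If one of the promise's readers happens before act, ask the
                 * CycleGraph whether the new constraints leave the promise
                 * unsatisfiable; if so, this execution has a failed promise.
                 */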
2582                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2583                         const ModelAction *pread = promise->get_reader(j);
2584                         if (!pread->happens_before(act))
2585                                 continue;
2586                         if (mo_graph->checkPromise(write, promise)) {
2587                                 priv->failed_promise = true;
2588                                 return;
2589                         }
2590                         break;
2591                 }
2592
2593                 // Don't do any lookups twice for the same thread
2594                 if (!promise->thread_is_available(act->get_tid()))
2595                         continue;
2596
2597                 if (mo_graph->checkReachable(promise, write)) {
2598                         if (mo_graph->checkPromise(write, promise)) {
2599                                 priv->failed_promise = true;
2600                                 return;
2601                         }
2602                 }
2603         }
2604 }
2605
2606 /**
2607  * Compute the set of writes that may break the current pending release
2608  * sequence. This information is extracted from previous release sequence
2609  * calculations.
2610  *
2611  * @param curr The current ModelAction. Must be a release sequence fixup
2612  * action.
2613  */
2614 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2615 {
2616         if (pending_rel_seqs->empty())
2617                 return;
2618
2619         struct release_seq *pending = pending_rel_seqs->back();
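        /*
         * Each write recorded for the most recent pending release sequence is
         * a candidate point at which the sequence may be broken; the Node
         * explores each choice, including the "don't break" option added
         * below.
         */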
2620         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2621                 const ModelAction *write = pending->writes[i];
2622                 curr->get_node()->add_relseq_break(write);
2623         }
2624
2625         /* NULL means don't break the sequence; just synchronize */
2626         curr->get_node()->add_relseq_break(NULL);
2627 }
2628
2629 /**
2630  * Build up an initial set of all past writes that this 'read' action may read
2631  * from, as well as any previously-observed future values that must still be valid.
2632  *
2633  * @param curr is the current ModelAction that we are exploring; it must be a
2634  * 'read' operation.
2635  */
2636 void ModelChecker::build_may_read_from(ModelAction *curr)
2637 {
2638         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2639         unsigned int i;
2640         ASSERT(curr->is_read());
2641
2642         ModelAction *last_sc_write = NULL;
2643
2644         if (curr->is_seqcst())
2645                 last_sc_write = get_last_seq_cst_write(curr);
2646
2647         /* Iterate over all threads */
2648         for (i = 0; i < thrd_lists->size(); i++) {
2649                 /* Iterate over actions in thread, starting from most recent */
2650                 action_list_t *list = &(*thrd_lists)[i];
2651                 action_list_t::reverse_iterator rit;
2652                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2653                         ModelAction *act = *rit;
2654
2655                         /* Only consider 'write' actions */
2656                         if (!act->is_write() || act == curr)
2657                                 continue;
2658
2659                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2660                         bool allow_read = true;
2661
2662                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2663                                 allow_read = false;
2664                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2665                                 allow_read = false;
2666
2667                         if (allow_read) {
2668                                 /* Only add feasible reads */
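                                /*
                                 * Tentatively apply the mo constraints implied
                                 * by reading from act, test feasibility, then
                                 * roll the graph back either way.
                                 */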
2669                                 mo_graph->startChanges();
2670                                 r_modification_order(curr, act);
2671                                 if (!is_infeasible())
2672                                         curr->get_node()->add_read_from_past(act);
2673                                 mo_graph->rollbackChanges();
2674                         }
2675
2676                         /* Include at most one act per thread that "happens before" curr */
2677                         if (act->happens_before(curr))
2678                                 break;
2679                 }
2680         }
2681
2682         /* Inherit existing, promised future values */
2683         for (i = 0; i < promises->size(); i++) {
2684                 const Promise *promise = (*promises)[i];
2685                 const ModelAction *promise_read = promise->get_reader(0);
2686                 if (promise_read->same_var(curr)) {
2687                         /* Only add feasible future-values */
2688                         mo_graph->startChanges();
2689                         r_modification_order(curr, promise);
2690                         if (!is_infeasible())
2691                                 curr->get_node()->add_read_from_promise(promise_read);
2692                         mo_graph->rollbackChanges();
2693                 }
2694         }
2695
2696         /* We should find an empty may-read-from set only if the execution is doomed */
2697         if (!curr->get_node()->read_from_size()) {
2698                 priv->no_valid_reads = true;
2699                 set_assert();
2700         }
2701
2702         if (DBG_ENABLED()) {
2703                 model_print("Reached read action:\n");
2704                 curr->print();
2705                 model_print("Printing read_from_past\n");
2706                 curr->get_node()->print_read_from_past();
2707                 model_print("End printing read_from_past\n");
2708         }
2709 }
2710
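/**
 * @brief Check whether a sleeping thread may read from a given write
 *
 * Walks backward through the chain of RMWs feeding @a write: the read is
 * allowed if some write along the chain is a release that was performed
 * while the reader's thread was on the sleep set, or if the chain reaches
 * the uninitialized store.
 *
 * @param curr The read action being checked
 * @param write The candidate write to read from
 * @return True if the sleeping thread may read from the write
 */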
2711 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2712 {
2713         for ( ; write != NULL; write = write->get_reads_from()) {
2714                 /* UNINIT actions don't have a Node, and they never sleep */
2715                 if (write->is_uninitialized())
2716                         return true;
2717                 Node *prevnode = write->get_node()->get_parent();
2718
2719                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2720                 if (write->is_release() && thread_sleep)
2721                         return true;
2722                 if (!write->is_rmw())
2723                         return false;
2724         }
2725         return true;
2726 }
2727
2728 /**
2729  * @brief Create a new action representing an uninitialized atomic
2730  * @param location The memory location of the atomic object
2731  * @return A pointer to a new ModelAction
2732  */
2733 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2734 {
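        /* Allocate from snapshotting memory and construct via placement new */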
2735         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2736         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2737         act->create_cv(NULL);
2738         return act;
2739 }
2740
2741 static void print_list(action_list_t *list)
2742 {
2743         action_list_t::iterator it;
2744
2745         model_print("---------------------------------------------------------------------\n");
2746
2747         unsigned int hash = 0;
2748
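        /*
         * Fold each action's hash into a simple rolling hash of the whole
         * trace so that executions can be compared quickly by their HASH
         * lines.
         */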
2749         for (it = list->begin(); it != list->end(); it++) {
2750                 (*it)->print();
2751                 hash = hash^(hash<<3)^((*it)->hash());
2752         }
2753         model_print("HASH %u\n", hash);
2754         model_print("---------------------------------------------------------------------\n");
2755 }
2756
2757 #if SUPPORT_MOD_ORDER_DUMP
2758 void ModelChecker::dumpGraph(char *filename) const
2759 {
2760         char buffer[200];
2761         sprintf(buffer, "%s.dot", filename);
2762         FILE *file = fopen(buffer, "w");
2763         fprintf(file, "digraph %s {\n", filename);
2764         mo_graph->dumpNodes(file);
2765         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2766
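        /*
         * thread_array[i] remembers the most recent action printed for thread
         * i, so that sequenced-before (sb) edges can be drawn between
         * consecutive actions of the same thread.
         */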
2767         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2768                 ModelAction *act = *it;
2769                 if (act->is_read()) {
2770                         mo_graph->dot_print_node(file, act);
2771                         if (act->get_reads_from())
2772                                 mo_graph->dot_print_edge(file,
2773                                                 act->get_reads_from(),
2774                                                 act,
2775                                                 "label=\"rf\", color=red, weight=2");
2776                         else
2777                                 mo_graph->dot_print_edge(file,
2778                                                 act->get_reads_from_promise(),
2779                                                 act,
2780                                                 "label=\"rf\", color=red");
2781                 }
2782                 if (thread_array[id_to_int(act->get_tid())]) {
2783                         mo_graph->dot_print_edge(file,
2784                                         thread_array[id_to_int(act->get_tid())],
2785                                         act,
2786                                         "label=\"sb\", color=blue, weight=400");
2787                 }
2788
2789                 thread_array[id_to_int(act->get_tid())] = act;
2790         }
2791         fprintf(file, "}\n");
2792         model_free(thread_array);
2793         fclose(file);
2794 }
2795 #endif
2796
2797 /** @brief Prints an execution trace summary. */
2798 void ModelChecker::print_summary() const
2799 {
2800 #if SUPPORT_MOD_ORDER_DUMP
2801         char buffername[100];
2802         sprintf(buffername, "exec%04u", stats.num_total);
2803         mo_graph->dumpGraphToFile(buffername);
2804         sprintf(buffername, "graph%04u", stats.num_total);
2805         dumpGraph(buffername);
2806 #endif
2807
2808         model_print("Execution %d:", stats.num_total);
2809         if (isfeasibleprefix()) {
2810                 if (scheduler->all_threads_sleeping())
2811                         model_print(" SLEEP-SET REDUNDANT");
2812                 model_print("\n");
2813         } else
2814                 print_infeasibility(" INFEASIBLE");
2815         print_list(action_trace);
2816         model_print("\n");
2817 }
2818
2819 /**
2820  * Add a Thread to the system for the first time. Should only be called once
2821  * per thread.
2822  * @param t The Thread to add
2823  */
2824 void ModelChecker::add_thread(Thread *t)
2825 {
2826         thread_map->put(id_to_int(t->get_id()), t);
2827         scheduler->add_thread(t);
2828 }
2829
2830 /**
2831  * Removes a thread from the scheduler.
2832  * @param t The Thread to remove
2833  */
2834 void ModelChecker::remove_thread(Thread *t)
2835 {
2836         scheduler->remove_thread(t);
2837 }
2838
2839 /**
2840  * @brief Get a Thread reference by its ID
2841  * @param tid The Thread's ID
2842  * @return A Thread reference
2843  */
2844 Thread * ModelChecker::get_thread(thread_id_t tid) const
2845 {
2846         return thread_map->get(id_to_int(tid));
2847 }
2848
2849 /**
2850  * @brief Get a reference to the Thread in which a ModelAction was executed
2851  * @param act The ModelAction
2852  * @return A Thread reference
2853  */
2854 Thread * ModelChecker::get_thread(const ModelAction *act) const
2855 {
2856         return get_thread(act->get_tid());
2857 }
2858
2859 /**
2860  * @brief Get a Promise's "promise number"
2861  *
2862  * A "promise number" is an index number that is unique to a promise, valid
2863  * only for a specific snapshot of an execution trace. Promises may come and go
2864  * as they are generated and resolved, so an index only retains meaning for the
2865  * current snapshot.
2866  *
2867  * @param promise The Promise to check
2868  * @return The promise index, if the promise is still valid; otherwise -1
2869  */
2870 int ModelChecker::get_promise_number(const Promise *promise) const
2871 {
2872         for (unsigned int i = 0; i < promises->size(); i++)
2873                 if ((*promises)[i] == promise)
2874                         return i;
2875         /* Not found */
2876         return -1;
2877 }
2878
2879 /**
2880  * @brief Check if a Thread is currently enabled
2881  * @param t The Thread to check
2882  * @return True if the Thread is currently enabled
2883  */
2884 bool ModelChecker::is_enabled(Thread *t) const
2885 {
2886         return scheduler->is_enabled(t);
2887 }
2888
2889 /**
2890  * @brief Check if a Thread is currently enabled
2891  * @param tid The ID of the Thread to check
2892  * @return True if the Thread is currently enabled
2893  */
2894 bool ModelChecker::is_enabled(thread_id_t tid) const
2895 {
2896         return scheduler->is_enabled(tid);
2897 }
2898
2899 /**
2900  * Switch from a model-checker context to a user-thread context. This is the
2901  * complement of ModelChecker::switch_to_master and must be called from the
2902  * model-checker context.
2903  *
2904  * @param thread The user-thread to switch to
2905  */
2906 void ModelChecker::switch_from_master(Thread *thread)
2907 {
2908         scheduler->set_current_thread(thread);
2909         Thread::swap(&system_context, thread);
2910 }
2911
2912 /**
2913  * Switch from a user-context to the "master thread" context (a.k.a. system
2914  * context). This switch is made with the intention of exploring a particular
2915  * model-checking action (described by a ModelAction object). Must be called
2916  * from a user-thread context.
2917  *
2918  * @param act The current action that will be explored. May be NULL only if
2919  * trace is exiting via an assertion (see ModelChecker::set_assert and
2920  * ModelChecker::has_asserted).
2921  * @return Return the value returned by the current action
2922  */
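/*
 * Illustrative sketch only (not code from this file): the interposed
 * atomic/thread operations are expected to hand control to the checker
 * roughly like
 *
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, order, obj));
 *
 * where ATOMIC_READ, order and obj stand for whatever operation the wrapper
 * intercepted; the return value is the value the checker chose for the load.
 */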
2923 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2924 {
2925         DBG();
2926         Thread *old = thread_current();
2927         ASSERT(!old->get_pending());
2928         old->set_pending(act);
2929         if (Thread::swap(old, &system_context) < 0) {
2930                 perror("swap threads");
2931                 exit(EXIT_FAILURE);
2932         }
2933         return old->get_return_value();
2934 }
2935
2936 /**
2937  * Takes the next step in the execution, if possible.
2938  * @param curr The current step to take
2939  * @return Returns the next Thread to run, if any; NULL if this execution
2940  * should terminate
2941  */
2942 Thread * ModelChecker::take_step(ModelAction *curr)
2943 {
2944         Thread *curr_thrd = get_thread(curr);
2945         ASSERT(curr_thrd->get_state() == THREAD_READY);
2946
2947         curr = check_current_action(curr);
2948
2949         /* Infeasible -> don't take any more steps */
2950         if (is_infeasible())
2951                 return NULL;
2952         else if (isfeasibleprefix() && have_bug_reports()) {
2953                 set_assert();
2954                 return NULL;
2955         }
2956
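        /* A nonzero params.bound caps the number of sequence numbers (i.e.,
         * steps) a single execution may consume */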
2957         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2958                 return NULL;
2959
2960         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2961                 scheduler->remove_thread(curr_thrd);
2962
2963         Thread *next_thrd = get_next_thread(curr);
2964
2965         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2966                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2967
2968         return next_thrd;
2969 }
2970
2971 /** Wrapper to run the user's main function, with appropriate arguments */
2972 void user_main_wrapper(void *)
2973 {
2974         user_main(model->params.argc, model->params.argv);
2975 }
2976
2977 /** @brief Run ModelChecker for the user program */
2978 void ModelChecker::run()
2979 {
2980         do {
2981                 thrd_t user_thread;
2982                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2983                 add_thread(t);
2984
2985                 do {
2986                         /*
2987                          * Stash next pending action(s) for thread(s). There
2988                          * should only need to stash one thread's action--the
2989                          * thread which just took a step--plus the first step
2990                          * for any newly-created thread
2991                          */
2992                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2993                                 thread_id_t tid = int_to_id(i);
2994                                 Thread *thr = get_thread(tid);
2995                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2996                                         switch_from_master(thr);
2997                                 }
2998                         }
2999
3000                         /* Catch assertions from prior take_step or from
3001                          * between-ModelAction bugs (e.g., data races) */
3002                         if (has_asserted())
3003                                 break;
3004
3005                         /* Consume the next action for a Thread */
3006                         ModelAction *curr = t->get_pending();
3007                         t->set_pending(NULL);
3008                         t = take_step(curr);
3009                 } while (t && !t->is_model_thread());
3010
3011                 /*
3012                  * Launch end-of-execution release sequence fixups only when
3013                  * the execution is otherwise feasible AND there are:
3014                  *
3015                  * (1) pending release sequences
3016                  * (2) pending assertions that could be invalidated by a change
3017                  * in clock vectors (i.e., data races)
3018                  * (3) no pending promises
3019                  */
3020                 while (!pending_rel_seqs->empty() &&
3021                                 is_feasible_prefix_ignore_relseq() &&
3022                                 !unrealizedraces.empty()) {
3023                         model_print("*** WARNING: release sequence fixup action "
3024                                         "(%zu pending release sequence(s)) ***\n",
3025                                         pending_rel_seqs->size());
3026                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3027                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3028                                         model_thread);
3029                         take_step(fixup);
3030                 }
3031         } while (next_execution());
3032
3033         model_print("******* Model-checking complete: *******\n");
3034         print_stats();
3035 }