[c11tester.git] / model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
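/*
 * These helpers lazily create the per-location list (or per-location vector
 * of per-thread lists) on first access, so callers never have to check for
 * NULL. A typical use, mirroring calls later in this file:
 *
 *   action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
 *   waiters->push_back(curr);
 */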
149
150 /**
151  * Restores user program to initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218  * might guide the choice of next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next chosen thread to run, if any exists. If no threads
221  * remain to be executed, return NULL.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * For every thread in the sleep set that has a pending action, mark that
299  * pending action with the sleep flag, so that the upcoming step knows the
300  * action was issued by a sleeping thread.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* A sleeping acquire fence may synchronize with any release operation */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333         /* Fence-release + store can awake load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
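/*
 * Illustrative sketch (hypothetical user-level C++11 code, not part of this
 * file) of the last condition above: a fence-release followed by a relaxed
 * store in the current thread can head a release sequence that a sleeping
 * load-acquire on the same location may synchronize with, so that sleeping
 * thread should be woken up:
 *
 *   // curr's thread                                     // sleeping thread
 *   std::atomic_thread_fence(std::memory_order_release);
 *   x.store(1, std::memory_order_relaxed);               x.load(std::memory_order_acquire);
 */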
341
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if a Thread has entered a circular wait deadlock situation. This will
405  * not check other threads for potential deadlock situations, and may miss
406  * deadlocks involving WAIT.
407  *
408  * @param t The thread which may have entered a deadlock
409  * @return True if this Thread entered a deadlock; false otherwise
410  */
411 bool ModelChecker::is_circular_wait(const Thread *t) const
412 {
413         for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on())
414                 if (waiting == t)
415                         return true;
416         return false;
417 }
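/*
 * Illustrative example (hypothetical user code) of the circular wait detected
 * above: each thread holds one mutex and blocks acquiring the other, so
 * following the waiting_on() chain from either thread leads back to itself:
 *
 *   // Thread 1               // Thread 2
 *   m1.lock();                m2.lock();
 *   m2.lock(); // blocks      m1.lock(); // blocks
 */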
418
419 /**
420  * Check if this is a complete execution. That is, have all threads completed
421  * execution (rather than exiting because sleep sets have forced a redundant
422  * execution).
423  *
424  * @return True if the execution is complete.
425  */
426 bool ModelChecker::is_complete_execution() const
427 {
428         for (unsigned int i = 0; i < get_num_threads(); i++)
429                 if (is_enabled(int_to_id(i)))
430                         return false;
431         return true;
432 }
433
434 /**
435  * @brief Assert a bug in the executing program.
436  *
437  * Use this function to assert any sort of bug in the user program. If the
438  * current trace is feasible (actually, a prefix of some feasible execution),
439  * then this execution will be aborted, printing the appropriate message. If
440  * the current trace is not yet feasible, the error message will be stashed and
441  * printed if the execution ever becomes feasible.
442  *
443  * @param msg Descriptive message for the bug (do not include newline char)
444  * @return True if bug is immediately-feasible
445  */
446 bool ModelChecker::assert_bug(const char *msg)
447 {
448         priv->bugs.push_back(new bug_message(msg));
449
450         if (isfeasibleprefix()) {
451                 set_assert();
452                 return true;
453         }
454         return false;
455 }
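/*
 * Usage, as in next_execution() below: report a bug and, if the current trace
 * is already a feasible prefix, halt this execution via set_assert():
 *
 *   if (is_deadlocked())
 *           assert_bug("Deadlock detected");
 */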
456
457 /**
458  * @brief Assert a bug in the executing program, asserted by a user thread
459  * @see ModelChecker::assert_bug
460  * @param msg Descriptive message for the bug (do not include newline char)
461  */
462 void ModelChecker::assert_user_bug(const char *msg)
463 {
464         /* If feasible bug, bail out now */
465         if (assert_bug(msg))
466                 switch_to_master(NULL);
467 }
468
469 /** @return True, if any bugs have been reported for this execution */
470 bool ModelChecker::have_bug_reports() const
471 {
472         return priv->bugs.size() != 0;
473 }
474
475 /** @brief Print bug report listing for this execution (if any bugs exist) */
476 void ModelChecker::print_bugs() const
477 {
478         if (have_bug_reports()) {
479                 model_print("Bug report: %zu bug%s detected\n",
480                                 priv->bugs.size(),
481                                 priv->bugs.size() > 1 ? "s" : "");
482                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
483                         priv->bugs[i]->print();
484         }
485 }
486
487 /**
488  * @brief Record end-of-execution stats
489  *
490  * Must be run when exiting an execution. Records various stats.
491  * @see struct execution_stats
492  */
493 void ModelChecker::record_stats()
494 {
495         stats.num_total++;
496         if (!isfeasibleprefix())
497                 stats.num_infeasible++;
498         else if (have_bug_reports())
499                 stats.num_buggy_executions++;
500         else if (is_complete_execution())
501                 stats.num_complete++;
502         else {
503                 stats.num_redundant++;
504
505                 /**
506                  * @todo We can violate this ASSERT() when fairness/sleep sets
507                  * conflict to cause an execution to terminate, e.g. with:
508                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
509                  */
510                 //ASSERT(scheduler->all_threads_sleeping());
511         }
512 }
513
514 /** @brief Print execution stats */
515 void ModelChecker::print_stats() const
516 {
517         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
518         model_print("Number of redundant executions: %d\n", stats.num_redundant);
519         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
520         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
521         model_print("Total executions: %d\n", stats.num_total);
522         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
523 }
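/*
 * Example output (hypothetical numbers; the format comes from the calls
 * above):
 *
 *   Number of complete, bug-free executions: 12
 *   Number of redundant executions: 3
 *   Number of buggy executions: 1
 *   Number of infeasible executions: 4
 *   Total executions: 20
 *   Total nodes created: 857
 */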
524
525 /**
526  * @brief End-of-execution print
527  * @param printbugs Should any existing bugs be printed?
528  */
529 void ModelChecker::print_execution(bool printbugs) const
530 {
531         print_program_output();
532
533         if (params.verbose) {
534                 model_print("Earliest divergence point since last feasible execution:\n");
535                 if (earliest_diverge)
536                         earliest_diverge->print();
537                 else
538                         model_print("(Not set)\n");
539
540                 model_print("\n");
541                 print_stats();
542         }
543
544         /* Don't print invalid bugs */
545         if (printbugs)
546                 print_bugs();
547
548         model_print("\n");
549         print_summary();
550 }
551
552 /**
553  * Queries the model-checker for more executions to explore and, if one
554  * exists, resets the model-checker state to execute a new execution.
555  *
556  * @return If there are more executions to explore, return true. Otherwise,
557  * return false.
558  */
559 bool ModelChecker::next_execution()
560 {
561         DBG();
562         /* Is this execution a feasible execution that's worth bug-checking? */
563         bool complete = isfeasibleprefix() && (is_complete_execution() ||
564                         have_bug_reports());
565
566         /* End-of-execution bug checks */
567         if (complete) {
568                 if (is_deadlocked())
569                         assert_bug("Deadlock detected");
570
571                 checkDataRaces();
572         }
573
574         record_stats();
575
576         /* Output */
577         if (params.verbose || (complete && have_bug_reports()))
578                 print_execution(complete);
579         else
580                 clear_program_output();
581
582         if (complete)
583                 earliest_diverge = NULL;
584
585         if ((diverge = get_next_backtrack()) == NULL)
586                 return false;
587
588         if (DBG_ENABLED()) {
589                 model_print("Next execution will diverge at:\n");
590                 diverge->print();
591         }
592
593         reset_to_initial_state();
594         return true;
595 }
596
597 /**
598  * @brief Find the last fence-related backtracking conflict for a ModelAction
599  *
600  * This function performs the search for the most recent conflicting action
601  * against which we should perform backtracking, as affected by fence
602  * operations. This includes pairs of potentially-synchronizing actions which
603  * occur due to fence-acquire or fence-release, and hence should be explored in
604  * the opposite execution order.
605  *
606  * @param act The current action
607  * @return The most recent action which conflicts with act due to fences
608  */
609 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
610 {
611         /* Only perform release/acquire fence backtracking for stores */
612         if (!act->is_write())
613                 return NULL;
614
615         /* Find a fence-release (or, act is a release) */
616         ModelAction *last_release;
617         if (act->is_release())
618                 last_release = act;
619         else
620                 last_release = get_last_fence_release(act->get_tid());
621         if (!last_release)
622                 return NULL;
623
624         /* Skip past the release */
625         action_list_t *list = action_trace;
626         action_list_t::reverse_iterator rit;
627         for (rit = list->rbegin(); rit != list->rend(); rit++)
628                 if (*rit == last_release)
629                         break;
630         ASSERT(rit != list->rend());
631
632         /* Find a prior:
633          *   load-acquire
634          * or
635          *   load --sb-> fence-acquire */
636         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
637         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
638         bool found_acquire_fences = false;
639         for ( ; rit != list->rend(); rit++) {
640                 ModelAction *prev = *rit;
641                 if (act->same_thread(prev))
642                         continue;
643
644                 int tid = id_to_int(prev->get_tid());
645
646                 if (prev->is_read() && act->same_var(prev)) {
647                         if (prev->is_acquire()) {
648                                 /* Found most recent load-acquire, don't need
649                                  * to search for more fences */
650                                 if (!found_acquire_fences)
651                                         return NULL;
652                         } else {
653                                 prior_loads[tid] = prev;
654                         }
655                 }
656                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
657                         found_acquire_fences = true;
658                         acquire_fences[tid] = prev;
659                 }
660         }
661
662         ModelAction *latest_backtrack = NULL;
663         for (unsigned int i = 0; i < acquire_fences.size(); i++)
664                 if (acquire_fences[i] && prior_loads[i])
665                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
666                                 latest_backtrack = acquire_fences[i];
667         return latest_backtrack;
668 }
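/*
 * Illustrative scenario (hypothetical user-level C++11 code) for the conflict
 * found above: a relaxed load sequenced before a fence-acquire in one thread
 * may synchronize with the store-release 'act' in another thread (if the load
 * reads from it), so the two should also be explored in the opposite order:
 *
 *   // some other thread                           // act's thread
 *   r = x.load(std::memory_order_relaxed);
 *   std::atomic_thread_fence(std::memory_order_acquire);
 *                                                  x.store(1, std::memory_order_release);
 */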
669
670 /**
671  * @brief Find the last backtracking conflict for a ModelAction
672  *
673  * This function performs the search for the most recent conflicting action
674  * against which we should perform backtracking. This primarily includes pairs of
675  * synchronizing actions which should be explored in the opposite execution
676  * order.
677  *
678  * @param act The current action
679  * @return The most recent action which conflicts with act
680  */
681 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
682 {
683         switch (act->get_type()) {
684         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
685         case ATOMIC_READ:
686         case ATOMIC_WRITE:
687         case ATOMIC_RMW: {
688                 ModelAction *ret = NULL;
689
690                 /* linear search: from most recent to oldest */
691                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
692                 action_list_t::reverse_iterator rit;
693                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
694                         ModelAction *prev = *rit;
695                         if (prev->could_synchronize_with(act)) {
696                                 ret = prev;
697                                 break;
698                         }
699                 }
700
701                 ModelAction *ret2 = get_last_fence_conflict(act);
702                 if (!ret2)
703                         return ret;
704                 if (!ret)
705                         return ret2;
706                 if (*ret < *ret2)
707                         return ret2;
708                 return ret;
709         }
710         case ATOMIC_LOCK:
711         case ATOMIC_TRYLOCK: {
712                 /* linear search: from most recent to oldest */
713                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
714                 action_list_t::reverse_iterator rit;
715                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
716                         ModelAction *prev = *rit;
717                         if (act->is_conflicting_lock(prev))
718                                 return prev;
719                 }
720                 break;
721         }
722         case ATOMIC_UNLOCK: {
723                 /* linear search: from most recent to oldest */
724                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
725                 action_list_t::reverse_iterator rit;
726                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
727                         ModelAction *prev = *rit;
728                         if (!act->same_thread(prev) && prev->is_failed_trylock())
729                                 return prev;
730                 }
731                 break;
732         }
733         case ATOMIC_WAIT: {
734                 /* linear search: from most recent to oldest */
735                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
736                 action_list_t::reverse_iterator rit;
737                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
738                         ModelAction *prev = *rit;
739                         if (!act->same_thread(prev) && prev->is_failed_trylock())
740                                 return prev;
741                         if (!act->same_thread(prev) && prev->is_notify())
742                                 return prev;
743                 }
744                 break;
745         }
746
747         case ATOMIC_NOTIFY_ALL:
748         case ATOMIC_NOTIFY_ONE: {
749                 /* linear search: from most recent to oldest */
750                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
751                 action_list_t::reverse_iterator rit;
752                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
753                         ModelAction *prev = *rit;
754                         if (!act->same_thread(prev) && prev->is_wait())
755                                 return prev;
756                 }
757                 break;
758         }
759         default:
760                 break;
761         }
762         return NULL;
763 }
764
765 /** This method finds backtracking points against which we should try to
766  * reorder the given ModelAction.
767  *
768  * @param act The ModelAction to find backtracking points for.
769  */
770 void ModelChecker::set_backtracking(ModelAction *act)
771 {
772         Thread *t = get_thread(act);
773         ModelAction *prev = get_last_conflict(act);
774         if (prev == NULL)
775                 return;
776
777         Node *node = prev->get_node()->get_parent();
778
779         int low_tid, high_tid;
780         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
781                 low_tid = id_to_int(act->get_tid());
782                 high_tid = low_tid + 1;
783         } else {
784                 low_tid = 0;
785                 high_tid = get_num_threads();
786         }
787
788         for (int i = low_tid; i < high_tid; i++) {
789                 thread_id_t tid = int_to_id(i);
790
791                 /* Make sure this thread can be enabled here. */
792                 if (i >= node->get_num_threads())
793                         break;
794
795                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
796                 if (node->enabled_status(tid) != THREAD_ENABLED)
797                         continue;
798
799                 /* Check if this has been explored already */
800                 if (node->has_been_explored(tid))
801                         continue;
802
803                 /* See if fairness allows */
804                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
805                         bool unfair = false;
806                         for (int t = 0; t < node->get_num_threads(); t++) {
807                                 thread_id_t tother = int_to_id(t);
808                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
809                                         unfair = true;
810                                         break;
811                                 }
812                         }
813                         if (unfair)
814                                 continue;
815                 }
816                 /* Cache the latest backtracking point */
817                 set_latest_backtrack(prev);
818
819                 /* If this is a new backtracking point, mark the tree */
820                 if (!node->set_backtrack(tid))
821                         continue;
822                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
823                                         id_to_int(prev->get_tid()),
824                                         id_to_int(t->get_id()));
825                 if (DBG_ENABLED()) {
826                         prev->print();
827                         act->print();
828                 }
829         }
830 }
831
832 /**
833  * @brief Cache a backtracking point as the "most recent", if eligible
834  *
835  * Note that this does not prepare the NodeStack for this backtracking
836  * operation; it only caches the action on a per-execution basis
837  *
838  * @param act The operation at which we should explore a different next action
839  * (i.e., backtracking point)
840  * @return True, if this action is now the most recent backtracking point;
841  * false otherwise
842  */
843 bool ModelChecker::set_latest_backtrack(ModelAction *act)
844 {
845         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
846                 priv->next_backtrack = act;
847                 return true;
848         }
849         return false;
850 }
851
852 /**
853  * Returns last backtracking point. The model checker will explore a different
854  * path for this point in the next execution.
855  * @return The ModelAction at which the next execution should diverge.
856  */
857 ModelAction * ModelChecker::get_next_backtrack()
858 {
859         ModelAction *next = priv->next_backtrack;
860         priv->next_backtrack = NULL;
861         return next;
862 }
863
864 /**
865  * Processes a read model action.
866  * @param curr is the read model action to process.
867  * @return True if processing this read updates the mo_graph.
868  */
869 bool ModelChecker::process_read(ModelAction *curr)
870 {
871         Node *node = curr->get_node();
872         while (true) {
873                 bool updated = false;
874                 switch (node->get_read_from_status()) {
875                 case READ_FROM_PAST: {
876                         const ModelAction *rf = node->get_read_from_past();
877                         ASSERT(rf);
878
879                         mo_graph->startChanges();
880
881                         ASSERT(!is_infeasible());
882                         if (!check_recency(curr, rf)) {
883                                 if (node->increment_read_from()) {
884                                         mo_graph->rollbackChanges();
885                                         continue;
886                                 } else {
887                                         priv->too_many_reads = true;
888                                 }
889                         }
890
891                         updated = r_modification_order(curr, rf);
892                         read_from(curr, rf);
893                         mo_graph->commitChanges();
894                         mo_check_promises(curr, true);
895                         break;
896                 }
897                 case READ_FROM_PROMISE: {
898                         Promise *promise = curr->get_node()->get_read_from_promise();
899                         if (promise->add_reader(curr))
900                                 priv->failed_promise = true;
901                         curr->set_read_from_promise(promise);
902                         mo_graph->startChanges();
903                         if (!check_recency(curr, promise))
904                                 priv->too_many_reads = true;
905                         updated = r_modification_order(curr, promise);
906                         mo_graph->commitChanges();
907                         break;
908                 }
909                 case READ_FROM_FUTURE: {
910                         /* Read from future value */
911                         struct future_value fv = node->get_future_value();
912                         Promise *promise = new Promise(curr, fv);
913                         curr->set_read_from_promise(promise);
914                         promises->push_back(promise);
915                         mo_graph->startChanges();
916                         updated = r_modification_order(curr, promise);
917                         mo_graph->commitChanges();
918                         break;
919                 }
920                 default:
921                         ASSERT(false);
922                 }
923                 get_thread(curr)->set_return_value(curr->get_return_value());
924                 return updated;
925         }
926 }
927
928 /**
929  * Processes a lock, trylock, or unlock model action.  @param curr is
930  * the mutex model action to process.
931  *
932  * The try lock operation checks whether the lock is already held.  If not,
933  * it falls through to the normal lock operation case.  If so, it returns
934  * failure.
935  *
936  * The lock operation has already been checked that it is enabled, so
937  * it just grabs the lock and synchronizes with the previous unlock.
938  *
939  * The unlock operation has to re-enable all of the threads that are
940  * waiting on the lock.
941  *
942  * @return True if synchronization was updated; false otherwise
943  */
944 bool ModelChecker::process_mutex(ModelAction *curr)
945 {
946         std::mutex *mutex = curr->get_mutex();
947         struct std::mutex_state *state = NULL;
948
949         if (mutex)
950                 state = mutex->get_state();
951
952         switch (curr->get_type()) {
953         case ATOMIC_TRYLOCK: {
954                 bool success = !state->locked;
955                 curr->set_try_lock(success);
956                 if (!success) {
957                         get_thread(curr)->set_return_value(0);
958                         break;
959                 }
960                 get_thread(curr)->set_return_value(1);
961         }
962                 //otherwise fall through to the lock case
963         case ATOMIC_LOCK: {
964                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
965                         assert_bug("Lock access before initialization");
966                 state->locked = get_thread(curr);
967                 ModelAction *unlock = get_last_unlock(curr);
968                 //synchronize with the previous unlock statement
969                 if (unlock != NULL) {
970                         curr->synchronize_with(unlock);
971                         return true;
972                 }
973                 break;
974         }
975         case ATOMIC_UNLOCK: {
976                 //unlock the lock
977                 state->locked = NULL;
978                 //wake up the other threads
979                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
980                 //activate all the waiting threads
981                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
982                         scheduler->wake(get_thread(*rit));
983                 }
984                 waiters->clear();
985                 break;
986         }
987         case ATOMIC_WAIT: {
988                 //unlock the lock
989                 state->locked = NULL;
990                 //wake up the other threads
991                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
992                 //activate all the waiting threads
993                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
994                         scheduler->wake(get_thread(*rit));
995                 }
996                 waiters->clear();
997                         //check whether we should go to sleep or not... simulate spurious wakeups
998                 if (curr->get_node()->get_misc() == 0) {
999                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1000                         //disable us
1001                         scheduler->sleep(get_thread(curr));
1002                 }
1003                 break;
1004         }
1005         case ATOMIC_NOTIFY_ALL: {
1006                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1007                 //activate all the waiting threads
1008                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1009                         scheduler->wake(get_thread(*rit));
1010                 }
1011                 waiters->clear();
1012                 break;
1013         }
1014         case ATOMIC_NOTIFY_ONE: {
1015                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1016                 int wakeupthread = curr->get_node()->get_misc();
1017                 action_list_t::iterator it = waiters->begin();
1018                 advance(it, wakeupthread);
1019                 scheduler->wake(get_thread(*it));
1020                 waiters->erase(it);
1021                 break;
1022         }
1023
1024         default:
1025                 ASSERT(0);
1026         }
1027         return false;
1028 }
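/*
 * The WAIT/NOTIFY cases above model user code of roughly this (hypothetical)
 * C11 shape; a wait releases the mutex and may be woken either by a notify or
 * by a simulated spurious wakeup (the node's misc counter):
 *
 *   // waiter                       // notifier
 *   mtx_lock(&m);                   mtx_lock(&m);
 *   cnd_wait(&cv, &m);              cnd_signal(&cv);
 *   mtx_unlock(&m);                 mtx_unlock(&m);
 */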
1029
1030 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1031 {
1032         /* Do more ambitious checks now that mo is more complete */
1033         if (mo_may_allow(writer, reader)) {
1034                 Node *node = reader->get_node();
1035
1036                 /* Find an ancestor thread which exists at the time of the reader */
1037                 Thread *write_thread = get_thread(writer);
1038                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1039                         write_thread = write_thread->get_parent();
1040
1041                 struct future_value fv = {
1042                         writer->get_write_value(),
1043                         writer->get_seq_number() + params.maxfuturedelay,
1044                         write_thread->get_id(),
1045                 };
1046                 if (node->add_future_value(fv))
1047                         set_latest_backtrack(reader);
1048         }
1049 }
1050
1051 /**
1052  * Process a write ModelAction
1053  * @param curr The ModelAction to process
1054  * @return True if the mo_graph was updated or promises were resolved
1055  */
1056 bool ModelChecker::process_write(ModelAction *curr)
1057 {
1058         /* Readers to which we may send our future value */
1059         std::vector< ModelAction *, ModelAlloc<ModelAction *> > send_fv;
1060
1061         bool updated_mod_order = w_modification_order(curr, &send_fv);
1062         int promise_idx = get_promise_to_resolve(curr);
1063         const ModelAction *earliest_promise_reader;
1064         bool updated_promises = false;
1065
1066         if (promise_idx >= 0) {
1067                 earliest_promise_reader = (*promises)[promise_idx]->get_reader(0);
1068                 updated_promises = resolve_promise(curr, promise_idx);
1069         } else
1070                 earliest_promise_reader = NULL;
1071
1072         /* Don't send future values to reads after the Promise we resolve */
1073         for (unsigned int i = 0; i < send_fv.size(); i++) {
1074                 ModelAction *read = send_fv[i];
1075                 if (!earliest_promise_reader || *read < *earliest_promise_reader)
1076                         futurevalues->push_back(PendingFutureValue(curr, read));
1077         }
1078
1079         if (promises->size() == 0) {
1080                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1081                         struct PendingFutureValue pfv = (*futurevalues)[i];
1082                         add_future_value(pfv.writer, pfv.act);
1083                 }
1084                 futurevalues->clear();
1085         }
1086
1087         mo_graph->commitChanges();
1088         mo_check_promises(curr, false);
1089
1090         get_thread(curr)->set_return_value(VALUE_NONE);
1091         return updated_mod_order || updated_promises;
1092 }
1093
1094 /**
1095  * Process a fence ModelAction
1096  * @param curr The ModelAction to process
1097  * @return True if synchronization was updated
1098  */
1099 bool ModelChecker::process_fence(ModelAction *curr)
1100 {
1101         /*
1102          * fence-relaxed: no-op
1103          * fence-release: only log the occurrence (not in this function), for
1104          *   use in later synchronization
1105          * fence-acquire (this function): search for hypothetical release
1106          *   sequences
1107          */
1108         bool updated = false;
1109         if (curr->is_acquire()) {
1110                 action_list_t *list = action_trace;
1111                 action_list_t::reverse_iterator rit;
1112                 /* Find X : is_read(X) && X --sb-> curr */
1113                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1114                         ModelAction *act = *rit;
1115                         if (act == curr)
1116                                 continue;
1117                         if (act->get_tid() != curr->get_tid())
1118                                 continue;
1119                         /* Stop at the beginning of the thread */
1120                         if (act->is_thread_start())
1121                                 break;
1122                         /* Stop once we reach a prior fence-acquire */
1123                         if (act->is_fence() && act->is_acquire())
1124                                 break;
1125                         if (!act->is_read())
1126                                 continue;
1127                         /* read-acquire will find its own release sequences */
1128                         if (act->is_acquire())
1129                                 continue;
1130
1131                         /* Establish hypothetical release sequences */
1132                         rel_heads_list_t release_heads;
1133                         get_release_seq_heads(curr, act, &release_heads);
1134                         for (unsigned int i = 0; i < release_heads.size(); i++)
1135                                 if (!curr->synchronize_with(release_heads[i]))
1136                                         set_bad_synchronization();
1137                         if (release_heads.size() != 0)
1138                                 updated = true;
1139                 }
1140         }
1141         return updated;
1142 }
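/*
 * Illustrative pattern (hypothetical user-level C++11 code) for the
 * fence-acquire case handled above: a relaxed load sequenced before the fence
 * may read from a release store in another thread, in which case it is the
 * fence-acquire, not the load itself, that establishes synchronization:
 *
 *   // Thread 1                               // Thread 2
 *   x.store(1, std::memory_order_release);    r = x.load(std::memory_order_relaxed);
 *                                             std::atomic_thread_fence(std::memory_order_acquire);
 */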
1143
1144 /**
1145  * @brief Process the current action for thread-related activity
1146  *
1147  * Performs current-action processing for a THREAD_* ModelAction. Processing
1148  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1149  * synchronization, etc.  This function is a no-op for non-THREAD actions
1150  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1151  *
1152  * @param curr The current action
1153  * @return True if synchronization was updated or a thread completed
1154  */
1155 bool ModelChecker::process_thread_action(ModelAction *curr)
1156 {
1157         bool updated = false;
1158
1159         switch (curr->get_type()) {
1160         case THREAD_CREATE: {
1161                 thrd_t *thrd = (thrd_t *)curr->get_location();
1162                 struct thread_params *params = (struct thread_params *)curr->get_value();
1163                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1164                 add_thread(th);
1165                 th->set_creation(curr);
1166                 /* Promises can be satisfied by children */
1167                 for (unsigned int i = 0; i < promises->size(); i++) {
1168                         Promise *promise = (*promises)[i];
1169                         if (promise->thread_is_available(curr->get_tid()))
1170                                 promise->add_thread(th->get_id());
1171                 }
1172                 break;
1173         }
1174         case THREAD_JOIN: {
1175                 Thread *blocking = curr->get_thread_operand();
1176                 ModelAction *act = get_last_action(blocking->get_id());
1177                 curr->synchronize_with(act);
1178                 updated = true; /* trigger rel-seq checks */
1179                 break;
1180         }
1181         case THREAD_FINISH: {
1182                 Thread *th = get_thread(curr);
1183                 while (!th->wait_list_empty()) {
1184                         ModelAction *act = th->pop_wait_list();
1185                         scheduler->wake(get_thread(act));
1186                 }
1187                 th->complete();
1188                 /* Completed thread can't satisfy promises */
1189                 for (unsigned int i = 0; i < promises->size(); i++) {
1190                         Promise *promise = (*promises)[i];
1191                         if (promise->thread_is_available(th->get_id()))
1192                                 if (promise->eliminate_thread(th->get_id()))
1193                                         priv->failed_promise = true;
1194                 }
1195                 updated = true; /* trigger rel-seq checks */
1196                 break;
1197         }
1198         case THREAD_START: {
1199                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1200                 break;
1201         }
1202         default:
1203                 break;
1204         }
1205
1206         return updated;
1207 }
1208
1209 /**
1210  * @brief Process the current action for release sequence fixup activity
1211  *
1212  * Performs model-checker release sequence fixups for the current action,
1213  * forcing a single pending release sequence to break (with a given, potential
1214  * "loose" write) or to complete (i.e., synchronize). If a pending release
1215  * sequence forms a complete release sequence, then we must perform the fixup
1216  * synchronization, mo_graph additions, etc.
1217  *
1218  * @param curr The current action; must be a release sequence fixup action
1219  * @param work_queue The work queue to which to add work items as they are
1220  * generated
1221  */
1222 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1223 {
1224         const ModelAction *write = curr->get_node()->get_relseq_break();
1225         struct release_seq *sequence = pending_rel_seqs->back();
1226         pending_rel_seqs->pop_back();
1227         ASSERT(sequence);
1228         ModelAction *acquire = sequence->acquire;
1229         const ModelAction *rf = sequence->rf;
1230         const ModelAction *release = sequence->release;
1231         ASSERT(acquire);
1232         ASSERT(release);
1233         ASSERT(rf);
1234         ASSERT(release->same_thread(rf));
1235
1236         if (write == NULL) {
1237                 /**
1238                  * @todo Forcing a synchronization requires that we set
1239                  * modification order constraints. For instance, we can't allow
1240                  * a fixup sequence in which two separate read-acquire
1241                  * operations read from the same sequence, where the first one
1242                  * synchronizes and the other doesn't. Essentially, we can't
1243                  * allow any writes to insert themselves between 'release' and
1244                  * 'rf'
1245                  */
1246
1247                 /* Must synchronize */
1248                 if (!acquire->synchronize_with(release)) {
1249                         set_bad_synchronization();
1250                         return;
1251                 }
1252                 /* Re-check all pending release sequences */
1253                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1254                 /* Re-check act for mo_graph edges */
1255                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1256
1257                 /* propagate synchronization to later actions */
1258                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1259                 for (; (*rit) != acquire; rit++) {
1260                         ModelAction *propagate = *rit;
1261                         if (acquire->happens_before(propagate)) {
1262                                 propagate->synchronize_with(acquire);
1263                                 /* Re-check 'propagate' for mo_graph edges */
1264                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1265                         }
1266                 }
1267         } else {
1268                 /* Break release sequence with new edges:
1269                  *   release --mo--> write --mo--> rf */
1270                 mo_graph->addEdge(release, write);
1271                 mo_graph->addEdge(write, rf);
1272         }
1273
1274         /* See if we have realized a data race */
1275         checkDataRaces();
1276 }
1277
1278 /**
1279  * Initialize the current action by performing one or more of the following
1280  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1281  * in the NodeStack, manipulating backtracking sets, allocating and
1282  * initializing clock vectors, and computing the promises to fulfill.
1283  *
1284  * @param curr The current action, as passed from the user context; may be
1285  * freed/invalidated after the execution of this function, with a different
1286  * action "returned" its place (pass-by-reference)
1287  * @return True if curr is a newly-explored action; false otherwise
1288  */
1289 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1290 {
1291         ModelAction *newcurr;
1292
1293         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1294                 newcurr = process_rmw(*curr);
1295                 delete *curr;
1296
1297                 if (newcurr->is_rmw())
1298                         compute_promises(newcurr);
1299
1300                 *curr = newcurr;
1301                 return false;
1302         }
1303
1304         (*curr)->set_seq_number(get_next_seq_num());
1305
1306         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1307         if (newcurr) {
1308                 /* First restore type and order in case of RMW operation */
1309                 if ((*curr)->is_rmwr())
1310                         newcurr->copy_typeandorder(*curr);
1311
1312                 ASSERT((*curr)->get_location() == newcurr->get_location());
1313                 newcurr->copy_from_new(*curr);
1314
1315                 /* Discard duplicate ModelAction; use action from NodeStack */
1316                 delete *curr;
1317
1318                 /* Always compute new clock vector */
1319                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1320
1321                 *curr = newcurr;
1322                 return false; /* Action was explored previously */
1323         } else {
1324                 newcurr = *curr;
1325
1326                 /* Always compute new clock vector */
1327                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1328
1329                 /* Assign most recent release fence */
1330                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1331
1332                 /*
1333                  * Perform one-time actions when pushing new ModelAction onto
1334                  * NodeStack
1335                  */
1336                 if (newcurr->is_write())
1337                         compute_promises(newcurr);
1338                 else if (newcurr->is_relseq_fixup())
1339                         compute_relseq_breakwrites(newcurr);
1340                 else if (newcurr->is_wait())
1341                         newcurr->get_node()->set_misc_max(2);
1342                 else if (newcurr->is_notify_one()) {
1343                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1344                 }
1345                 return true; /* This was a new ModelAction */
1346         }
1347 }
1348
1349 /**
1350  * @brief Establish reads-from relation between two actions
1351  *
1352  * Perform basic operations involved with establishing a concrete rf relation,
1353  * including setting the ModelAction data and checking for release sequences.
1354  *
1355  * @param act The action that is reading (must be a read)
1356  * @param rf The action from which we are reading (must be a write)
1357  *
1358  * @return True if this read established synchronization
1359  */
1360 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1361 {
1362         ASSERT(rf);
1363         act->set_read_from(rf);
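	/* An acquire read must synchronize with the head(s) of the release
	 * sequence(s) containing rf; count how many of those synchronizations
	 * actually succeed. */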
1364         if (act->is_acquire()) {
1365                 rel_heads_list_t release_heads;
1366                 get_release_seq_heads(act, act, &release_heads);
1367                 int num_heads = release_heads.size();
1368                 for (unsigned int i = 0; i < release_heads.size(); i++)
1369                         if (!act->synchronize_with(release_heads[i])) {
1370                                 set_bad_synchronization();
1371                                 num_heads--;
1372                         }
1373                 return num_heads > 0;
1374         }
1375         return false;
1376 }
1377
1378 /**
1379  * Check promises and eliminate potentially-satisfying threads when a thread is
1380  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1381  * no longer satisfy a promise generated from that thread.
1382  *
1383  * @param blocker The thread on which a thread is waiting
1384  * @param waiting The waiting thread
1385  */
1386 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1387 {
1388         for (unsigned int i = 0; i < promises->size(); i++) {
1389                 Promise *promise = (*promises)[i];
1390                 if (!promise->thread_is_available(waiting->get_id()))
1391                         continue;
1392                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1393                         ModelAction *reader = promise->get_reader(j);
1394                         if (reader->get_tid() != blocker->get_id())
1395                                 continue;
1396                         if (promise->eliminate_thread(waiting->get_id())) {
1397                                 /* Promise has failed */
1398                                 priv->failed_promise = true;
1399                         } else {
1400                                 /* Only eliminate the 'waiting' thread once */
1401                                 return;
1402                         }
1403                 }
1404         }
1405 }
1406
1407 /**
1408  * @brief Check whether a model action is enabled.
1409  *
1410  * Checks whether a lock or join operation would succeed (i.e., whether the
1411  * lock is currently unlocked or the joined thread has already completed). If
1412  * not, the action is placed on the appropriate waiter list.
1413  *
1414  * @param curr The ModelAction to check
1415  * @return True if the action is enabled; false otherwise
1416  */
1417 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1418         if (curr->is_lock()) {
1419                 std::mutex *lock = (std::mutex *)curr->get_location();
1420                 struct std::mutex_state *state = lock->get_state();
1421                 if (state->locked) {
1422                         //Stick the action in the appropriate waiting queue
1423                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1424                         return false;
1425                 }
1426         } else if (curr->get_type() == THREAD_JOIN) {
1427                 Thread *blocking = (Thread *)curr->get_location();
1428                 if (!blocking->is_complete()) {
1429                         blocking->push_wait_list(curr);
1430                         thread_blocking_check_promises(blocking, get_thread(curr));
1431                         return false;
1432                 }
1433         }
1434
1435         return true;
1436 }
1437
1438 /**
1439  * This is the heart of the model checker routine. It performs model-checking
1440  * actions corresponding to a given "current action." Among other processes, it
1441  * calculates reads-from relationships, updates synchronization clock vectors,
1442  * forms a memory_order constraints graph, and handles replay/backtrack
1443  * execution when running permutations of previously-observed executions.
1444  *
1445  * @param curr The current action to process
1446  * @return The ModelAction that is actually executed; may be different than
1447  * curr; may be NULL, if the current action is not enabled to run
1448  */
1449 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1450 {
1451         ASSERT(curr);
1452         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1453
1454         if (!check_action_enabled(curr)) {
1455                 /* Make the execution look like we chose to run this action
1456                  * much later, when a lock/join can succeed */
1457                 get_thread(curr)->set_pending(curr);
1458                 scheduler->sleep(get_thread(curr));
1459                 return NULL;
1460         }
1461
1462         bool newly_explored = initialize_curr_action(&curr);
1463
1464         DBG();
1465         if (DBG_ENABLED())
1466                 curr->print();
1467
1468         wake_up_sleeping_actions(curr);
1469
1470         /* Add the action to lists before any other model-checking tasks */
1471         if (!second_part_of_rmw)
1472                 add_action_to_lists(curr);
1473
1474         /* Build may_read_from set for newly-created actions */
1475         if (newly_explored && curr->is_read())
1476                 build_may_read_from(curr);
1477
1478         /* Initialize work_queue with the "current action" work */
1479         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
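	/* Process work items to a fixed point: checking the current action may
	 * enqueue release-sequence checks (WORK_CHECK_RELEASE_SEQ), which may in
	 * turn add synchronization and enqueue mo-graph re-checks
	 * (WORK_CHECK_MO_EDGES), until the queue drains or the execution asserts. */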
1480         while (!work_queue.empty() && !has_asserted()) {
1481                 WorkQueueEntry work = work_queue.front();
1482                 work_queue.pop_front();
1483
1484                 switch (work.type) {
1485                 case WORK_CHECK_CURR_ACTION: {
1486                         ModelAction *act = work.action;
1487                         bool update = false; /* update this location's release seq's */
1488                         bool update_all = false; /* update all release seq's */
1489
1490                         if (process_thread_action(curr))
1491                                 update_all = true;
1492
1493                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1494                                 update = true;
1495
1496                         if (act->is_write() && process_write(act))
1497                                 update = true;
1498
1499                         if (act->is_fence() && process_fence(act))
1500                                 update_all = true;
1501
1502                         if (act->is_mutex_op() && process_mutex(act))
1503                                 update_all = true;
1504
1505                         if (act->is_relseq_fixup())
1506                                 process_relseq_fixup(curr, &work_queue);
1507
1508                         if (update_all)
1509                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1510                         else if (update)
1511                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1512                         break;
1513                 }
1514                 case WORK_CHECK_RELEASE_SEQ:
1515                         resolve_release_sequences(work.location, &work_queue);
1516                         break;
1517                 case WORK_CHECK_MO_EDGES: {
1518                         /** @todo Complete verification of work_queue */
1519                         ModelAction *act = work.action;
1520                         bool updated = false;
1521
1522                         if (act->is_read()) {
1523                                 const ModelAction *rf = act->get_reads_from();
1524                                 const Promise *promise = act->get_reads_from_promise();
1525                                 if (rf) {
1526                                         if (r_modification_order(act, rf))
1527                                                 updated = true;
1528                                 } else if (promise) {
1529                                         if (r_modification_order(act, promise))
1530                                                 updated = true;
1531                                 }
1532                         }
1533                         if (act->is_write()) {
1534                                 if (w_modification_order(act, NULL))
1535                                         updated = true;
1536                         }
1537                         mo_graph->commitChanges();
1538
1539                         if (updated)
1540                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1541                         break;
1542                 }
1543                 default:
1544                         ASSERT(false);
1545                         break;
1546                 }
1547         }
1548
1549         check_curr_backtracking(curr);
1550         set_backtracking(curr);
1551         return curr;
1552 }
1553
1554 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1555 {
1556         Node *currnode = curr->get_node();
1557         Node *parnode = currnode->get_parent();
1558
1559         if ((parnode && !parnode->backtrack_empty()) ||
1560                          !currnode->misc_empty() ||
1561                          !currnode->read_from_empty() ||
1562                          !currnode->promise_empty() ||
1563                          !currnode->relseq_break_empty()) {
1564                 set_latest_backtrack(curr);
1565         }
1566 }
1567
1568 bool ModelChecker::promises_expired() const
1569 {
1570         for (unsigned int i = 0; i < promises->size(); i++) {
1571                 Promise *promise = (*promises)[i];
1572                 if (promise->get_expiration() < priv->used_sequence_numbers)
1573                         return true;
1574         }
1575         return false;
1576 }
1577
1578 /**
1579  * This is the strongest feasibility check available.
1580  * @return whether the current trace (partial or complete) must be a prefix of
1581  * a feasible trace.
1582  */
1583 bool ModelChecker::isfeasibleprefix() const
1584 {
1585         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1586 }
1587
1588 /**
1589  * Print diagnostic information about an infeasible execution
1590  * @param prefix A string to prefix the output with; if NULL, then a default
1591  * message prefix will be provided
1592  */
1593 void ModelChecker::print_infeasibility(const char *prefix) const
1594 {
1595         char buf[200]; /* large enough to hold every infeasibility tag at once */
1596         char *ptr = buf;
1597         if (mo_graph->checkForCycles())
1598                 ptr += sprintf(ptr, "[mo cycle]");
1599         if (priv->failed_promise)
1600                 ptr += sprintf(ptr, "[failed promise]");
1601         if (priv->too_many_reads)
1602                 ptr += sprintf(ptr, "[too many reads]");
1603         if (priv->no_valid_reads)
1604                 ptr += sprintf(ptr, "[no valid reads-from]");
1605         if (priv->bad_synchronization)
1606                 ptr += sprintf(ptr, "[bad sw ordering]");
1607         if (promises_expired())
1608                 ptr += sprintf(ptr, "[promise expired]");
1609         if (promises->size() != 0)
1610                 ptr += sprintf(ptr, "[unresolved promise]");
1611         if (ptr != buf)
1612                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1613 }
1614
1615 /**
1616  * Returns whether the current completed trace is feasible, except for pending
1617  * release sequences.
1618  */
1619 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1620 {
1621         return !is_infeasible() && promises->size() == 0;
1622 }
1623
1624 /**
1625  * Check if the current partial trace is infeasible. Does not check any
1626  * end-of-execution flags, which might rule out the execution. Thus, this is
1627  * useful only for ruling an execution as infeasible.
1628  * @return whether the current partial trace is infeasible.
1629  */
1630 bool ModelChecker::is_infeasible() const
1631 {
1632         return mo_graph->checkForCycles() ||
1633                 priv->no_valid_reads ||
1634                 priv->failed_promise ||
1635                 priv->too_many_reads ||
1636                 priv->bad_synchronization ||
1637                 promises_expired();
1638 }
1639
1640 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1641 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1642         ModelAction *lastread = get_last_action(act->get_tid());
1643         lastread->process_rmw(act);
1644         if (act->is_rmw()) {
1645                 if (lastread->get_reads_from())
1646                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1647                 else
1648                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1649                 mo_graph->commitChanges();
1650         }
1651         return lastread;
1652 }
1653
1654 /**
1655  * A helper function for ModelChecker::check_recency, to check if the current
1656  * thread is able to read from a different write/promise for 'params.maxreads'
1657  * number of steps and if that write/promise should become visible (i.e., is
1658  * ordered later in the modification order). This helps model memory liveness.
1659  *
1660  * @param curr The current action. Must be a read.
1661  * @param rf The write/promise from which we plan to read
1662  * @param other_rf The write/promise from which we may read
1663  * @return True if we were able to read from other_rf for params.maxreads steps
1664  */
1665 template <typename T, typename U>
1666 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1667 {
1668         /* Need a different write/promise */
1669         if (other_rf->equals(rf))
1670                 return false;
1671
1672         /* Only look for "newer" writes/promises */
1673         if (!mo_graph->checkReachable(rf, other_rf))
1674                 return false;
1675
1676         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1677         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1678         action_list_t::reverse_iterator rit = list->rbegin();
1679         ASSERT((*rit) == curr);
1680         /* Skip past curr */
1681         rit++;
1682
1683         /* Does this write/promise work for everyone? */
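	/* Note: the caller (check_recency) has already verified that at least
	 * params.maxreads prior reads exist in this list, so we do not re-check
	 * the list bounds here. */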
1684         for (int i = 0; i < params.maxreads; i++, rit++) {
1685                 ModelAction *act = *rit;
1686                 if (!act->may_read_from(other_rf))
1687                         return false;
1688         }
1689         return true;
1690 }
1691
1692 /**
1693  * Checks whether a thread has read from the same write or Promise too many
1694  * times without seeing the effects of a later write/Promise.
1695  *
1696  * Basic idea:
1697  * 1) there must be a different write/promise that we could read from,
1698  * 2) we must have read from the same write/promise in excess of maxreads times,
1699  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1700  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1701  *
1702  * If so, we decide that the execution is no longer feasible.
1703  *
1704  * @param curr The current action. Must be a read.
1705  * @param rf The ModelAction/Promise from which we might read.
1706  * @return True if the read should succeed; false otherwise
1707  */
1708 template <typename T>
1709 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1710 {
1711         if (!params.maxreads)
1712                 return true;
1713
1714         //NOTE: Next check is just an optimization; not strictly necessary
1715         if (curr->get_node()->get_read_from_past_size() +
1716                         curr->get_node()->get_read_from_promise_size() <= 1)
1717                 return true;
1718
1719         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1720         int tid = id_to_int(curr->get_tid());
1721         ASSERT(tid < (int)thrd_lists->size());
1722         action_list_t *list = &(*thrd_lists)[tid];
1723         action_list_t::reverse_iterator rit = list->rbegin();
1724         ASSERT((*rit) == curr);
1725         /* Skip past curr */
1726         rit++;
1727
1728         action_list_t::reverse_iterator ritcopy = rit;
1729         /* See if we have enough reads from the same value */
1730         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1731                 if (ritcopy == list->rend())
1732                         return true;
1733                 ModelAction *act = *ritcopy;
1734                 if (!act->is_read())
1735                         return true;
1736                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1737                         return true;
1738                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1739                         return true;
1740                 if (act->get_node()->get_read_from_past_size() +
1741                                 act->get_node()->get_read_from_promise_size() <= 1)
1742                         return true;
1743         }
1744         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1745                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1746                 if (should_read_instead(curr, rf, write))
1747                         return false; /* liveness failure */
1748         }
1749         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1750                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1751                 if (should_read_instead(curr, rf, promise))
1752                         return false; /* liveness failure */
1753         }
1754         return true;
1755 }
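
/*
 * Illustrative sketch (not part of the checker) of the liveness pattern that
 * check_recency targets, assuming a hypothetical program and a small
 * params.maxreads bound:
 *
 *   std::atomic<int> flag(0);
 *   // Thread 1:
 *   flag.store(1, std::memory_order_relaxed);
 *   // Thread 2:
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;       // spin
 *
 * Once Thread 2 has read the stale value 0 for params.maxreads consecutive
 * loads even though the store of 1 is available and mod-ordered later, the
 * execution is declared a liveness failure rather than being explored
 * indefinitely.
 */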
1756
1757 /**
1758  * Updates the mo_graph with the constraints imposed from the current
1759  * read.
1760  *
1761  * Basic idea is the following: Go through each other thread and find
1762  * the last action that happened before our read.  Two cases:
1763  *
1764  * (1) The action is a write => that write must either occur before
1765  * the write we read from or be the write we read from.
1766  *
1767  * (2) The action is a read => the write that that action read from
1768  * must occur before the write we read from or be the same write.
1769  *
1770  * @param curr The current action. Must be a read.
1771  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1772  * @return True if modification order edges were added; false otherwise
1773  */
1774 template <typename rf_type>
1775 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1776 {
1777         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1778         unsigned int i;
1779         bool added = false;
1780         ASSERT(curr->is_read());
1781
1782         /* Last SC fence in the current thread */
1783         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1784
1785         /* Iterate over all threads */
1786         for (i = 0; i < thrd_lists->size(); i++) {
1787                 /* Last SC fence in thread i */
1788                 ModelAction *last_sc_fence_thread_local = NULL;
1789                 if (int_to_id((int)i) != curr->get_tid())
1790                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1791
1792                 /* Last SC fence in thread i, before last SC fence in current thread */
1793                 ModelAction *last_sc_fence_thread_before = NULL;
1794                 if (last_sc_fence_local)
1795                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1796
1797                 /* Iterate over actions in thread, starting from most recent */
1798                 action_list_t *list = &(*thrd_lists)[i];
1799                 action_list_t::reverse_iterator rit;
1800                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1801                         ModelAction *act = *rit;
1802
1803                         if (act->is_write() && !act->equals(rf) && act != curr) {
1804                                 /* C++, Section 29.3 statement 5 */
1805                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1806                                                 *act < *last_sc_fence_thread_local) {
1807                                         added = mo_graph->addEdge(act, rf) || added;
1808                                         break;
1809                                 }
1810                                 /* C++, Section 29.3 statement 4 */
1811                                 else if (act->is_seqcst() && last_sc_fence_local &&
1812                                                 *act < *last_sc_fence_local) {
1813                                         added = mo_graph->addEdge(act, rf) || added;
1814                                         break;
1815                                 }
1816                                 /* C++, Section 29.3 statement 6 */
1817                                 else if (last_sc_fence_thread_before &&
1818                                                 *act < *last_sc_fence_thread_before) {
1819                                         added = mo_graph->addEdge(act, rf) || added;
1820                                         break;
1821                                 }
1822                         }
1823
1824                         /*
1825                          * Include at most one act per-thread that "happens
1826                          * before" curr. Don't consider reflexively.
1827                          */
1828                         if (act->happens_before(curr) && act != curr) {
1829                                 if (act->is_write()) {
1830                                         if (!act->equals(rf)) {
1831                                                 added = mo_graph->addEdge(act, rf) || added;
1832                                         }
1833                                 } else {
1834                                         const ModelAction *prevrf = act->get_reads_from();
1835                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1836                                         if (prevrf) {
1837                                                 if (!prevrf->equals(rf))
1838                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1839                                         } else if (!prevrf_promise->equals(rf)) {
1840                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1841                                         }
1842                                 }
1843                                 break;
1844                         }
1845                 }
1846         }
1847
1848         /*
1849          * All compatible, thread-exclusive promises must be ordered after any
1850          * concrete loads from the same thread
1851          */
1852         for (unsigned int i = 0; i < promises->size(); i++)
1853                 if ((*promises)[i]->is_compatible_exclusive(curr))
1854                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1855
1856         return added;
1857 }
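
/*
 * Worked example (illustrative only, with a hypothetical atomic x): suppose
 * the current read curr in thread T2 reads from write W2, and thread T1
 * previously performed a different write W1:
 *
 *   T1: W1: x.store(1, relaxed);    // assume W1 --hb--> curr via prior synchronization
 *   T2: curr: r = x.load(relaxed);  // reads from W2
 *
 * Case (1) above applies: W1 must be modification-ordered before the write
 * curr reads from, so the edge W1 --mo--> W2 is added to mo_graph.
 */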
1858
1859 /**
1860  * Updates the mo_graph with the constraints imposed from the current write.
1861  *
1862  * Basic idea is the following: Go through each other thread and find
1863  * the latest action that happened before our write.  Two cases:
1864  *
1865  * (1) The action is a write => that write must occur before
1866  * the current write
1867  *
1868  * (2) The action is a read => the write that that action read from
1869  * must occur before the current write.
1870  *
1871  * This method also handles two other issues:
1872  *
1873  * (I) Sequential Consistency: Making sure that if the current write is
1874  * seq_cst, that it occurs after the previous seq_cst write.
1875  *
1876  * (II) Sending the write back to non-synchronizing reads.
1877  *
1878  * @param curr The current action. Must be a write.
1879  * @param send_fv A vector for stashing reads to which we may pass our future
1880  * value. If NULL, then don't record any future values.
1881  * @return True if modification order edges were added; false otherwise
1882  */
1883 bool ModelChecker::w_modification_order(ModelAction *curr, std::vector< ModelAction *, ModelAlloc<ModelAction *> > *send_fv)
1884 {
1885         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1886         unsigned int i;
1887         bool added = false;
1888         ASSERT(curr->is_write());
1889
1890         if (curr->is_seqcst()) {
1891                 /* We have to at least see the last sequentially consistent write,
1892                          so we are initialized. */
1893                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1894                 if (last_seq_cst != NULL) {
1895                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1896                 }
1897         }
1898
1899         /* Last SC fence in the current thread */
1900         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1901
1902         /* Iterate over all threads */
1903         for (i = 0; i < thrd_lists->size(); i++) {
1904                 /* Last SC fence in thread i, before last SC fence in current thread */
1905                 ModelAction *last_sc_fence_thread_before = NULL;
1906                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1907                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1908
1909                 /* Iterate over actions in thread, starting from most recent */
1910                 action_list_t *list = &(*thrd_lists)[i];
1911                 action_list_t::reverse_iterator rit;
1912                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1913                         ModelAction *act = *rit;
1914                         if (act == curr) {
1915                                 /*
1916                                  * 1) If RMW and it actually read from something, then we
1917                                  * already have all relevant edges, so just skip to next
1918                                  * thread.
1919                                  *
1920                                  * 2) If RMW and it didn't read from anything, we should add
1921                                  * whatever edge we can get to speed up convergence.
1922                                  *
1923                                  * 3) If normal write, we need to look at earlier actions, so
1924                                  * continue processing list.
1925                                  */
1926                                 if (curr->is_rmw()) {
1927                                         if (curr->get_reads_from() != NULL)
1928                                                 break;
1929                                         else
1930                                                 continue;
1931                                 } else
1932                                         continue;
1933                         }
1934
1935                         /* C++, Section 29.3 statement 7 */
1936                         if (last_sc_fence_thread_before && act->is_write() &&
1937                                         *act < *last_sc_fence_thread_before) {
1938                                 added = mo_graph->addEdge(act, curr) || added;
1939                                 break;
1940                         }
1941
1942                         /*
1943                          * Include at most one act per-thread that "happens
1944                          * before" curr
1945                          */
1946                         if (act->happens_before(curr)) {
1947                                 /*
1948                                  * Note: if act is RMW, just add edge:
1949                                  *   act --mo--> curr
1950                                  * The following edge should be handled elsewhere:
1951                                  *   readfrom(act) --mo--> act
1952                                  */
1953                                 if (act->is_write())
1954                                         added = mo_graph->addEdge(act, curr) || added;
1955                                 else if (act->is_read()) {
1956                                         //if previous read accessed a null, just keep going
1957                                         if (act->get_reads_from() == NULL)
1958                                                 continue;
1959                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1960                                 }
1961                                 break;
1962                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1963                                                      !act->same_thread(curr)) {
1964                                 /* We have an action that:
1965                                    (1) did not happen before us
1966                                    (2) is a read and we are a write
1967                                    (3) cannot synchronize with us
1968                                    (4) is in a different thread
1969                                    =>
1970                                    that read could potentially read from our write.  Note that
1971                                    these checks are overly conservative at this point; we'll
1972                                    do more checks before actually removing the
1973                                    pendingfuturevalue.
1974
1975                                  */
1976                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1977                                         if (!is_infeasible())
1978                                                 send_fv->push_back(act);
1979                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1980                                                 add_future_value(curr, act);
1981                                 }
1982                         }
1983                 }
1984         }
1985
1986         /*
1987          * All compatible, thread-exclusive promises must be ordered after any
1988          * concrete stores to the same thread, or else they can be merged with
1989          * this store later
1990          */
1991         for (unsigned int i = 0; i < promises->size(); i++)
1992                 if ((*promises)[i]->is_compatible_exclusive(curr))
1993                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1994
1995         return added;
1996 }
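
/*
 * Illustrative sketch (not part of the checker) for issue (I) above, using a
 * hypothetical atomic x: all seq_cst stores to one object belong to a single
 * total order, e.g.
 *
 *   T1: x.store(1, std::memory_order_seq_cst);
 *   T2: x.store(2, std::memory_order_seq_cst);
 *
 * Whichever store the checker executes second receives an mo edge from the
 * earlier seq_cst store located by get_last_seq_cst_write().
 */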
1997
1998 /** Arbitrary reads from the future are not allowed.  Section 29.3
1999  * part 9 places some constraints.  This method checks one result of that
2000  * constraint.  Others require compiler support. */
2001 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
2002 {
2003         if (!writer->is_rmw())
2004                 return true;
2005
2006         if (!reader->is_rmw())
2007                 return true;
2008
2009         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2010                 if (search == reader)
2011                         return false;
2012                 if (search->get_tid() == reader->get_tid() &&
2013                                 search->happens_before(reader))
2014                         break;
2015         }
2016
2017         return true;
2018 }
2019
2020 /**
2021  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2022  * some constraints. This method checks the following constraint (others
2023  * require compiler support):
2024  *
2025  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2026  */
2027 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2028 {
2029         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2030         unsigned int i;
2031         /* Iterate over all threads */
2032         for (i = 0; i < thrd_lists->size(); i++) {
2033                 const ModelAction *write_after_read = NULL;
2034
2035                 /* Iterate over actions in thread, starting from most recent */
2036                 action_list_t *list = &(*thrd_lists)[i];
2037                 action_list_t::reverse_iterator rit;
2038                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2039                         ModelAction *act = *rit;
2040
2041                         /* Don't disallow due to act == reader */
2042                         if (!reader->happens_before(act) || reader == act)
2043                                 break;
2044                         else if (act->is_write())
2045                                 write_after_read = act;
2046                         else if (act->is_read() && act->get_reads_from() != NULL)
2047                                 write_after_read = act->get_reads_from();
2048                 }
2049
2050                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2051                         return false;
2052         }
2053         return true;
2054 }
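
/*
 * Illustrative sketch (not part of the checker) of the constraint checked
 * above, with hypothetical actions on an atomic x:
 *
 *   T1: X: r = x.load(relaxed);    // the prospective reader
 *       Y: x.store(1, relaxed);    // sequenced after X, so X --hb--> Y
 *   T2: Z: x.store(2, relaxed);    // suppose Y --mo--> Z
 *
 * Letting X read from Z would place a write that X happens before (Y)
 * modification-ordered before the write X reads from, which the constraint
 * forbids; mo_may_allow(Z, X) returns false for such a pair.
 */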
2055
2056 /**
2057  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2058  * The ModelAction under consideration is expected to be taking part in
2059  * release/acquire synchronization as an object of the "reads from" relation.
2060  * Note that this can only provide release sequence support for RMW chains
2061  * which do not read from the future, as those actions cannot be traced until
2062  * their "promise" is fulfilled. Similarly, we may not even establish the
2063  * presence of a release sequence with certainty, as some modification order
2064  * constraints may be decided further in the future. Thus, this function
2065  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2066  * and a boolean representing certainty.
2067  *
2068  * @param rf The action that might be part of a release sequence. Must be a
2069  * write.
2070  * @param release_heads A pass-by-reference style return parameter. After
2071  * execution of this function, release_heads will contain the heads of all the
2072  * relevant release sequences, if any exist with certainty
2073  * @param pending A pass-by-reference style return parameter which is only used
2074  * when returning false (i.e., uncertain). Returns most information regarding
2075  * an uncertain release sequence, including any write operations that might
2076  * break the sequence.
2077  * @return true, if the ModelChecker is certain that release_heads is complete;
2078  * false otherwise
2079  */
2080 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2081                 rel_heads_list_t *release_heads,
2082                 struct release_seq *pending) const
2083 {
2084         /* Only check for release sequences if there are no cycles */
2085         if (mo_graph->checkForCycles())
2086                 return false;
2087
2088         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2089                 ASSERT(rf->is_write());
2090
2091                 if (rf->is_release())
2092                         release_heads->push_back(rf);
2093                 else if (rf->get_last_fence_release())
2094                         release_heads->push_back(rf->get_last_fence_release());
2095                 if (!rf->is_rmw())
2096                         break; /* End of RMW chain */
2097
2098                 /** @todo Need to be smarter here...  In the linux lock
2099                  * example, this will run to the beginning of the program for
2100                  * every acquire. */
2101                 /** @todo The way to be smarter here is to keep going until 1
2102                  * thread has a release preceded by an acquire and you've seen
2103                  *       both. */
2104
2105                 /* acq_rel RMW is a sufficient stopping condition */
2106                 if (rf->is_acquire() && rf->is_release())
2107                         return true; /* complete */
2108         }
2109         if (!rf) {
2110                 /* read from future: need to settle this later */
2111                 pending->rf = NULL;
2112                 return false; /* incomplete */
2113         }
2114
2115         if (rf->is_release())
2116                 return true; /* complete */
2117
2118         /* else relaxed write
2119          * - check for fence-release in the same thread (29.8, stmt. 3)
2120          * - check modification order for contiguous subsequence
2121          *   -> rf must be same thread as release */
2122
2123         const ModelAction *fence_release = rf->get_last_fence_release();
2124         /* Synchronize with a fence-release unconditionally; we don't need to
2125          * find any more "contiguous subsequence..." for it */
2126         if (fence_release)
2127                 release_heads->push_back(fence_release);
2128
2129         int tid = id_to_int(rf->get_tid());
2130         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2131         action_list_t *list = &(*thrd_lists)[tid];
2132         action_list_t::const_reverse_iterator rit;
2133
2134         /* Find rf in the thread list */
2135         rit = std::find(list->rbegin(), list->rend(), rf);
2136         ASSERT(rit != list->rend());
2137
2138         /* Find the last {write,fence}-release */
2139         for (; rit != list->rend(); rit++) {
2140                 if (fence_release && *(*rit) < *fence_release)
2141                         break;
2142                 if ((*rit)->is_release())
2143                         break;
2144         }
2145         if (rit == list->rend()) {
2146                 /* No write-release in this thread */
2147                 return true; /* complete */
2148         } else if (fence_release && *(*rit) < *fence_release) {
2149                 /* The fence-release is more recent (and so, "stronger") than
2150                  * the most recent write-release */
2151                 return true; /* complete */
2152         } /* else, need to establish contiguous release sequence */
2153         ModelAction *release = *rit;
2154
2155         ASSERT(rf->same_thread(release));
2156
2157         pending->writes.clear();
2158
2159         bool certain = true;
2160         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2161                 if (id_to_int(rf->get_tid()) == (int)i)
2162                         continue;
2163                 list = &(*thrd_lists)[i];
2164
2165                 /* Can we ensure no future writes from this thread may break
2166                  * the release seq? */
2167                 bool future_ordered = false;
2168
2169                 ModelAction *last = get_last_action(int_to_id(i));
2170                 Thread *th = get_thread(int_to_id(i));
2171                 if ((last && rf->happens_before(last)) ||
2172                                 !is_enabled(th) ||
2173                                 th->is_complete())
2174                         future_ordered = true;
2175
2176                 ASSERT(!th->is_model_thread() || future_ordered);
2177
2178                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2179                         const ModelAction *act = *rit;
2180                         /* Reach synchronization -> this thread is complete */
2181                         if (act->happens_before(release))
2182                                 break;
2183                         if (rf->happens_before(act)) {
2184                                 future_ordered = true;
2185                                 continue;
2186                         }
2187
2188                         /* Only non-RMW writes can break release sequences */
2189                         if (!act->is_write() || act->is_rmw())
2190                                 continue;
2191
2192                         /* Check modification order */
2193                         if (mo_graph->checkReachable(rf, act)) {
2194                                 /* rf --mo--> act */
2195                                 future_ordered = true;
2196                                 continue;
2197                         }
2198                         if (mo_graph->checkReachable(act, release))
2199                                 /* act --mo--> release */
2200                                 break;
2201                         if (mo_graph->checkReachable(release, act) &&
2202                                       mo_graph->checkReachable(act, rf)) {
2203                                 /* release --mo-> act --mo--> rf */
2204                                 return true; /* complete */
2205                         }
2206                         /* act may break release sequence */
2207                         pending->writes.push_back(act);
2208                         certain = false;
2209                 }
2210                 if (!future_ordered)
2211                         certain = false; /* This thread is uncertain */
2212         }
2213
2214         if (certain) {
2215                 release_heads->push_back(release);
2216                 pending->writes.clear();
2217         } else {
2218                 pending->release = release;
2219                 pending->rf = rf;
2220         }
2221         return certain;
2222 }
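
/*
 * Illustrative sketch (not part of the checker): a simple release sequence,
 * using a hypothetical atomic x:
 *
 *   T1: x.store(1, std::memory_order_release);      // release head
 *   T2: x.fetch_add(1, std::memory_order_relaxed);  // RMW continues the sequence
 *   T3: r = x.load(std::memory_order_acquire);      // reads the RMW's result
 *
 * T3's acquire load reads from the RMW, but because the RMW extends the
 * release sequence headed by T1's store-release, release_seq_heads() reports
 * T1's store as a head and T3 synchronizes with T1.
 */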
2223
2224 /**
2225  * An interface for getting the release sequence head(s) with which a
2226  * given ModelAction must synchronize. This function only returns a non-empty
2227  * result when it can locate a release sequence head with certainty. Otherwise,
2228  * it may mark the internal state of the ModelChecker so that it will handle
2229  * the release sequence at a later time, causing @a acquire to update its
2230  * synchronization at some later point in execution.
2231  *
2232  * @param acquire The 'acquire' action that may synchronize with a release
2233  * sequence
2234  * @param read The read action that may read from a release sequence; this may
2235  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2236  * when 'acquire' is a fence-acquire)
2237  * @param release_heads A pass-by-reference return parameter. Will be filled
2238  * with the head(s) of the release sequence(s), if they exist with certainty.
2239  * @see ModelChecker::release_seq_heads
2240  */
2241 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2242                 ModelAction *read, rel_heads_list_t *release_heads)
2243 {
2244         const ModelAction *rf = read->get_reads_from();
2245         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2246         sequence->acquire = acquire;
2247         sequence->read = read;
2248
2249         if (!release_seq_heads(rf, release_heads, sequence)) {
2250                 /* add act to 'lazy checking' list */
2251                 pending_rel_seqs->push_back(sequence);
2252         } else {
2253                 snapshot_free(sequence);
2254         }
2255 }
2256
2257 /**
2258  * Attempt to resolve all stashed operations that might synchronize with a
2259  * release sequence for a given location. This implements the "lazy" portion of
2260  * determining whether or not a release sequence was contiguous, since not all
2261  * modification order information is present at the time an action occurs.
2262  *
2263  * @param location The location/object that should be checked for release
2264  * sequence resolutions. A NULL value means to check all locations.
2265  * @param work_queue The work queue to which to add work items as they are
2266  * generated
2267  * @return True if any updates occurred (new synchronization, new mo_graph
2268  * edges)
2269  */
2270 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2271 {
2272         bool updated = false;
2273         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2274         while (it != pending_rel_seqs->end()) {
2275                 struct release_seq *pending = *it;
2276                 ModelAction *acquire = pending->acquire;
2277                 const ModelAction *read = pending->read;
2278
2279                 /* Only resolve sequences on the given location, if provided */
2280                 if (location && read->get_location() != location) {
2281                         it++;
2282                         continue;
2283                 }
2284
2285                 const ModelAction *rf = read->get_reads_from();
2286                 rel_heads_list_t release_heads;
2287                 bool complete;
2288                 complete = release_seq_heads(rf, &release_heads, pending);
2289                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2290                         if (!acquire->has_synchronized_with(release_heads[i])) {
2291                                 if (acquire->synchronize_with(release_heads[i]))
2292                                         updated = true;
2293                                 else
2294                                         set_bad_synchronization();
2295                         }
2296                 }
2297
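		/* Note: 'updated' accumulates across all pending sequences, so once
		 * any sequence adds synchronization, later iterations conservatively
		 * re-enqueue these checks as well. */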
2298                 if (updated) {
2299                         /* Re-check all pending release sequences */
2300                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2301                         /* Re-check read-acquire for mo_graph edges */
2302                         if (acquire->is_read())
2303                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2304
2305                         /* propagate synchronization to later actions */
2306                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2307                         for (; (*rit) != acquire; rit++) {
2308                                 ModelAction *propagate = *rit;
2309                                 if (acquire->happens_before(propagate)) {
2310                                         propagate->synchronize_with(acquire);
2311                                         /* Re-check 'propagate' for mo_graph edges */
2312                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2313                                 }
2314                         }
2315                 }
2316                 if (complete) {
2317                         it = pending_rel_seqs->erase(it);
2318                         snapshot_free(pending);
2319                 } else {
2320                         it++;
2321                 }
2322         }
2323
2324         // If we resolved promises or added synchronization, check whether we have realized a data race.
2325         checkDataRaces();
2326
2327         return updated;
2328 }
2329
2330 /**
2331  * Performs various bookkeeping operations for the current ModelAction. For
2332  * instance, adds action to the per-object, per-thread action vector and to the
2333  * action trace list of all thread actions.
2334  *
2335  * @param act is the ModelAction to add.
2336  */
2337 void ModelChecker::add_action_to_lists(ModelAction *act)
2338 {
2339         int tid = id_to_int(act->get_tid());
2340         ModelAction *uninit = NULL;
2341         int uninit_id = -1;
2342         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
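	/* On the first access to an atomic object, synthesize an "uninitialized"
	 * pseudo-write so that reads from this location have a store to read
	 * from (modeling the object's uninitialized value). */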
2343         if (list->empty() && act->is_atomic_var()) {
2344                 uninit = new_uninitialized_action(act->get_location());
2345                 uninit_id = id_to_int(uninit->get_tid());
2346                 list->push_back(uninit);
2347         }
2348         list->push_back(act);
2349
2350         action_trace->push_back(act);
2351         if (uninit)
2352                 action_trace->push_front(uninit);
2353
2354         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2355         if (tid >= (int)vec->size())
2356                 vec->resize(priv->next_thread_id);
2357         (*vec)[tid].push_back(act);
2358         if (uninit)
2359                 (*vec)[uninit_id].push_front(uninit);
2360
2361         if ((int)thrd_last_action->size() <= tid)
2362                 thrd_last_action->resize(get_num_threads());
2363         (*thrd_last_action)[tid] = act;
2364         if (uninit)
2365                 (*thrd_last_action)[uninit_id] = uninit;
2366
2367         if (act->is_fence() && act->is_release()) {
2368                 if ((int)thrd_last_fence_release->size() <= tid)
2369                         thrd_last_fence_release->resize(get_num_threads());
2370                 (*thrd_last_fence_release)[tid] = act;
2371         }
2372
2373         if (act->is_wait()) {
2374                 void *mutex_loc = (void *) act->get_value();
2375                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2376
2377                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2378                 if (tid >= (int)vec->size())
2379                         vec->resize(priv->next_thread_id);
2380                 (*vec)[tid].push_back(act);
2381         }
2382 }
2383
2384 /**
2385  * @brief Get the last action performed by a particular Thread
2386  * @param tid The thread ID of the Thread in question
2387  * @return The last action in the thread
2388  */
2389 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2390 {
2391         int threadid = id_to_int(tid);
2392         if (threadid < (int)thrd_last_action->size())
2393                 return (*thrd_last_action)[threadid];
2394         else
2395                 return NULL;
2396 }
2397
2398 /**
2399  * @brief Get the last fence release performed by a particular Thread
2400  * @param tid The thread ID of the Thread in question
2401  * @return The last fence release in the thread, if one exists; NULL otherwise
2402  */
2403 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2404 {
2405         int threadid = id_to_int(tid);
2406         if (threadid < (int)thrd_last_fence_release->size())
2407                 return (*thrd_last_fence_release)[threadid];
2408         else
2409                 return NULL;
2410 }
2411
2412 /**
2413  * Gets the last memory_order_seq_cst write (in the total global sequence)
2414  * performed on a particular object (i.e., memory location), not including the
2415  * current action.
2416  * @param curr The current ModelAction; also denotes the object location to
2417  * check
2418  * @return The last seq_cst write
2419  */
2420 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2421 {
2422         void *location = curr->get_location();
2423         action_list_t *list = get_safe_ptr_action(obj_map, location);
2424         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2425         action_list_t::reverse_iterator rit;
2426         for (rit = list->rbegin(); rit != list->rend(); rit++)
2427                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2428                         return *rit;
2429         return NULL;
2430 }
2431
2432 /**
2433  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2434  * performed in a particular thread, prior to a particular fence.
2435  * @param tid The ID of the thread to check
2436  * @param before_fence The fence from which to begin the search; if NULL, then
2437  * search for the most recent fence in the thread.
2438  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2439  */
2440 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2441 {
2442         /* All fences should have NULL location */
2443         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2444         action_list_t::reverse_iterator rit = list->rbegin();
2445
2446         if (before_fence) {
2447                 for (; rit != list->rend(); rit++)
2448                         if (*rit == before_fence)
2449                                 break;
2450
2451                 ASSERT(*rit == before_fence);
2452                 rit++;
2453         }
2454
2455         for (; rit != list->rend(); rit++)
2456                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2457                         return *rit;
2458         return NULL;
2459 }
2460
2461 /**
2462  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2463  * location). This function identifies the mutex according to the current
2464  * action, which is presumed to operate on the same mutex.
2465  * @param curr The current ModelAction; also denotes the object location to
2466  * check
2467  * @return The last unlock operation
2468  */
2469 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2470 {
2471         void *location = curr->get_location();
2472         action_list_t *list = get_safe_ptr_action(obj_map, location);
2473         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2474         action_list_t::reverse_iterator rit;
2475         for (rit = list->rbegin(); rit != list->rend(); rit++)
2476                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2477                         return *rit;
2478         return NULL;
2479 }
2480
2481 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2482 {
2483         ModelAction *parent = get_last_action(tid);
2484         if (!parent)
2485                 parent = get_thread(tid)->get_creation();
2486         return parent;
2487 }
2488
2489 /**
2490  * Returns the clock vector for a given thread.
2491  * @param tid The thread whose clock vector we want
2492  * @return Desired clock vector
2493  */
2494 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2495 {
2496         return get_parent_action(tid)->get_cv();
2497 }
2498
2499 /**
2500  * @brief Find the promise, if any, to resolve for the current action
2501  * @param curr The current ModelAction. Should be a write.
2502  * @return The (non-negative) index for the Promise to resolve, if any;
2503  * otherwise -1
2504  */
2505 int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const
2506 {
2507         for (unsigned int i = 0; i < promises->size(); i++)
2508                 if (curr->get_node()->get_promise(i))
2509                         return i;
2510         return -1;
2511 }
2512
2513 /**
2514  * Resolve a Promise with a current write.
2515  * @param write The ModelAction that is fulfilling Promises
2516  * @param promise_idx The index corresponding to the promise
2517  * @return True if the Promise was successfully resolved; false otherwise
2518  */
2519 bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx)
2520 {
2521         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2522         Promise *promise = (*promises)[promise_idx];
2523
2524         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2525                 ModelAction *read = promise->get_reader(i);
2526                 read_from(read, write);
2527                 actions_to_check.push_back(read);
2528         }
2529         /* Make sure the promise's value matches the write's value */
2530         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2531         if (!mo_graph->resolvePromise(promise, write))
2532                 priv->failed_promise = true;
2533
2534         promises->erase(promises->begin() + promise_idx);
2535         /**
2536          * @todo  It is possible to end up in an inconsistent state, where a
2537          * "resolved" promise may still be referenced if
2538          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2539          *
2540          * Note that the inconsistency only matters when dumping mo_graph to
2541          * file.
2542          *
2543          * delete promise;
2544          */
2545
2546         // Check whether making these reads read from the resolving write has
2547         // made other threads unable to resolve their promises
2548         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2549                 ModelAction *read = actions_to_check[i];
2550                 mo_check_promises(read, true);
2551         }
2552
2553         return true;
2554 }
2555
2556 /**
2557  * Compute the set of promises that could potentially be satisfied by this
2558  * action. Note that the computed set is actually stored in the Node, not in
2559  * the ModelChecker.
2560  * @param curr The ModelAction that may satisfy promises
2561  */
2562 void ModelChecker::compute_promises(ModelAction *curr)
2563 {
2564         for (unsigned int i = 0; i < promises->size(); i++) {
2565                 Promise *promise = (*promises)[i];
2566                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2567                         continue;
2568
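                /* 'curr' can satisfy this promise only if none of its readers already
                 * happen before 'curr' or could synchronize with it */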
2569                 bool satisfy = true;
2570                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2571                         const ModelAction *act = promise->get_reader(j);
2572                         if (act->happens_before(curr) ||
2573                                         act->could_synchronize_with(curr)) {
2574                                 satisfy = false;
2575                                 break;
2576                         }
2577                 }
2578                 if (satisfy)
2579                         curr->get_node()->set_promise(i);
2580         }
2581 }
2582
2583 /** Checks promises in response to a change in a thread's ClockVector. */
2584 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2585 {
2586         for (unsigned int i = 0; i < promises->size(); i++) {
2587                 Promise *promise = (*promises)[i];
2588                 if (!promise->thread_is_available(tid))
2589                         continue;
2590                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2591                         const ModelAction *act = promise->get_reader(j);
2592                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2593                                         merge_cv->synchronized_since(act)) {
2594                                 if (promise->eliminate_thread(tid)) {
2595                                         /* Promise has failed */
2596                                         priv->failed_promise = true;
2597                                         return;
2598                                 }
2599                         }
2600                 }
2601         }
2602 }
2603
2604 void ModelChecker::check_promises_thread_disabled()
2605 {
2606         for (unsigned int i = 0; i < promises->size(); i++) {
2607                 Promise *promise = (*promises)[i];
2608                 if (promise->has_failed()) {
2609                         priv->failed_promise = true;
2610                         return;
2611                 }
2612         }
2613 }
2614
2615 /**
2616  * @brief Checks promises in response to addition to modification order for
2617  * threads.
2618  *
2619  * We test whether threads are still available for satisfying promises after an
2620  * addition to our modification order constraints. Those that are unavailable
2621  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2622  * that promise has failed.
2623  *
2624  * @param act The ModelAction which updated the modification order
2625  * @param is_read_check Should be true if act is a read and we must check for
2626  * updates to the store from which it read (there is a distinction here for
2627  * RMW's, which are both a load and a store)
2628  */
2629 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2630 {
2631         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2632
2633         for (unsigned int i = 0; i < promises->size(); i++) {
2634                 Promise *promise = (*promises)[i];
2635
2636                 // Is this promise on the same location?
2637                 if (!promise->same_location(write))
2638                         continue;
2639
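                /* If any reader of this promise happens before 'act', the new
                 * modification-order constraint may already break the promise */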
2640                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2641                         const ModelAction *pread = promise->get_reader(j);
2642                         if (!pread->happens_before(act))
2643                                 continue;
2644                         if (mo_graph->checkPromise(write, promise)) {
2645                                 priv->failed_promise = true;
2646                                 return;
2647                         }
2648                         break;
2649                 }
2650
2651                 // Don't do any lookups twice for the same thread
2652                 if (!promise->thread_is_available(act->get_tid()))
2653                         continue;
2654
2655                 if (mo_graph->checkReachable(promise, write)) {
2656                         if (mo_graph->checkPromise(write, promise)) {
2657                                 priv->failed_promise = true;
2658                                 return;
2659                         }
2660                 }
2661         }
2662 }
2663
2664 /**
2665  * Compute the set of writes that may break the current pending release
2666  * sequence. This information is extracted from previous release sequence
2667  * calculations.
2668  *
2669  * @param curr The current ModelAction. Must be a release sequence fixup
2670  * action.
2671  */
2672 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2673 {
2674         if (pending_rel_seqs->empty())
2675                 return;
2676
2677         struct release_seq *pending = pending_rel_seqs->back();
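        /* Offer each write from the pending release sequence as a candidate point at
         * which the fixup action may break the sequence */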
2678         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2679                 const ModelAction *write = pending->writes[i];
2680                 curr->get_node()->add_relseq_break(write);
2681         }
2682
2683         /* NULL means don't break the sequence; just synchronize */
2684         curr->get_node()->add_relseq_break(NULL);
2685 }
2686
2687 /**
2688  * Build up an initial set of all past writes that this 'read' action may read
2689  * from, as well as any previously-observed future values that must still be valid.
2690  *
2691  * @param curr is the current ModelAction that we are exploring; it must be a
2692  * 'read' operation.
2693  */
2694 void ModelChecker::build_may_read_from(ModelAction *curr)
2695 {
2696         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2697         unsigned int i;
2698         ASSERT(curr->is_read());
2699
2700         ModelAction *last_sc_write = NULL;
2701
2702         if (curr->is_seqcst())
2703                 last_sc_write = get_last_seq_cst_write(curr);
2704
2705         /* Iterate over all threads */
2706         for (i = 0; i < thrd_lists->size(); i++) {
2707                 /* Iterate over actions in thread, starting from most recent */
2708                 action_list_t *list = &(*thrd_lists)[i];
2709                 action_list_t::reverse_iterator rit;
2710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2711                         ModelAction *act = *rit;
2712
2713                         /* Only consider 'write' actions */
2714                         if (!act->is_write() || act == curr)
2715                                 continue;
2716
2717                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2718                         bool allow_read = true;
2719
2720                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2721                                 allow_read = false;
2722                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2723                                 allow_read = false;
2724
2725                         if (allow_read) {
2726                                 /* Only add feasible reads */
2727                                 mo_graph->startChanges();
2728                                 r_modification_order(curr, act);
2729                                 if (!is_infeasible())
2730                                         curr->get_node()->add_read_from_past(act);
2731                                 mo_graph->rollbackChanges();
2732                         }
2733
2734                         /* Include at most one act per-thread that "happens before" curr */
2735                         if (act->happens_before(curr))
2736                                 break;
2737                 }
2738         }
2739
2740         /* Inherit existing, promised future values */
2741         for (i = 0; i < promises->size(); i++) {
2742                 const Promise *promise = (*promises)[i];
2743                 const ModelAction *promise_read = promise->get_reader(0);
2744                 if (promise_read->same_var(curr)) {
2745                         /* Only add feasible future-values */
2746                         mo_graph->startChanges();
2747                         r_modification_order(curr, promise);
2748                         if (!is_infeasible())
2749                                 curr->get_node()->add_read_from_promise(promise_read);
2750                         mo_graph->rollbackChanges();
2751                 }
2752         }
2753
2754         /* We may find no valid may-read-from only if the execution is doomed */
2755         if (!curr->get_node()->read_from_size()) {
2756                 priv->no_valid_reads = true;
2757                 set_assert();
2758         }
2759
2760         if (DBG_ENABLED()) {
2761                 model_print("Reached read action:\n");
2762                 curr->print();
2763                 model_print("Printing read_from_past\n");
2764                 curr->get_node()->print_read_from_past();
2765                 model_print("End printing read_from_past\n");
2766         }
2767 }
2768
2769 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2770 {
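        /* Walk backward along the reads-from chain of RMWs feeding this write; a
         * sleeping thread may read from it only if the chain reaches the initial
         * (UNINIT) value or contains a release performed while the reader was on
         * the sleep set */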
2771         for ( ; write != NULL; write = write->get_reads_from()) {
2772                 /* UNINIT actions don't have a Node, and they never sleep */
2773                 if (write->is_uninitialized())
2774                         return true;
2775                 Node *prevnode = write->get_node()->get_parent();
2776
2777                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2778                 if (write->is_release() && thread_sleep)
2779                         return true;
2780                 if (!write->is_rmw())
2781                         return false;
2782         }
2783         return true;
2784 }
2785
2786 /**
2787  * @brief Create a new action representing an uninitialized atomic
2788  * @param location The memory location of the atomic object
2789  * @return A pointer to a new ModelAction
2790  */
2791 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2792 {
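        /* Allocate from the snapshotting heap and construct in place so the action
         * is saved and restored with the rest of the snapshotted state */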
2793         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2794         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2795         act->create_cv(NULL);
2796         return act;
2797 }
2798
2799 static void print_list(action_list_t *list)
2800 {
2801         action_list_t::iterator it;
2802
2803         model_print("---------------------------------------------------------------------\n");
2804
2805         unsigned int hash = 0;
2806
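        /* Print every action and fold it into a simple hash so that whole traces
         * can be compared by fingerprint */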
2807         for (it = list->begin(); it != list->end(); it++) {
2808                 (*it)->print();
2809                 hash = hash^(hash<<3)^((*it)->hash());
2810         }
2811         model_print("HASH %u\n", hash);
2812         model_print("---------------------------------------------------------------------\n");
2813 }
2814
2815 #if SUPPORT_MOD_ORDER_DUMP
2816 void ModelChecker::dumpGraph(char *filename) const
2817 {
2818         char buffer[200];
2819         sprintf(buffer, "%s.dot", filename);
2820         FILE *file = fopen(buffer, "w");
2821         fprintf(file, "digraph %s {\n", filename);
2822         mo_graph->dumpNodes(file);
2823         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2824
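        /* thread_array holds the most recent action seen in each thread so that
         * consecutive actions can be linked with sequenced-before (sb) edges */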
2825         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2826                 ModelAction *act = *it;
2827                 if (act->is_read()) {
2828                         mo_graph->dot_print_node(file, act);
2829                         if (act->get_reads_from())
2830                                 mo_graph->dot_print_edge(file,
2831                                                 act->get_reads_from(),
2832                                                 act,
2833                                                 "label=\"rf\", color=red, weight=2");
2834                         else
2835                                 mo_graph->dot_print_edge(file,
2836                                                 act->get_reads_from_promise(),
2837                                                 act,
2838                                                 "label=\"rf\", color=red");
2839                 }
2840                 if (thread_array[id_to_int(act->get_tid())]) {
2841                         mo_graph->dot_print_edge(file,
2842                                         thread_array[id_to_int(act->get_tid())],
2843                                         act,
2844                                         "label=\"sb\", color=blue, weight=400");
2845                 }
2846
2847                 thread_array[id_to_int(act->get_tid())] = act;
2848         }
2849         fprintf(file, "}\n");
2850         model_free(thread_array);
2851         fclose(file);
2852 }
2853 #endif
2854
2855 /** @brief Prints an execution trace summary. */
2856 void ModelChecker::print_summary() const
2857 {
2858 #if SUPPORT_MOD_ORDER_DUMP
2859         char buffername[100];
2860         sprintf(buffername, "exec%04u", stats.num_total);
2861         mo_graph->dumpGraphToFile(buffername);
2862         sprintf(buffername, "graph%04u", stats.num_total);
2863         dumpGraph(buffername);
2864 #endif
2865
2866         model_print("Execution %u:", stats.num_total);
2867         if (isfeasibleprefix()) {
2868                 if (scheduler->all_threads_sleeping())
2869                         model_print(" SLEEP-SET REDUNDANT");
2870                 model_print("\n");
2871         } else
2872                 print_infeasibility(" INFEASIBLE");
2873         print_list(action_trace);
2874         model_print("\n");
2875         if (!promises->empty()) {
2876                 model_print("Pending promises:\n");
2877                 for (unsigned int i = 0; i < promises->size(); i++) {
2878                         model_print(" [P%u] ", i);
2879                         (*promises)[i]->print();
2880                 }
2881                 model_print("\n");
2882         }
2883 }
2884
2885 /**
2886  * Add a Thread to the system for the first time. Should only be called once
2887  * per thread.
2888  * @param t The Thread to add
2889  */
2890 void ModelChecker::add_thread(Thread *t)
2891 {
2892         thread_map->put(id_to_int(t->get_id()), t);
2893         scheduler->add_thread(t);
2894 }
2895
2896 /**
2897  * Removes a thread from the scheduler.
2898  * @param t The Thread to remove.
2899  */
2900 void ModelChecker::remove_thread(Thread *t)
2901 {
2902         scheduler->remove_thread(t);
2903 }
2904
2905 /**
2906  * @brief Get a Thread reference by its ID
2907  * @param tid The Thread's ID
2908  * @return A Thread reference
2909  */
2910 Thread * ModelChecker::get_thread(thread_id_t tid) const
2911 {
2912         return thread_map->get(id_to_int(tid));
2913 }
2914
2915 /**
2916  * @brief Get a reference to the Thread in which a ModelAction was executed
2917  * @param act The ModelAction
2918  * @return A Thread reference
2919  */
2920 Thread * ModelChecker::get_thread(const ModelAction *act) const
2921 {
2922         return get_thread(act->get_tid());
2923 }
2924
2925 /**
2926  * @brief Get a Promise's "promise number"
2927  *
2928  * A "promise number" is an index number that is unique to a promise, valid
2929  * only for a specific snapshot of an execution trace. Promises may come and go
2930  * as they are generated and resolved, so an index only retains meaning for the
2931  * current snapshot.
2932  *
2933  * @param promise The Promise to check
2934  * @return The promise index, if the promise is still valid; otherwise -1
2935  */
2936 int ModelChecker::get_promise_number(const Promise *promise) const
2937 {
2938         for (unsigned int i = 0; i < promises->size(); i++)
2939                 if ((*promises)[i] == promise)
2940                         return i;
2941         /* Not found */
2942         return -1;
2943 }
2944
2945 /**
2946  * @brief Check if a Thread is currently enabled
2947  * @param t The Thread to check
2948  * @return True if the Thread is currently enabled
2949  */
2950 bool ModelChecker::is_enabled(Thread *t) const
2951 {
2952         return scheduler->is_enabled(t);
2953 }
2954
2955 /**
2956  * @brief Check if a Thread is currently enabled
2957  * @param tid The ID of the Thread to check
2958  * @return True if the Thread is currently enabled
2959  */
2960 bool ModelChecker::is_enabled(thread_id_t tid) const
2961 {
2962         return scheduler->is_enabled(tid);
2963 }
2964
2965 /**
2966  * Switch from a model-checker context to a user-thread context. This is the
2967  * complement of ModelChecker::switch_to_master and must be called from the
2968  * model-checker context.
2969  *
2970  * @param thread The user-thread to switch to
2971  */
2972 void ModelChecker::switch_from_master(Thread *thread)
2973 {
2974         scheduler->set_current_thread(thread);
2975         Thread::swap(&system_context, thread);
2976 }
2977
2978 /**
2979  * Switch from a user-context to the "master thread" context (a.k.a. system
2980  * context). This switch is made with the intention of exploring a particular
2981  * model-checking action (described by a ModelAction object). Must be called
2982  * from a user-thread context.
2983  *
2984  * @param act The current action that will be explored. May be NULL only if
2985  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2986  * ModelChecker::has_asserted).
2987  * @return The value returned by the current action
2988  */
2989 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2990 {
2991         DBG();
2992         Thread *old = thread_current();
2993         ASSERT(!old->get_pending());
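        /* Stash the action in this thread's pending slot; ModelChecker::run()
         * consumes it after the context switch back to the system context */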
2994         old->set_pending(act);
2995         if (Thread::swap(old, &system_context) < 0) {
2996                 perror("swap threads");
2997                 exit(EXIT_FAILURE);
2998         }
2999         return old->get_return_value();
3000 }
3001
3002 /**
3003  * Takes the next step in the execution, if possible.
3004  * @param curr The current step to take
3005  * @return The next Thread to run, if any; NULL if this execution
3006  * should terminate
3007  */
3008 Thread * ModelChecker::take_step(ModelAction *curr)
3009 {
3010         Thread *curr_thrd = get_thread(curr);
3011         ASSERT(curr_thrd->get_state() == THREAD_READY);
3012
3013         curr = check_current_action(curr);
3014
3015         /* Infeasible -> don't take any more steps */
3016         if (is_infeasible())
3017                 return NULL;
3018         else if (isfeasibleprefix() && have_bug_reports()) {
3019                 set_assert();
3020                 return NULL;
3021         }
3022
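        /* Honor the optional bound on execution length (a bound of 0 means unbounded) */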
3023         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3024                 return NULL;
3025
3026         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3027                 scheduler->remove_thread(curr_thrd);
3028
3029         Thread *next_thrd = get_next_thread(curr);
3030
3031         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3032                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3033
3034         return next_thrd;
3035 }
3036
3037 /** Wrapper to run the user's main function, with appropriate arguments */
3038 void user_main_wrapper(void *)
3039 {
3040         user_main(model->params.argc, model->params.argv);
3041 }
3042
3043 /** @brief Run ModelChecker for the user program */
3044 void ModelChecker::run()
3045 {
3046         do {
3047                 thrd_t user_thread;
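                /* The initial user thread runs the program's main() via user_main_wrapper */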
3048                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3049                 add_thread(t);
3050
3051                 do {
3052                         /*
3053                          * Stash next pending action(s) for thread(s). We
3054                          * should only need to stash one thread's action--the
3055                          * thread which just took a step--plus the first step
3056                          * for any newly-created thread
3057                          */
3058                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3059                                 thread_id_t tid = int_to_id(i);
3060                                 Thread *thr = get_thread(tid);
3061                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3062                                         switch_from_master(thr);
3063                                         if (is_circular_wait(thr))
3064                                                 assert_bug("Deadlock detected");
3065                                 }
3066                         }
3067
3068                         /* Catch assertions from prior take_step or from
3069                          * between-ModelAction bugs (e.g., data races) */
3070                         if (has_asserted())
3071                                 break;
3072
3073                         /* Consume the next action for a Thread */
3074                         ModelAction *curr = t->get_pending();
3075                         t->set_pending(NULL);
3076                         t = take_step(curr);
3077                 } while (t && !t->is_model_thread());
3078
3079                 /*
3080                  * Launch end-of-execution release sequence fixups only when
3081                  * the execution is otherwise feasible AND there are:
3082                  *
3083                  * (1) pending release sequences
3084                  * (2) pending assertions that could be invalidated by a change
3085                  * in clock vectors (i.e., data races)
3086                  * (3) no pending promises
3087                  */
3088                 while (!pending_rel_seqs->empty() &&
3089                                 is_feasible_prefix_ignore_relseq() &&
3090                                 !unrealizedraces.empty()) {
3091                         model_print("*** WARNING: release sequence fixup action "
3092                                         "(%zu pending release sequence(s)) ***\n",
3093                                         pending_rel_seqs->size());
3094                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3095                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3096                                         model_thread);
3097                         take_step(fixup);
3098                 }
3099         } while (next_execution());
3100
3101         model_print("******* Model-checking complete: *******\n");
3102         print_stats();
3103 }