model: pull thread control logic out of take_step(), check_current...()
c11tester: model.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         SnapVector<bug_message *> bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
88         promises(new SnapVector<Promise *>()),
89         futurevalues(new SnapVector<struct PendingFutureValue>()),
90         pending_rel_seqs(new SnapVector<struct release_seq *>()),
91         thrd_last_action(new SnapVector<ModelAction *>(1)),
92         thrd_last_fence_release(new SnapVector<ModelAction *>()),
93         node_stack(new NodeStack()),
94         priv(new struct model_snapshot_members()),
95         mo_graph(new CycleGraph())
96 {
97         /* Initialize a model-checker thread, for special ModelActions */
98         model_thread = new Thread(get_next_id());
99         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
100 }
101
102 /** @brief Destructor */
103 ModelChecker::~ModelChecker()
104 {
105         for (unsigned int i = 0; i < get_num_threads(); i++)
106                 delete thread_map->get(i);
107         delete thread_map;
108
109         delete obj_thrd_map;
110         delete obj_map;
111         delete condvar_waiters_map;
112         delete action_trace;
113
114         for (unsigned int i = 0; i < promises->size(); i++)
115                 delete (*promises)[i];
116         delete promises;
117
118         delete pending_rel_seqs;
119
120         delete thrd_last_action;
121         delete thrd_last_fence_release;
122         delete node_stack;
123         delete scheduler;
124         delete mo_graph;
125         delete priv;
126 }
127
128 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
129 {
130         action_list_t *tmp = hash->get(ptr);
131         if (tmp == NULL) {
132                 tmp = new action_list_t();
133                 hash->put(ptr, tmp);
134         }
135         return tmp;
136 }
137
138 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
139 {
140         SnapVector<action_list_t> *tmp = hash->get(ptr);
141         if (tmp == NULL) {
142                 tmp = new SnapVector<action_list_t>();
143                 hash->put(ptr, tmp);
144         }
145         return tmp;
146 }
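/*
 * Usage sketch for the helpers above: callers look up the per-location action
 * list and get one created lazily on first access, as in the lookups later in
 * this file:
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 */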
147
148 /**
149  * Restores user program to initial state and resets all model-checker data
150  * structures.
151  */
152 void ModelChecker::reset_to_initial_state()
153 {
154         DEBUG("+++ Resetting to initial state +++\n");
155         node_stack->reset_execution();
156
157         /**
158          * FIXME: if we utilize partial rollback, we will need to free only
159          * those pending actions which were NOT pending before the rollback
160          * point
161          */
162         for (unsigned int i = 0; i < get_num_threads(); i++)
163                 delete get_thread(int_to_id(i))->get_pending();
164
165         snapshot_backtrack_before(0);
166 }
167
168 /** @return a thread ID for a new Thread */
169 thread_id_t ModelChecker::get_next_id()
170 {
171         return priv->next_thread_id++;
172 }
173
174 /** @return the number of user threads created during this execution */
175 unsigned int ModelChecker::get_num_threads() const
176 {
177         return priv->next_thread_id;
178 }
179
180 /**
181  * Must be called from user-thread context (e.g., through the global
182  * thread_current() interface)
183  *
184  * @return The currently executing Thread.
185  */
186 Thread * ModelChecker::get_current_thread() const
187 {
188         return scheduler->get_current_thread();
189 }
190
191 /** @return a sequence number for a new ModelAction */
192 modelclock_t ModelChecker::get_next_seq_num()
193 {
194         return ++priv->used_sequence_numbers;
195 }
196
197 Node * ModelChecker::get_curr_node() const
198 {
199         return node_stack->get_head();
200 }
201
202 /**
203  * @brief Select the next thread to execute based on the current action
204  *
205  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
206  * actions should be followed by the execution of their child thread. In either
207  * case, the current action should determine the next thread schedule.
208  *
209  * @param curr The current action
210  * @return The next thread to run, if the current action will determine this
211  * selection; otherwise NULL
212  */
213 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
214 {
215         /* Do not split atomic RMW */
216         if (curr->is_rmwr())
217                 return get_thread(curr);
218         /* Follow CREATE with the created thread */
219         if (curr->get_type() == THREAD_CREATE)
220                 return curr->get_thread_operand();
221         return NULL;
222 }
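/*
 * User-code sketch of why this matters, assuming the std::atomic interface the
 * checker intercepts: a compare-and-swap is modeled as a read part (RMWR)
 * followed by a write or "cancel" part in the same thread, so that thread must
 * be scheduled again immediately:
 *
 *   std::atomic<int> x(0);
 *   int expected = 0;
 *   x.compare_exchange_strong(expected, 1);  // RMWR, then RMW (or RMWC if it fails)
 */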
223
224 /**
225  * @brief Choose the next thread to execute.
226  *
227  * This function chooses the next thread that should execute. It can enforce
228  * execution replay/backtracking or, if the model-checker has no preference
229  * regarding the next thread (i.e., when exploring a new execution ordering),
230  * we defer to the scheduler.
231  *
232  * @return The next thread to run, if one exists; otherwise NULL, if the
233  * current execution should terminate.
234  */
235 Thread * ModelChecker::get_next_thread()
236 {
237         thread_id_t tid;
238
239         /*
240          * Have we completed exploring the preselected path? Then let the
241          * scheduler decide
242          */
243         if (diverge == NULL)
244                 return scheduler->select_next_thread();
245
246         /* Else, we are trying to replay an execution */
247         ModelAction *next = node_stack->get_next()->get_action();
248
249         if (next == diverge) {
250                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
251                         earliest_diverge = diverge;
252
253                 Node *nextnode = next->get_node();
254                 Node *prevnode = nextnode->get_parent();
255                 scheduler->update_sleep_set(prevnode);
256
257                 /* Reached divergence point */
258                 if (nextnode->increment_behaviors()) {
259                         /* Execute the same thread with a new behavior */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else {
263                         ASSERT(prevnode);
264                         /* Make a different thread execute for next step */
265                         scheduler->add_sleep(get_thread(next->get_tid()));
266                         tid = prevnode->get_next_backtrack();
267                         /* Make sure the backtracked thread isn't sleeping. */
268                         node_stack->pop_restofstack(1);
269                         if (diverge == earliest_diverge) {
270                                 earliest_diverge = prevnode->get_action();
271                         }
272                 }
273                 /* Start the round robin scheduler from this thread id */
274                 scheduler->set_scheduler_thread(tid);
275                 /* The correct sleep set is in the parent node. */
276                 execute_sleep_set();
277
278                 DEBUG("*** Divergence point ***\n");
279
280                 diverge = NULL;
281         } else {
282                 tid = next->get_tid();
283         }
284         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
285         ASSERT(tid != THREAD_ID_T_NONE);
286         return get_thread(id_to_int(tid));
287 }
288
289 /**
290  * We need to know what the next actions of all threads in the sleep
291  * set will be. This method marks the pending action of each thread in the
292  * sleep set with the sleep flag.
293  */
294
295 void ModelChecker::execute_sleep_set()
296 {
297         for (unsigned int i = 0; i < get_num_threads(); i++) {
298                 thread_id_t tid = int_to_id(i);
299                 Thread *thr = get_thread(tid);
300                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
301                         thr->get_pending()->set_sleep_flag();
302                 }
303         }
304 }
305
306 /**
307  * @brief Should the current action wake up a given thread?
308  *
309  * @param curr The current action
310  * @param thread The thread that we might wake up
311  * @return True, if we should wake up the sleeping thread; false otherwise
312  */
313 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
314 {
315         const ModelAction *asleep = thread->get_pending();
316         /* Don't allow partial RMW to wake anyone up */
317         if (curr->is_rmwr())
318                 return false;
319         /* Synchronizing actions may have been backtracked */
320         if (asleep->could_synchronize_with(curr))
321                 return true;
322         /* All acquire/release fences and fence-acquire/store-release */
323         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
324                 return true;
325         /* Fence-release + store can awake load-acquire on the same location */
326         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
327                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
328                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
329                         return true;
330         }
331         return false;
332 }
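/*
 * Minimal sketch of the last case above, in terms of the C++11 atomics the
 * checker intercepts: a fence-release followed by a relaxed store to the same
 * location as a sleeping load-acquire can synchronize with that load, so the
 * sleeper must be woken to observe the new value:
 *
 *   std::atomic<int> x(0);
 *   // writer thread ('curr' is the store)     // sleeping reader thread
 *   std::atomic_thread_fence(std::memory_order_release);
 *   x.store(1, std::memory_order_relaxed);     x.load(std::memory_order_acquire);
 */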
333
334 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
335 {
336         for (unsigned int i = 0; i < get_num_threads(); i++) {
337                 Thread *thr = get_thread(int_to_id(i));
338                 if (scheduler->is_sleep_set(thr)) {
339                         if (should_wake_up(curr, thr))
340                                 /* Remove this thread from sleep set */
341                                 scheduler->remove_sleep(thr);
342                 }
343         }
344 }
345
346 /** @brief Alert the model-checker that an incorrectly-ordered
347  * synchronization was made */
348 void ModelChecker::set_bad_synchronization()
349 {
350         priv->bad_synchronization = true;
351 }
352
353 /**
354  * Check whether the current trace has triggered an assertion which should halt
355  * its execution.
356  *
357  * @return True, if the execution should be aborted; false otherwise
358  */
359 bool ModelChecker::has_asserted() const
360 {
361         return priv->asserted;
362 }
363
364 /**
365  * Trigger a trace assertion which should cause this execution to be halted.
366  * This can be due to a detected bug or due to an infeasibility that should
367  * halt ASAP.
368  */
369 void ModelChecker::set_assert()
370 {
371         priv->asserted = true;
372 }
373
374 /**
375  * Check if we are in a deadlock. Should only be called at the end of an
376  * execution, although it should not give false positives in the middle of an
377  * execution (there should be some ENABLED thread).
378  *
379  * @return True if program is in a deadlock; false otherwise
380  */
381 bool ModelChecker::is_deadlocked() const
382 {
383         bool blocking_threads = false;
384         for (unsigned int i = 0; i < get_num_threads(); i++) {
385                 thread_id_t tid = int_to_id(i);
386                 if (is_enabled(tid))
387                         return false;
388                 Thread *t = get_thread(tid);
389                 if (!t->is_model_thread() && t->get_pending())
390                         blocking_threads = true;
391         }
392         return blocking_threads;
393 }
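/*
 * A classic program this check reports, sketched with the std::mutex the model
 * checker provides: once each thread holds one lock and blocks on the other,
 * no thread is enabled, yet both still have pending actions:
 *
 *   std::mutex a, b;
 *   // thread 1        // thread 2
 *   a.lock();          b.lock();
 *   b.lock();          a.lock();
 */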
394
395 /**
396  * Check if this is a complete execution. That is, have all threads completed
397  * execution (rather than exiting because sleep sets have forced a redundant
398  * execution).
399  *
400  * @return True if the execution is complete.
401  */
402 bool ModelChecker::is_complete_execution() const
403 {
404         for (unsigned int i = 0; i < get_num_threads(); i++)
405                 if (is_enabled(int_to_id(i)))
406                         return false;
407         return true;
408 }
409
410 /**
411  * @brief Assert a bug in the executing program.
412  *
413  * Use this function to assert any sort of bug in the user program. If the
414  * current trace is feasible (actually, a prefix of some feasible execution),
415  * then this execution will be aborted, printing the appropriate message. If
416  * the current trace is not yet feasible, the error message will be stashed and
417  * printed if the execution ever becomes feasible.
418  *
419  * @param msg Descriptive message for the bug (do not include newline char)
420  * @return True if bug is immediately-feasible
421  */
422 bool ModelChecker::assert_bug(const char *msg)
423 {
424         priv->bugs.push_back(new bug_message(msg));
425
426         if (isfeasibleprefix()) {
427                 set_assert();
428                 return true;
429         }
430         return false;
431 }
432
433 /**
434  * @brief Assert a bug in the executing program, asserted by a user thread
435  * @see ModelChecker::assert_bug
436  * @param msg Descriptive message for the bug (do not include newline char)
437  */
438 void ModelChecker::assert_user_bug(const char *msg)
439 {
440         /* If feasible bug, bail out now */
441         if (assert_bug(msg))
442                 switch_to_master(NULL);
443 }
444
445 /** @return True, if any bugs have been reported for this execution */
446 bool ModelChecker::have_bug_reports() const
447 {
448         return priv->bugs.size() != 0;
449 }
450
451 /** @brief Print bug report listing for this execution (if any bugs exist) */
452 void ModelChecker::print_bugs() const
453 {
454         if (have_bug_reports()) {
455                 model_print("Bug report: %zu bug%s detected\n",
456                                 priv->bugs.size(),
457                                 priv->bugs.size() > 1 ? "s" : "");
458                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
459                         priv->bugs[i]->print();
460         }
461 }
462
463 /**
464  * @brief Record end-of-execution stats
465  *
466  * Must be run when exiting an execution. Records various stats.
467  * @see struct execution_stats
468  */
469 void ModelChecker::record_stats()
470 {
471         stats.num_total++;
472         if (!isfeasibleprefix())
473                 stats.num_infeasible++;
474         else if (have_bug_reports())
475                 stats.num_buggy_executions++;
476         else if (is_complete_execution())
477                 stats.num_complete++;
478         else {
479                 stats.num_redundant++;
480
481                 /**
482                  * @todo We can violate this ASSERT() when fairness/sleep sets
483                  * conflict to cause an execution to terminate, e.g. with:
484                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
485                  */
486                 //ASSERT(scheduler->all_threads_sleeping());
487         }
488 }
489
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
492 {
493         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494         model_print("Number of redundant executions: %d\n", stats.num_redundant);
495         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497         model_print("Total executions: %d\n", stats.num_total);
498         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
499 }
500
501 /**
502  * @brief End-of-execution print
503  * @param printbugs Should any existing bugs be printed?
504  */
505 void ModelChecker::print_execution(bool printbugs) const
506 {
507         print_program_output();
508
509         if (params.verbose) {
510                 model_print("Earliest divergence point since last feasible execution:\n");
511                 if (earliest_diverge)
512                         earliest_diverge->print();
513                 else
514                         model_print("(Not set)\n");
515
516                 model_print("\n");
517                 print_stats();
518         }
519
520         /* Don't print invalid bugs */
521         if (printbugs)
522                 print_bugs();
523
524         model_print("\n");
525         print_summary();
526 }
527
528 /**
529  * Queries the model-checker for more executions to explore and, if one
530  * exists, resets the model-checker state to execute a new execution.
531  *
532  * @return If there are more executions to explore, return true. Otherwise,
533  * return false.
534  */
535 bool ModelChecker::next_execution()
536 {
537         DBG();
538         /* Is this execution a feasible execution that's worth bug-checking? */
539         bool complete = isfeasibleprefix() && (is_complete_execution() ||
540                         have_bug_reports());
541
542         /* End-of-execution bug checks */
543         if (complete) {
544                 if (is_deadlocked())
545                         assert_bug("Deadlock detected");
546
547                 checkDataRaces();
548         }
549
550         record_stats();
551
552         /* Output */
553         if (params.verbose || (complete && have_bug_reports()))
554                 print_execution(complete);
555         else
556                 clear_program_output();
557
558         if (complete)
559                 earliest_diverge = NULL;
560
561         if ((diverge = get_next_backtrack()) == NULL)
562                 return false;
563
564         if (DBG_ENABLED()) {
565                 model_print("Next execution will diverge at:\n");
566                 diverge->print();
567         }
568
569         reset_to_initial_state();
570         return true;
571 }
572
573 /**
574  * @brief Find the last fence-related backtracking conflict for a ModelAction
575  *
576  * This function performs the search for the most recent conflicting action
577  * against which we should perform backtracking, as affected by fence
578  * operations. This includes pairs of potentially-synchronizing actions which
579  * occur due to fence-acquire or fence-release, and hence should be explored in
580  * the opposite execution order.
581  *
582  * @param act The current action
583  * @return The most recent action which conflicts with act due to fences
584  */
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
586 {
587         /* Only perform release/acquire fence backtracking for stores */
588         if (!act->is_write())
589                 return NULL;
590
591         /* Find a fence-release (or, act is a release) */
592         ModelAction *last_release;
593         if (act->is_release())
594                 last_release = act;
595         else
596                 last_release = get_last_fence_release(act->get_tid());
597         if (!last_release)
598                 return NULL;
599
600         /* Skip past the release */
601         action_list_t *list = action_trace;
602         action_list_t::reverse_iterator rit;
603         for (rit = list->rbegin(); rit != list->rend(); rit++)
604                 if (*rit == last_release)
605                         break;
606         ASSERT(rit != list->rend());
607
608         /* Find a prior:
609          *   load-acquire
610          * or
611          *   load --sb-> fence-acquire */
612         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
613         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
614         bool found_acquire_fences = false;
615         for ( ; rit != list->rend(); rit++) {
616                 ModelAction *prev = *rit;
617                 if (act->same_thread(prev))
618                         continue;
619
620                 int tid = id_to_int(prev->get_tid());
621
622                 if (prev->is_read() && act->same_var(prev)) {
623                         if (prev->is_acquire()) {
624                                 /* Found most recent load-acquire, don't need
625                                  * to search for more fences */
626                                 if (!found_acquire_fences)
627                                         return NULL;
628                         } else {
629                                 prior_loads[tid] = prev;
630                         }
631                 }
632                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633                         found_acquire_fences = true;
634                         acquire_fences[tid] = prev;
635                 }
636         }
637
638         ModelAction *latest_backtrack = NULL;
639         for (unsigned int i = 0; i < acquire_fences.size(); i++)
640                 if (acquire_fences[i] && prior_loads[i])
641                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642                                 latest_backtrack = acquire_fences[i];
643         return latest_backtrack;
644 }
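/*
 * The pattern searched for above, as a user-code sketch: a relaxed load of the
 * same location sequenced before a fence-acquire in another thread may
 * synchronize with this release store, so the pair is recorded as a
 * backtracking conflict and the opposite ordering is explored too:
 *
 *   std::atomic<int> x(0);
 *   // another thread                           // this thread ('act')
 *   int r = x.load(std::memory_order_relaxed);
 *   std::atomic_thread_fence(std::memory_order_acquire);
 *                                               x.store(1, std::memory_order_release);
 */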
645
646 /**
647  * @brief Find the last backtracking conflict for a ModelAction
648  *
649  * This function performs the search for the most recent conflicting action
650  * against which we should perform backtracking. This primarily includes pairs of
651  * synchronizing actions which should be explored in the opposite execution
652  * order.
653  *
654  * @param act The current action
655  * @return The most recent action which conflicts with act
656  */
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
658 {
659         switch (act->get_type()) {
660         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
661         case ATOMIC_READ:
662         case ATOMIC_WRITE:
663         case ATOMIC_RMW: {
664                 ModelAction *ret = NULL;
665
666                 /* linear search: from most recent to oldest */
667                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668                 action_list_t::reverse_iterator rit;
669                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670                         ModelAction *prev = *rit;
671                         if (prev->could_synchronize_with(act)) {
672                                 ret = prev;
673                                 break;
674                         }
675                 }
676
677                 ModelAction *ret2 = get_last_fence_conflict(act);
678                 if (!ret2)
679                         return ret;
680                 if (!ret)
681                         return ret2;
682                 if (*ret < *ret2)
683                         return ret2;
684                 return ret;
685         }
686         case ATOMIC_LOCK:
687         case ATOMIC_TRYLOCK: {
688                 /* linear search: from most recent to oldest */
689                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690                 action_list_t::reverse_iterator rit;
691                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692                         ModelAction *prev = *rit;
693                         if (act->is_conflicting_lock(prev))
694                                 return prev;
695                 }
696                 break;
697         }
698         case ATOMIC_UNLOCK: {
699                 /* linear search: from most recent to oldest */
700                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701                 action_list_t::reverse_iterator rit;
702                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703                         ModelAction *prev = *rit;
704                         if (!act->same_thread(prev) && prev->is_failed_trylock())
705                                 return prev;
706                 }
707                 break;
708         }
709         case ATOMIC_WAIT: {
710                 /* linear search: from most recent to oldest */
711                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712                 action_list_t::reverse_iterator rit;
713                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714                         ModelAction *prev = *rit;
715                         if (!act->same_thread(prev) && prev->is_failed_trylock())
716                                 return prev;
717                         if (!act->same_thread(prev) && prev->is_notify())
718                                 return prev;
719                 }
720                 break;
721         }
722
723         case ATOMIC_NOTIFY_ALL:
724         case ATOMIC_NOTIFY_ONE: {
725                 /* linear search: from most recent to oldest */
726                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727                 action_list_t::reverse_iterator rit;
728                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729                         ModelAction *prev = *rit;
730                         if (!act->same_thread(prev) && prev->is_wait())
731                                 return prev;
732                 }
733                 break;
734         }
735         default:
736                 break;
737         }
738         return NULL;
739 }
740
741 /** This method finds backtracking points against which we should try to
742  * reorder the given ModelAction.
743  *
744  * @param act The ModelAction to find backtracking points for.
745  */
746 void ModelChecker::set_backtracking(ModelAction *act)
747 {
748         Thread *t = get_thread(act);
749         ModelAction *prev = get_last_conflict(act);
750         if (prev == NULL)
751                 return;
752
753         Node *node = prev->get_node()->get_parent();
754
755         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
756         int low_tid, high_tid;
757         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
758                 low_tid = id_to_int(act->get_tid());
759                 high_tid = low_tid + 1;
760         } else {
761                 low_tid = 0;
762                 high_tid = get_num_threads();
763         }
764
765         for (int i = low_tid; i < high_tid; i++) {
766                 thread_id_t tid = int_to_id(i);
767
768                 /* Make sure this thread can be enabled here. */
769                 if (i >= node->get_num_threads())
770                         break;
771
772                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
773                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774                 if (node->enabled_status(tid) != THREAD_ENABLED)
775                         continue;
776
777                 /* Check if this has been explored already */
778                 if (node->has_been_explored(tid))
779                         continue;
780
781                 /* See if fairness allows */
782                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
783                         bool unfair = false;
784                         for (int t = 0; t < node->get_num_threads(); t++) {
785                                 thread_id_t tother = int_to_id(t);
786                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
787                                         unfair = true;
788                                         break;
789                                 }
790                         }
791                         if (unfair)
792                                 continue;
793                 }
794
795                 /* See if CHESS-like yield fairness allows */
796                 if (model->params.yieldon) {
797                         bool unfair = false;
798                         for (int t = 0; t < node->get_num_threads(); t++) {
799                                 thread_id_t tother = int_to_id(t);
800                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
801                                         unfair = true;
802                                         break;
803                                 }
804                         }
805                         if (unfair)
806                                 continue;
807                 }
808
809                 /* Cache the latest backtracking point */
810                 set_latest_backtrack(prev);
811
812                 /* If this is a new backtracking point, mark the tree */
813                 if (!node->set_backtrack(tid))
814                         continue;
815                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
816                                         id_to_int(prev->get_tid()),
817                                         id_to_int(t->get_id()));
818                 if (DBG_ENABLED()) {
819                         prev->print();
820                         act->print();
821                 }
822         }
823 }
824
825 /**
826  * @brief Cache a backtracking point as the "most recent", if eligible
827  *
828  * Note that this does not prepare the NodeStack for this backtracking
829  * operation; it only caches the action on a per-execution basis.
830  *
831  * @param act The operation at which we should explore a different next action
832  * (i.e., backtracking point)
833  * @return True, if this action is now the most recent backtracking point;
834  * false otherwise
835  */
836 bool ModelChecker::set_latest_backtrack(ModelAction *act)
837 {
838         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
839                 priv->next_backtrack = act;
840                 return true;
841         }
842         return false;
843 }
844
845 /**
846  * Returns the last backtracking point. The model checker will explore a different
847  * path for this point in the next execution.
848  * @return The ModelAction at which the next execution should diverge.
849  */
850 ModelAction * ModelChecker::get_next_backtrack()
851 {
852         ModelAction *next = priv->next_backtrack;
853         priv->next_backtrack = NULL;
854         return next;
855 }
856
857 /**
858  * Processes a read model action.
859  * @param curr is the read model action to process.
860  * @return True if processing this read updates the mo_graph.
861  */
862 bool ModelChecker::process_read(ModelAction *curr)
863 {
864         Node *node = curr->get_node();
865         while (true) {
866                 bool updated = false;
867                 switch (node->get_read_from_status()) {
868                 case READ_FROM_PAST: {
869                         const ModelAction *rf = node->get_read_from_past();
870                         ASSERT(rf);
871
872                         mo_graph->startChanges();
873
874                         ASSERT(!is_infeasible());
875                         if (!check_recency(curr, rf)) {
876                                 if (node->increment_read_from()) {
877                                         mo_graph->rollbackChanges();
878                                         continue;
879                                 } else {
880                                         priv->too_many_reads = true;
881                                 }
882                         }
883
884                         updated = r_modification_order(curr, rf);
885                         read_from(curr, rf);
886                         mo_graph->commitChanges();
887                         mo_check_promises(curr, true);
888                         break;
889                 }
890                 case READ_FROM_PROMISE: {
891                         Promise *promise = curr->get_node()->get_read_from_promise();
892                         if (promise->add_reader(curr))
893                                 priv->failed_promise = true;
894                         curr->set_read_from_promise(promise);
895                         mo_graph->startChanges();
896                         if (!check_recency(curr, promise))
897                                 priv->too_many_reads = true;
898                         updated = r_modification_order(curr, promise);
899                         mo_graph->commitChanges();
900                         break;
901                 }
902                 case READ_FROM_FUTURE: {
903                         /* Read from future value */
904                         struct future_value fv = node->get_future_value();
905                         Promise *promise = new Promise(curr, fv);
906                         curr->set_read_from_promise(promise);
907                         promises->push_back(promise);
908                         mo_graph->startChanges();
909                         updated = r_modification_order(curr, promise);
910                         mo_graph->commitChanges();
911                         break;
912                 }
913                 default:
914                         ASSERT(false);
915                 }
916                 get_thread(curr)->set_return_value(curr->get_return_value());
917                 return updated;
918         }
919 }
920
921 /**
922  * Processes a lock, trylock, unlock, wait, or notify model action.
923  * @param curr The mutex-related model action to process.
924  *
925  * The trylock operation checks whether the lock is already held. If not,
926  * it falls through to the normal lock case; if so, it fails and returns 0
927  * to the caller.
928  *
929  * The lock operation has already been checked to be enabled, so it simply
930  * grabs the lock and synchronizes with the previous unlock.
931  *
932  * The unlock operation has to re-enable all of the threads that are
933  * waiting on the lock.
934  *
935  * @return True if synchronization was updated; false otherwise
936  */
937 bool ModelChecker::process_mutex(ModelAction *curr)
938 {
939         std::mutex *mutex = curr->get_mutex();
940         struct std::mutex_state *state = NULL;
941
942         if (mutex)
943                 state = mutex->get_state();
944
945         switch (curr->get_type()) {
946         case ATOMIC_TRYLOCK: {
947                 bool success = !state->locked;
948                 curr->set_try_lock(success);
949                 if (!success) {
950                         get_thread(curr)->set_return_value(0);
951                         break;
952                 }
953                 get_thread(curr)->set_return_value(1);
954         }
955                 // otherwise fall through to the lock case
956         case ATOMIC_LOCK: {
957                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
958                         assert_bug("Lock access before initialization");
959                 state->locked = get_thread(curr);
960                 ModelAction *unlock = get_last_unlock(curr);
961                 //synchronize with the previous unlock statement
962                 if (unlock != NULL) {
963                         curr->synchronize_with(unlock);
964                         return true;
965                 }
966                 break;
967         }
968         case ATOMIC_WAIT:
969         case ATOMIC_UNLOCK: {
970                 /* wake up the other threads */
971                 for (unsigned int i = 0; i < get_num_threads(); i++) {
972                         Thread *t = get_thread(int_to_id(i));
973                         Thread *curr_thrd = get_thread(curr);
974                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
975                                 scheduler->wake(t);
976                 }
977
978                 /* unlock the lock - after checking who was waiting on it */
979                 state->locked = NULL;
980
981                 if (!curr->is_wait())
982                         break; /* The rest is only for ATOMIC_WAIT */
983
984                 /* Should we go to sleep? (simulate spurious failures) */
985                 if (curr->get_node()->get_misc() == 0) {
986                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
987                         /* disable us */
988                         scheduler->sleep(get_thread(curr));
989                 }
990                 break;
991         }
992         case ATOMIC_NOTIFY_ALL: {
993                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
994                 //activate all the waiting threads
995                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
996                         scheduler->wake(get_thread(*rit));
997                 }
998                 waiters->clear();
999                 break;
1000         }
1001         case ATOMIC_NOTIFY_ONE: {
1002                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1003                 int wakeupthread = curr->get_node()->get_misc();
1004                 action_list_t::iterator it = waiters->begin();
1005                 advance(it, wakeupthread);
1006                 scheduler->wake(get_thread(*it));
1007                 waiters->erase(it);
1008                 break;
1009         }
1010
1011         default:
1012                 ASSERT(0);
1013         }
1014         return false;
1015 }
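/*
 * Sketch of the wait/notify interplay handled above, written against a C11-style
 * threads API (mtx_t/cnd_t; whether the checker exposes exactly these wrappers
 * is an assumption, but the modeled ATOMIC_WAIT/ATOMIC_NOTIFY_* actions play the
 * same roles):
 *
 *   mtx_t m; cnd_t c;
 *   // waiter                    // notifier
 *   mtx_lock(&m);                mtx_lock(&m);
 *   cnd_wait(&c, &m);            cnd_signal(&c);   // ATOMIC_NOTIFY_ONE wakes one waiter
 *   mtx_unlock(&m);              mtx_unlock(&m);
 */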
1016
1017 /**
1018  * @brief Check if the current pending promises allow a future value to be sent
1019  *
1020  * If one of the following is true:
1021  *  (a) there are no pending promises
1022  *  (b) the reader and writer do not cross any promises
1023  * Then, it is safe to pass a future value back now.
1024  *
1025  * Otherwise, we must save the pending future value until (a) or (b) is true
1026  *
1027  * @param writer The operation which sends the future value. Must be a write.
1028  * @param reader The operation which will observe the value. Must be a read.
1029  * @return True if the future value can be sent now; false if it must wait.
1030  */
1031 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1032                 const ModelAction *reader) const
1033 {
1034         if (promises->empty())
1035                 return true;
1036         for (int i = promises->size() - 1; i >= 0; i--) {
1037                 ModelAction *pr = (*promises)[i]->get_reader(0);
1038                 //reader is after promise...doesn't cross any promise
1039                 if (*reader > *pr)
1040                         return true;
1041                 //writer is after promise, reader before...bad...
1042                 if (*writer > *pr)
1043                         return false;
1044         }
1045         return true;
1046 }
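/*
 * Worked example of the check, for a single pending promise whose earliest
 * reader is P:
 *
 *   ... P ... reader ... writer ...   allowed (reader is after P, nothing crosses P)
 *   ... reader ... P ... writer ...   refused (the reader/writer pair straddles P)
 *   ... reader ... writer ... P ...   allowed (neither action is after P)
 */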
1047
1048 /**
1049  * @brief Add a future value to a reader
1050  *
1051  * This function performs a few additional checks to ensure that the future
1052  * value can be feasibly observed by the reader
1053  *
1054  * @param writer The operation whose value is sent. Must be a write.
1055  * @param reader The read operation which may read the future value. Must be a read.
1056  */
1057 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1058 {
1059         /* Do more ambitious checks now that mo is more complete */
1060         if (!mo_may_allow(writer, reader))
1061                 return;
1062
1063         Node *node = reader->get_node();
1064
1065         /* Find an ancestor thread which exists at the time of the reader */
1066         Thread *write_thread = get_thread(writer);
1067         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1068                 write_thread = write_thread->get_parent();
1069
1070         struct future_value fv = {
1071                 writer->get_write_value(),
1072                 writer->get_seq_number() + params.maxfuturedelay,
1073                 write_thread->get_id(),
1074         };
1075         if (node->add_future_value(fv))
1076                 set_latest_backtrack(reader);
1077 }
1078
1079 /**
1080  * Process a write ModelAction
1081  * @param curr The ModelAction to process
1082  * @return True if the mo_graph was updated or promises were resolved
1083  */
1084 bool ModelChecker::process_write(ModelAction *curr)
1085 {
1086         /* Readers to which we may send our future value */
1087         ModelVector<ModelAction *> send_fv;
1088
1089         const ModelAction *earliest_promise_reader;
1090         bool updated_promises = false;
1091
1092         bool updated_mod_order = w_modification_order(curr, &send_fv);
1093         Promise *promise = pop_promise_to_resolve(curr);
1094
1095         if (promise) {
1096                 earliest_promise_reader = promise->get_reader(0);
1097                 updated_promises = resolve_promise(curr, promise);
1098         } else
1099                 earliest_promise_reader = NULL;
1100
1101         for (unsigned int i = 0; i < send_fv.size(); i++) {
1102                 ModelAction *read = send_fv[i];
1103
1104                 /* Don't send future values to reads after the Promise we resolve */
1105                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1106                         /* Check if future value can be sent immediately */
1107                         if (promises_may_allow(curr, read)) {
1108                                 add_future_value(curr, read);
1109                         } else {
1110                                 futurevalues->push_back(PendingFutureValue(curr, read));
1111                         }
1112                 }
1113         }
1114
1115         /* Check the pending future values */
1116         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1117                 struct PendingFutureValue pfv = (*futurevalues)[i];
1118                 if (promises_may_allow(pfv.writer, pfv.reader)) {
1119                         add_future_value(pfv.writer, pfv.reader);
1120                         futurevalues->erase(futurevalues->begin() + i);
1121                 }
1122         }
1123
1124         mo_graph->commitChanges();
1125         mo_check_promises(curr, false);
1126
1127         get_thread(curr)->set_return_value(VALUE_NONE);
1128         return updated_mod_order || updated_promises;
1129 }
1130
1131 /**
1132  * Process a fence ModelAction
1133  * @param curr The ModelAction to process
1134  * @return True if synchronization was updated
1135  */
1136 bool ModelChecker::process_fence(ModelAction *curr)
1137 {
1138         /*
1139          * fence-relaxed: no-op
1140  * fence-release: only log the occurrence (not in this function), for
1141          *   use in later synchronization
1142          * fence-acquire (this function): search for hypothetical release
1143          *   sequences
1144          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1145          */
1146         bool updated = false;
1147         if (curr->is_acquire()) {
1148                 action_list_t *list = action_trace;
1149                 action_list_t::reverse_iterator rit;
1150                 /* Find X : is_read(X) && X --sb-> curr */
1151                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1152                         ModelAction *act = *rit;
1153                         if (act == curr)
1154                                 continue;
1155                         if (act->get_tid() != curr->get_tid())
1156                                 continue;
1157                         /* Stop at the beginning of the thread */
1158                         if (act->is_thread_start())
1159                                 break;
1160                         /* Stop once we reach a prior fence-acquire */
1161                         if (act->is_fence() && act->is_acquire())
1162                                 break;
1163                         if (!act->is_read())
1164                                 continue;
1165                         /* read-acquire will find its own release sequences */
1166                         if (act->is_acquire())
1167                                 continue;
1168
1169                         /* Establish hypothetical release sequences */
1170                         rel_heads_list_t release_heads;
1171                         get_release_seq_heads(curr, act, &release_heads);
1172                         for (unsigned int i = 0; i < release_heads.size(); i++)
1173                                 if (!curr->synchronize_with(release_heads[i]))
1174                                         set_bad_synchronization();
1175                         if (release_heads.size() != 0)
1176                                 updated = true;
1177                 }
1178         }
1179         return updated;
1180 }
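/*
 * User-code sketch of the fence-acquire case above: a relaxed load sequenced
 * before the fence can stand in for a load-acquire, letting the fence
 * synchronize with the release sequence headed by another thread's store-release:
 *
 *   std::atomic<int> x(0);
 *   // thread A                                // thread B ('curr' is the fence)
 *   x.store(1, std::memory_order_release);     int r = x.load(std::memory_order_relaxed);
 *                                              std::atomic_thread_fence(std::memory_order_acquire);
 */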
1181
1182 /**
1183  * @brief Process the current action for thread-related activity
1184  *
1185  * Performs current-action processing for a THREAD_* ModelAction. Processing
1186  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1187  * synchronization, etc.  This function is a no-op for non-THREAD actions
1188  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1189  *
1190  * @param curr The current action
1191  * @return True if synchronization was updated or a thread completed
1192  */
1193 bool ModelChecker::process_thread_action(ModelAction *curr)
1194 {
1195         bool updated = false;
1196
1197         switch (curr->get_type()) {
1198         case THREAD_CREATE: {
1199                 thrd_t *thrd = (thrd_t *)curr->get_location();
1200                 struct thread_params *params = (struct thread_params *)curr->get_value();
1201                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1202                 add_thread(th);
1203                 th->set_creation(curr);
1204                 /* Promises can be satisfied by children */
1205                 for (unsigned int i = 0; i < promises->size(); i++) {
1206                         Promise *promise = (*promises)[i];
1207                         if (promise->thread_is_available(curr->get_tid()))
1208                                 promise->add_thread(th->get_id());
1209                 }
1210                 break;
1211         }
1212         case THREAD_JOIN: {
1213                 Thread *blocking = curr->get_thread_operand();
1214                 ModelAction *act = get_last_action(blocking->get_id());
1215                 curr->synchronize_with(act);
1216                 updated = true; /* trigger rel-seq checks */
1217                 break;
1218         }
1219         case THREAD_FINISH: {
1220                 Thread *th = get_thread(curr);
1221                 /* Wake up any joining threads */
1222                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1223                         Thread *waiting = get_thread(int_to_id(i));
1224                         if (waiting->waiting_on() == th &&
1225                                         waiting->get_pending()->is_thread_join())
1226                                 scheduler->wake(waiting);
1227                 }
1228                 th->complete();
1229                 /* Completed thread can't satisfy promises */
1230                 for (unsigned int i = 0; i < promises->size(); i++) {
1231                         Promise *promise = (*promises)[i];
1232                         if (promise->thread_is_available(th->get_id()))
1233                                 if (promise->eliminate_thread(th->get_id()))
1234                                         priv->failed_promise = true;
1235                 }
1236                 updated = true; /* trigger rel-seq checks */
1237                 break;
1238         }
1239         case THREAD_START: {
1240                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1241                 break;
1242         }
1243         default:
1244                 break;
1245         }
1246
1247         return updated;
1248 }
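/*
 * The thread lifecycle handled above, sketched with the standard C11 thread API
 * (signatures as in ISO C11; the checker's own threads.h wrapper may differ
 * slightly, and worker is a hypothetical thread function):
 *
 *   thrd_t t;
 *   thrd_create(&t, worker, NULL);  // THREAD_CREATE: the child is scheduled next
 *   thrd_join(t, NULL);             // THREAD_JOIN: synchronizes with the child's THREAD_FINISH
 */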
1249
1250 /**
1251  * @brief Process the current action for release sequence fixup activity
1252  *
1253  * Performs model-checker release sequence fixups for the current action,
1254  * forcing a single pending release sequence to break (with a given, potential
1255  * "loose" write) or to complete (i.e., synchronize). If a pending release
1256  * sequence forms a complete release sequence, then we must perform the fixup
1257  * synchronization, mo_graph additions, etc.
1258  *
1259  * @param curr The current action; must be a release sequence fixup action
1260  * @param work_queue The work queue to which to add work items as they are
1261  * generated
1262  */
1263 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1264 {
1265         const ModelAction *write = curr->get_node()->get_relseq_break();
1266         struct release_seq *sequence = pending_rel_seqs->back();
1267         pending_rel_seqs->pop_back();
1268         ASSERT(sequence);
1269         ModelAction *acquire = sequence->acquire;
1270         const ModelAction *rf = sequence->rf;
1271         const ModelAction *release = sequence->release;
1272         ASSERT(acquire);
1273         ASSERT(release);
1274         ASSERT(rf);
1275         ASSERT(release->same_thread(rf));
1276
1277         if (write == NULL) {
1278                 /**
1279                  * @todo Forcing a synchronization requires that we set
1280                  * modification order constraints. For instance, we can't allow
1281                  * a fixup sequence in which two separate read-acquire
1282                  * operations read from the same sequence, where the first one
1283                  * synchronizes and the other doesn't. Essentially, we can't
1284                  * allow any writes to insert themselves between 'release' and
1285                  * 'rf'
1286                  */
1287
1288                 /* Must synchronize */
1289                 if (!acquire->synchronize_with(release)) {
1290                         set_bad_synchronization();
1291                         return;
1292                 }
1293                 /* Re-check all pending release sequences */
1294                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1295                 /* Re-check act for mo_graph edges */
1296                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1297
1298                 /* propagate synchronization to later actions */
1299                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1300                 for (; (*rit) != acquire; rit++) {
1301                         ModelAction *propagate = *rit;
1302                         if (acquire->happens_before(propagate)) {
1303                                 propagate->synchronize_with(acquire);
1304                                 /* Re-check 'propagate' for mo_graph edges */
1305                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1306                         }
1307                 }
1308         } else {
1309                 /* Break release sequence with new edges:
1310                  *   release --mo--> write --mo--> rf */
1311                 mo_graph->addEdge(release, write);
1312                 mo_graph->addEdge(write, rf);
1313         }
1314
1315         /* See if we have realized a data race */
1316         checkDataRaces();
1317 }
1318
1319 /**
1320  * Initialize the current action by performing one or more of the following
1321  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1322  * in the NodeStack, manipulating backtracking sets, allocating and
1323  * initializing clock vectors, and computing the promises to fulfill.
1324  *
1325  * @param curr The current action, as passed from the user context; may be
1326  * freed/invalidated after the execution of this function, with a different
1327  * action "returned" in its place (pass-by-reference)
1328  * @return True if curr is a newly-explored action; false otherwise
1329  */
1330 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1331 {
1332         ModelAction *newcurr;
1333
1334         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1335                 newcurr = process_rmw(*curr);
1336                 delete *curr;
1337
1338                 if (newcurr->is_rmw())
1339                         compute_promises(newcurr);
1340
1341                 *curr = newcurr;
1342                 return false;
1343         }
1344
1345         (*curr)->set_seq_number(get_next_seq_num());
1346
1347         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1348         if (newcurr) {
1349                 /* First restore type and order in case of RMW operation */
1350                 if ((*curr)->is_rmwr())
1351                         newcurr->copy_typeandorder(*curr);
1352
1353                 ASSERT((*curr)->get_location() == newcurr->get_location());
1354                 newcurr->copy_from_new(*curr);
1355
1356                 /* Discard duplicate ModelAction; use action from NodeStack */
1357                 delete *curr;
1358
1359                 /* Always compute new clock vector */
1360                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1361
1362                 *curr = newcurr;
1363                 return false; /* Action was explored previously */
1364         } else {
1365                 newcurr = *curr;
1366
1367                 /* Always compute new clock vector */
1368                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1369
1370                 /* Assign most recent release fence */
1371                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1372
1373                 /*
1374                  * Perform one-time actions when pushing new ModelAction onto
1375                  * NodeStack
1376                  */
1377                 if (newcurr->is_write())
1378                         compute_promises(newcurr);
1379                 else if (newcurr->is_relseq_fixup())
1380                         compute_relseq_breakwrites(newcurr);
1381                 else if (newcurr->is_wait())
1382                         newcurr->get_node()->set_misc_max(2);
1383                 else if (newcurr->is_notify_one()) {
1384                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1385                 }
1386                 return true; /* This was a new ModelAction */
1387         }
1388 }
1389
1390 /**
1391  * @brief Establish reads-from relation between two actions
1392  *
1393  * Perform basic operations involved with establishing a concrete rf relation,
1394  * including setting the ModelAction data and checking for release sequences.
1395  *
1396  * @param act The action that is reading (must be a read)
1397  * @param rf The action from which we are reading (must be a write)
1398  *
1399  * @return True if this read established synchronization
1400  */
1401 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1402 {
1403         ASSERT(rf);
1404         ASSERT(rf->is_write());
1405
1406         act->set_read_from(rf);
1407         if (act->is_acquire()) {
1408                 rel_heads_list_t release_heads;
1409                 get_release_seq_heads(act, act, &release_heads);
1410                 int num_heads = release_heads.size();
1411                 for (unsigned int i = 0; i < release_heads.size(); i++)
1412                         if (!act->synchronize_with(release_heads[i])) {
1413                                 set_bad_synchronization();
1414                                 num_heads--;
1415                         }
1416                 return num_heads > 0;
1417         }
1418         return false;
1419 }
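
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): read_from() reports synchronization only when an acquire load
 * reads from a write heading (or continuing) a release sequence.
 *
 *   std::atomic<int> data(0), flag(0);
 *
 *   // Thread 1:
 *   data.store(42, std::memory_order_relaxed);
 *   flag.store(1, std::memory_order_release);        // release head
 *
 *   // Thread 2:
 *   if (flag.load(std::memory_order_acquire) == 1) { // acquire read
 *           int r = data.load(std::memory_order_relaxed); // guaranteed 42
 *   }
 *
 * When the acquire load reads from the release store, get_release_seq_heads()
 * finds the release head, synchronize_with() succeeds, and read_from()
 * returns true. A relaxed load of 'flag' would return false, since no
 * synchronization is established.
 */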
1420
1421 /**
1422  * Check promises and eliminate potentially-satisfying threads when a thread is
1423  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1424  * no longer satisfy a promise generated from that thread.
1425  *
1426  * @param blocker The thread on which a thread is waiting
1427  * @param waiting The waiting thread
1428  */
1429 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1430 {
1431         for (unsigned int i = 0; i < promises->size(); i++) {
1432                 Promise *promise = (*promises)[i];
1433                 if (!promise->thread_is_available(waiting->get_id()))
1434                         continue;
1435                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1436                         ModelAction *reader = promise->get_reader(j);
1437                         if (reader->get_tid() != blocker->get_id())
1438                                 continue;
1439                         if (promise->eliminate_thread(waiting->get_id())) {
1440                                 /* Promise has failed */
1441                                 priv->failed_promise = true;
1442                         } else {
1443                                 /* Only eliminate the 'waiting' thread once */
1444                                 return;
1445                         }
1446                 }
1447         }
1448 }
1449
1450 /**
1451  * @brief Check whether a model action is enabled.
1452  *
1453  * Checks whether a lock or join operation would succeed (i.e., whether the
1454  * lock is currently unlocked, or whether the joined thread has already
1455  * completed). If not, the operation is not enabled and must wait.
1456  *
1457  * @param curr The ModelAction to check
1458  * @return True if the action is enabled; false otherwise
1459  */
1460 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1461         if (curr->is_lock()) {
1462                 std::mutex *lock = curr->get_mutex();
1463                 struct std::mutex_state *state = lock->get_state();
1464                 if (state->locked)
1465                         return false;
1466         } else if (curr->is_thread_join()) {
1467                 Thread *blocking = curr->get_thread_operand();
1468                 if (!blocking->is_complete()) {
1469                         thread_blocking_check_promises(blocking, get_thread(curr));
1470                         return false;
1471                 }
1472         }
1473
1474         return true;
1475 }
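
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): the enabledness rules applied above.
 *
 *   std::mutex m;          // the checker's modeled std::mutex
 *   m.lock();              // enabled only while m's state->locked is false
 *
 *   thrd_join(child);      // assuming the threads.h test API: enabled only
 *                          // once the 'child' Thread is_complete()
 *
 * If the lock is held or the joined thread is still running, the action is
 * reported as disabled and the scheduler must run some other thread; for a
 * blocked join, thread_blocking_check_promises() also prunes promises that
 * the waiting thread can no longer satisfy.
 */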
1476
1477 /**
1478  * This is the heart of the model checker routine. It performs model-checking
1479  * actions corresponding to a given "current action." Among other processes, it
1480  * calculates reads-from relationships, updates synchronization clock vectors,
1481  * forms a memory_order constraints graph, and handles replay/backtrack
1482  * execution when running permutations of previously-observed executions.
1483  *
1484  * @param curr The current action to process
1485  * @return The ModelAction that is actually executed; may be different than
1486  * curr; may be NULL, if the current action is not enabled to run
1487  */
1488 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1489 {
1490         ASSERT(curr);
1491         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1492         bool newly_explored = initialize_curr_action(&curr);
1493
1494         DBG();
1495         if (DBG_ENABLED())
1496                 curr->print();
1497
1498         wake_up_sleeping_actions(curr);
1499
1500         /* Compute fairness information for CHESS yield algorithm */
1501         if (model->params.yieldon) {
1502                 curr->get_node()->update_yield(scheduler);
1503         }
1504
1505         /* Add the action to lists before any other model-checking tasks */
1506         if (!second_part_of_rmw)
1507                 add_action_to_lists(curr);
1508
1509         /* Build may_read_from set for newly-created actions */
1510         if (newly_explored && curr->is_read())
1511                 build_may_read_from(curr);
1512
1513         /* Initialize work_queue with the "current action" work */
1514         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1515         while (!work_queue.empty() && !has_asserted()) {
1516                 WorkQueueEntry work = work_queue.front();
1517                 work_queue.pop_front();
1518
1519                 switch (work.type) {
1520                 case WORK_CHECK_CURR_ACTION: {
1521                         ModelAction *act = work.action;
1522                         bool update = false; /* update this location's release seq's */
1523                         bool update_all = false; /* update all release seq's */
1524
1525                         if (process_thread_action(curr))
1526                                 update_all = true;
1527
1528                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1529                                 update = true;
1530
1531                         if (act->is_write() && process_write(act))
1532                                 update = true;
1533
1534                         if (act->is_fence() && process_fence(act))
1535                                 update_all = true;
1536
1537                         if (act->is_mutex_op() && process_mutex(act))
1538                                 update_all = true;
1539
1540                         if (act->is_relseq_fixup())
1541                                 process_relseq_fixup(curr, &work_queue);
1542
1543                         if (update_all)
1544                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1545                         else if (update)
1546                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1547                         break;
1548                 }
1549                 case WORK_CHECK_RELEASE_SEQ:
1550                         resolve_release_sequences(work.location, &work_queue);
1551                         break;
1552                 case WORK_CHECK_MO_EDGES: {
1553                         /** @todo Complete verification of work_queue */
1554                         ModelAction *act = work.action;
1555                         bool updated = false;
1556
1557                         if (act->is_read()) {
1558                                 const ModelAction *rf = act->get_reads_from();
1559                                 const Promise *promise = act->get_reads_from_promise();
1560                                 if (rf) {
1561                                         if (r_modification_order(act, rf))
1562                                                 updated = true;
1563                                 } else if (promise) {
1564                                         if (r_modification_order(act, promise))
1565                                                 updated = true;
1566                                 }
1567                         }
1568                         if (act->is_write()) {
1569                                 if (w_modification_order(act, NULL))
1570                                         updated = true;
1571                         }
1572                         mo_graph->commitChanges();
1573
1574                         if (updated)
1575                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1576                         break;
1577                 }
1578                 default:
1579                         ASSERT(false);
1580                         break;
1581                 }
1582         }
1583
1584         check_curr_backtracking(curr);
1585         set_backtracking(curr);
1586         return curr;
1587 }
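
/*
 * Informal walkthrough (illustrative, not a specification): for a single
 * acquire load, the work queue above typically evolves as follows.
 *
 *   { WORK_CHECK_CURR_ACTION(load) }
 *       process_read() picks a reads-from candidate; if synchronization was
 *       established, push WORK_CHECK_RELEASE_SEQ for the load's location.
 *   { WORK_CHECK_RELEASE_SEQ(loc) }
 *       resolve_release_sequences() may lazily synchronize pending acquires
 *       and pushes WORK_CHECK_MO_EDGES for any action it updated.
 *   { WORK_CHECK_MO_EDGES(act), ... }
 *       r_modification_order()/w_modification_order() add mo edges; any
 *       update re-queues a release-sequence check, until a fixed point.
 */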
1588
1589 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1590 {
1591         Node *currnode = curr->get_node();
1592         Node *parnode = currnode->get_parent();
1593
1594         if ((parnode && !parnode->backtrack_empty()) ||
1595                          !currnode->misc_empty() ||
1596                          !currnode->read_from_empty() ||
1597                          !currnode->promise_empty() ||
1598                          !currnode->relseq_break_empty()) {
1599                 set_latest_backtrack(curr);
1600         }
1601 }
1602
1603 bool ModelChecker::promises_expired() const
1604 {
1605         for (unsigned int i = 0; i < promises->size(); i++) {
1606                 Promise *promise = (*promises)[i];
1607                 if (promise->get_expiration() < priv->used_sequence_numbers)
1608                         return true;
1609         }
1610         return false;
1611 }
1612
1613 /**
1614  * This is the strongest feasibility check available.
1615  * @return whether the current trace (partial or complete) must be a prefix of
1616  * a feasible trace.
1617  */
1618 bool ModelChecker::isfeasibleprefix() const
1619 {
1620         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1621 }
1622
1623 /**
1624  * Print diagnostic information about an infeasible execution
1625  * @param prefix A string to prefix the output with; if NULL, then a default
1626  * message prefix will be provided
1627  */
1628 void ModelChecker::print_infeasibility(const char *prefix) const
1629 {
1630         char buf[100];
1631         char *ptr = buf;
1632         if (mo_graph->checkForCycles())
1633                 ptr += sprintf(ptr, "[mo cycle]");
1634         if (priv->failed_promise)
1635                 ptr += sprintf(ptr, "[failed promise]");
1636         if (priv->too_many_reads)
1637                 ptr += sprintf(ptr, "[too many reads]");
1638         if (priv->no_valid_reads)
1639                 ptr += sprintf(ptr, "[no valid reads-from]");
1640         if (priv->bad_synchronization)
1641                 ptr += sprintf(ptr, "[bad sw ordering]");
1642         if (promises_expired())
1643                 ptr += sprintf(ptr, "[promise expired]");
1644         if (promises->size() != 0)
1645                 ptr += sprintf(ptr, "[unresolved promise]");
1646         if (ptr != buf)
1647                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1648 }
1649
1650 /**
1651  * Returns whether the current completed trace is feasible, except for pending
1652  * release sequences.
1653  */
1654 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1655 {
1656         return !is_infeasible() && promises->size() == 0;
1657 }
1658
1659 /**
1660  * Check if the current partial trace is infeasible. Does not check any
1661  * end-of-execution flags, which might rule out the execution. Thus, this is
1662  * useful only for ruling an execution as infeasible.
1663  * @return whether the current partial trace is infeasible.
1664  */
1665 bool ModelChecker::is_infeasible() const
1666 {
1667         return mo_graph->checkForCycles() ||
1668                 priv->no_valid_reads ||
1669                 priv->failed_promise ||
1670                 priv->too_many_reads ||
1671                 priv->bad_synchronization ||
1672                 promises_expired();
1673 }
1674
1675 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1676 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1677         ModelAction *lastread = get_last_action(act->get_tid());
1678         lastread->process_rmw(act);
1679         if (act->is_rmw()) {
1680                 if (lastread->get_reads_from())
1681                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1682                 else
1683                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1684                 mo_graph->commitChanges();
1685         }
1686         return lastread;
1687 }
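
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): how an atomic RMW is split and then merged by process_rmw().
 *
 *   std::atomic<int> x(0);
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * The fetch_add first appears as its read half (is_rmwr()). When the write
 * half arrives, process_rmw() merges it into that earlier action, producing a
 * full RMW (is_rmw()) and adding the atomicity edge
 * reads_from(x) --mo--> fetch_add to the mo_graph. Roughly, a failed
 * compare_exchange instead takes the is_rmwc() path and the earlier read half
 * becomes an ordinary read.
 */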
1688
1689 /**
1690  * A helper function for ModelChecker::check_recency, to check if the current
1691  * thread is able to read from a different write/promise for 'params.maxreads'
1692  * number of steps and if that write/promise should become visible (i.e., is
1693  * ordered later in the modification order). This helps model memory liveness.
1694  *
1695  * @param curr The current action. Must be a read.
1696  * @param rf The write/promise from which we plan to read
1697  * @param other_rf The write/promise from which we may read
1698  * @return True if we were able to read from other_rf for params.maxreads steps
1699  */
1700 template <typename T, typename U>
1701 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1702 {
1703         /* Need a different write/promise */
1704         if (other_rf->equals(rf))
1705                 return false;
1706
1707         /* Only look for "newer" writes/promises */
1708         if (!mo_graph->checkReachable(rf, other_rf))
1709                 return false;
1710
1711         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1712         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1713         action_list_t::reverse_iterator rit = list->rbegin();
1714         ASSERT((*rit) == curr);
1715         /* Skip past curr */
1716         rit++;
1717
1718         /* Does this write/promise work for everyone? */
1719         for (int i = 0; i < params.maxreads; i++, rit++) {
1720                 ModelAction *act = *rit;
1721                 if (!act->may_read_from(other_rf))
1722                         return false;
1723         }
1724         return true;
1725 }
1726
1727 /**
1728  * Checks whether a thread has read from the same write or Promise for too many
1729  * times without seeing the effects of a later write/Promise.
1730  *
1731  * Basic idea:
1732  * 1) there must be a different write/promise that we could read from,
1733  * 2) we must have read from the same write/promise in excess of maxreads times,
1734  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1735  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1736  *
1737  * If so, we decide that the execution is no longer feasible.
1738  *
1739  * @param curr The current action. Must be a read.
1740  * @param rf The ModelAction/Promise from which we might read.
1741  * @return True if the read should succeed; false otherwise
1742  */
1743 template <typename T>
1744 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1745 {
1746         if (!params.maxreads)
1747                 return true;
1748
1749         //NOTE: The next check is just an optimization, not strictly necessary.
1750         if (curr->get_node()->get_read_from_past_size() +
1751                         curr->get_node()->get_read_from_promise_size() <= 1)
1752                 return true;
1753
1754         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1755         int tid = id_to_int(curr->get_tid());
1756         ASSERT(tid < (int)thrd_lists->size());
1757         action_list_t *list = &(*thrd_lists)[tid];
1758         action_list_t::reverse_iterator rit = list->rbegin();
1759         ASSERT((*rit) == curr);
1760         /* Skip past curr */
1761         rit++;
1762
1763         action_list_t::reverse_iterator ritcopy = rit;
1764         /* See if we have enough reads from the same value */
1765         for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1766                 if (ritcopy == list->rend())
1767                         return true;
1768                 ModelAction *act = *ritcopy;
1769                 if (!act->is_read())
1770                         return true;
1771                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1772                         return true;
1773                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1774                         return true;
1775                 if (act->get_node()->get_read_from_past_size() +
1776                                 act->get_node()->get_read_from_promise_size() <= 1)
1777                         return true;
1778         }
1779         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1780                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1781                 if (should_read_instead(curr, rf, write))
1782                         return false; /* liveness failure */
1783         }
1784         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1785                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1786                 if (should_read_instead(curr, rf, promise))
1787                         return false; /* liveness failure */
1788         }
1789         return true;
1790 }
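
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): the liveness condition enforced by check_recency(), assuming
 * params.maxreads is nonzero.
 *
 *   std::atomic<int> flag(0);
 *
 *   // Thread 1:
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;                                        // re-reads the stale 0
 *
 *   // Thread 2:
 *   flag.store(1, std::memory_order_relaxed);        // mo-later write
 *
 * Once Thread 1 has read the same write maxreads times while the store of 1
 * was readable (and modification-ordered after it) each time, the execution
 * is declared infeasible, forcing exploration of executions in which the
 * spinning thread eventually observes 1.
 */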
1791
1792 /**
1793  * Updates the mo_graph with the constraints imposed from the current
1794  * read.
1795  *
1796  * Basic idea is the following: Go through each other thread and find
1797  * the last action that happened before our read.  Two cases:
1798  *
1799  * (1) The action is a write => that write must either occur before
1800  * the write we read from or be the write we read from.
1801  *
1802  * (2) The action is a read => the write that that action read from
1803  * must occur before the write we read from or be the same write.
1804  *
1805  * @param curr The current action. Must be a read.
1806  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1807  * @return True if modification order edges were added; false otherwise
1808  */
1809 template <typename rf_type>
1810 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1811 {
1812         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1813         unsigned int i;
1814         bool added = false;
1815         ASSERT(curr->is_read());
1816
1817         /* Last SC fence in the current thread */
1818         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1819         ModelAction *last_sc_write = NULL;
1820         if (curr->is_seqcst())
1821                 last_sc_write = get_last_seq_cst_write(curr);
1822
1823         /* Iterate over all threads */
1824         for (i = 0; i < thrd_lists->size(); i++) {
1825                 /* Last SC fence in thread i */
1826                 ModelAction *last_sc_fence_thread_local = NULL;
1827                 if (int_to_id((int)i) != curr->get_tid())
1828                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1829
1830                 /* Last SC fence in thread i, before last SC fence in current thread */
1831                 ModelAction *last_sc_fence_thread_before = NULL;
1832                 if (last_sc_fence_local)
1833                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1834
1835                 /* Iterate over actions in thread, starting from most recent */
1836                 action_list_t *list = &(*thrd_lists)[i];
1837                 action_list_t::reverse_iterator rit;
1838                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1839                         ModelAction *act = *rit;
1840
1841                         /* Skip curr */
1842                         if (act == curr)
1843                                 continue;
1844                         /* Don't want to add reflexive edges on 'rf' */
1845                         if (act->equals(rf)) {
1846                                 if (act->happens_before(curr))
1847                                         break;
1848                                 else
1849                                         continue;
1850                         }
1851
1852                         if (act->is_write()) {
1853                                 /* C++, Section 29.3 statement 5 */
1854                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1855                                                 *act < *last_sc_fence_thread_local) {
1856                                         added = mo_graph->addEdge(act, rf) || added;
1857                                         break;
1858                                 }
1859                                 /* C++, Section 29.3 statement 4 */
1860                                 else if (act->is_seqcst() && last_sc_fence_local &&
1861                                                 *act < *last_sc_fence_local) {
1862                                         added = mo_graph->addEdge(act, rf) || added;
1863                                         break;
1864                                 }
1865                                 /* C++, Section 29.3 statement 6 */
1866                                 else if (last_sc_fence_thread_before &&
1867                                                 *act < *last_sc_fence_thread_before) {
1868                                         added = mo_graph->addEdge(act, rf) || added;
1869                                         break;
1870                                 }
1871                         }
1872
1873                         /* C++, Section 29.3 statement 3 (second subpoint) */
1874                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1875                                 added = mo_graph->addEdge(act, rf) || added;
1876                                 break;
1877                         }
1878
1879                         /*
1880                          * Include at most one act per-thread that "happens
1881                          * before" curr
1882                          */
1883                         if (act->happens_before(curr)) {
1884                                 if (act->is_write()) {
1885                                         added = mo_graph->addEdge(act, rf) || added;
1886                                 } else {
1887                                         const ModelAction *prevrf = act->get_reads_from();
1888                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1889                                         if (prevrf) {
1890                                                 if (!prevrf->equals(rf))
1891                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1892                                         } else if (!prevrf_promise->equals(rf)) {
1893                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1894                                         }
1895                                 }
1896                                 break;
1897                         }
1898                 }
1899         }
1900
1901         /*
1902          * All compatible, thread-exclusive promises must be ordered after any
1903          * concrete loads from the same thread
1904          */
1905         for (unsigned int i = 0; i < promises->size(); i++)
1906                 if ((*promises)[i]->is_compatible_exclusive(curr))
1907                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1908
1909         return added;
1910 }
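
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): the mo edges derived from a read, per case (1) above.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);       // W1
 *   int r = x.load(std::memory_order_relaxed);   // curr; may read W1 or W2
 *
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);       // W2
 *
 * If the load reads from W2, then W1 is the most recent write in Thread 1
 * that happens before the load, so case (1) adds W1 --mo--> W2: coherence
 * forbids W2 from being ordered before a write the reader has already passed.
 * Case (2) applies the same rule to the reads-from write of a prior read.
 */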
1911
1912 /**
1913  * Updates the mo_graph with the constraints imposed from the current write.
1914  *
1915  * Basic idea is the following: Go through each other thread and find
1916  * the latest action that happened before our write.  Two cases:
1917  *
1918  * (1) The action is a write => that write must occur before
1919  * the current write
1920  *
1921  * (2) The action is a read => the write that that action read from
1922  * must occur before the current write.
1923  *
1924  * This method also handles two other issues:
1925  *
1926  * (I) Sequential Consistency: Making sure that if the current write is
1927  * seq_cst, that it occurs after the previous seq_cst write.
1928  *
1929  * (II) Sending the write back to non-synchronizing reads.
1930  *
1931  * @param curr The current action. Must be a write.
1932  * @param send_fv A vector for stashing reads to which we may pass our future
1933  * value. If NULL, then don't record any future values.
1934  * @return True if modification order edges were added; false otherwise
1935  */
1936 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1937 {
1938         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1939         unsigned int i;
1940         bool added = false;
1941         ASSERT(curr->is_write());
1942
1943         if (curr->is_seqcst()) {
1944                 /* We have to at least see the last sequentially consistent write,
1945                          so we are initialized. */
1946                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1947                 if (last_seq_cst != NULL) {
1948                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1949                 }
1950         }
1951
1952         /* Last SC fence in the current thread */
1953         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1954
1955         /* Iterate over all threads */
1956         for (i = 0; i < thrd_lists->size(); i++) {
1957                 /* Last SC fence in thread i, before last SC fence in current thread */
1958                 ModelAction *last_sc_fence_thread_before = NULL;
1959                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1960                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1961
1962                 /* Iterate over actions in thread, starting from most recent */
1963                 action_list_t *list = &(*thrd_lists)[i];
1964                 action_list_t::reverse_iterator rit;
1965                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1966                         ModelAction *act = *rit;
1967                         if (act == curr) {
1968                                 /*
1969                                  * 1) If RMW and it actually read from something, then we
1970                                  * already have all relevant edges, so just skip to next
1971                                  * thread.
1972                                  *
1973                                  * 2) If RMW and it didn't read from anything, we should take
1974                                  * whatever edge we can get to speed up convergence.
1975                                  *
1976                                  * 3) If normal write, we need to look at earlier actions, so
1977                                  * continue processing list.
1978                                  */
1979                                 if (curr->is_rmw()) {
1980                                         if (curr->get_reads_from() != NULL)
1981                                                 break;
1982                                         else
1983                                                 continue;
1984                                 } else
1985                                         continue;
1986                         }
1987
1988                         /* C++, Section 29.3 statement 7 */
1989                         if (last_sc_fence_thread_before && act->is_write() &&
1990                                         *act < *last_sc_fence_thread_before) {
1991                                 added = mo_graph->addEdge(act, curr) || added;
1992                                 break;
1993                         }
1994
1995                         /*
1996                          * Include at most one act per-thread that "happens
1997                          * before" curr
1998                          */
1999                         if (act->happens_before(curr)) {
2000                                 /*
2001                                  * Note: if act is RMW, just add edge:
2002                                  *   act --mo--> curr
2003                                  * The following edge should be handled elsewhere:
2004                                  *   readfrom(act) --mo--> act
2005                                  */
2006                                 if (act->is_write())
2007                                         added = mo_graph->addEdge(act, curr) || added;
2008                                 else if (act->is_read()) {
2009                                         //if previous read accessed a null, just keep going
2010                                         if (act->get_reads_from() == NULL)
2011                                                 continue;
2012                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2013                                 }
2014                                 break;
2015                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2016                                                      !act->same_thread(curr)) {
2017                                 /* We have an action that:
2018                                    (1) did not happen before us
2019                                    (2) is a read and we are a write
2020                                    (3) cannot synchronize with us
2021                                    (4) is in a different thread
2022                                    =>
2023                                    that read could potentially read from our write.  Note that
2024                                    these checks are overly conservative at this point, we'll
2025                                    do more checks before actually removing the
2026                                    pendingfuturevalue.
2027
2028                                  */
2029                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2030                                         if (!is_infeasible())
2031                                                 send_fv->push_back(act);
2032                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2033                                                 add_future_value(curr, act);
2034                                 }
2035                         }
2036                 }
2037         }
2038
2039         /*
2040          * All compatible, thread-exclusive promises must be ordered after any
2041          * concrete stores to the same thread, or else they can be merged with
2042          * this store later
2043          */
2044         for (unsigned int i = 0; i < promises->size(); i++)
2045                 if ((*promises)[i]->is_compatible_exclusive(curr))
2046                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2047
2048         return added;
2049 }
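
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): the mo edges derived from a write, per the cases above.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   x.store(1, std::memory_order_relaxed);       // W1
 *   x.store(2, std::memory_order_relaxed);       // W2 (curr)
 *
 * W1 is sequenced before (hence happens before) W2, so case (1) adds
 * W1 --mo--> W2 (write-write coherence). If W2 were seq_cst, it would also be
 * ordered after the previous seq_cst write to x, issue (I). A relaxed read of
 * x in another thread that neither happens before W2 nor can synchronize with
 * it is stashed in send_fv, so W2's value may later be offered to that read
 * as a future value, issue (II).
 */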
2050
2051 /** Arbitrary reads from the future are not allowed.  Section 29.3
2052  * part 9 places some constraints.  This method checks one result of the
2053  * constraint.  Others require compiler support. */
2054 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2055 {
2056         if (!writer->is_rmw())
2057                 return true;
2058
2059         if (!reader->is_rmw())
2060                 return true;
2061
2062         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2063                 if (search == reader)
2064                         return false;
2065                 if (search->get_tid() == reader->get_tid() &&
2066                                 search->happens_before(reader))
2067                         break;
2068         }
2069
2070         return true;
2071 }
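
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): the RMW restriction checked above.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:                                 // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   x.fetch_add(1, std::memory_order_relaxed);
 *
 * The writer RMW may not read, directly or through the chain of its
 * predecessors' reads-from links, from the reader RMW itself; that would be a
 * cycle of read-modify-writes feeding each other their own results. The walk
 * over writer->get_reads_from() above rejects exactly that case.
 */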
2072
2073 /**
2074  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2075  * some constraints. This method checks the following constraint (others
2076  * require compiler support):
2077  *
2078  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
2079  */
2080 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2081 {
2082         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2083         unsigned int i;
2084         /* Iterate over all threads */
2085         for (i = 0; i < thrd_lists->size(); i++) {
2086                 const ModelAction *write_after_read = NULL;
2087
2088                 /* Iterate over actions in thread, starting from most recent */
2089                 action_list_t *list = &(*thrd_lists)[i];
2090                 action_list_t::reverse_iterator rit;
2091                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2092                         ModelAction *act = *rit;
2093
2094                         /* Don't disallow due to act == reader */
2095                         if (!reader->happens_before(act) || reader == act)
2096                                 break;
2097                         else if (act->is_write())
2098                                 write_after_read = act;
2099                         else if (act->is_read() && act->get_reads_from() != NULL)
2100                                 write_after_read = act->get_reads_from();
2101                 }
2102
2103                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2104                         return false;
2105         }
2106         return true;
2107 }
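
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): the constraint "if X --hb--> Y --mo--> Z, then X should not read
 * from Z" checked above.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   int r = x.load(std::memory_order_relaxed);   // X: candidate future read
 *   x.store(1, std::memory_order_relaxed);       // Y: write after the read
 *
 *   // Thread 2:
 *   x.store(2, std::memory_order_relaxed);       // Z: proposed future value
 *
 * If the graph already records Y --mo--> Z, letting X read from Z would be
 * incoherent (X is sequenced before Y yet observes a write ordered after Y),
 * so mo_may_allow() returns false for that (writer, reader) pair.
 */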
2108
2109 /**
2110  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2111  * The ModelAction under consideration is expected to be taking part in
2112  * release/acquire synchronization as an object of the "reads from" relation.
2113  * Note that this can only provide release sequence support for RMW chains
2114  * which do not read from the future, as those actions cannot be traced until
2115  * their "promise" is fulfilled. Similarly, we may not even establish the
2116  * presence of a release sequence with certainty, as some modification order
2117  * constraints may be decided further in the future. Thus, this function
2118  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2119  * and a boolean representing certainty.
2120  *
2121  * @param rf The action that might be part of a release sequence. Must be a
2122  * write.
2123  * @param release_heads A pass-by-reference style return parameter. After
2124  * execution of this function, release_heads will contain the heads of all the
2125  * relevant release sequences, if any exist with certainty
2126  * @param pending A pass-by-reference style return parameter which is only used
2127  * when returning false (i.e., uncertain). Returns the gathered information regarding
2128  * an uncertain release sequence, including any write operations that might
2129  * break the sequence.
2130  * @return true, if the ModelChecker is certain that release_heads is complete;
2131  * false otherwise
2132  */
2133 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2134                 rel_heads_list_t *release_heads,
2135                 struct release_seq *pending) const
2136 {
2137         /* Only check for release sequences if there are no cycles */
2138         if (mo_graph->checkForCycles())
2139                 return false;
2140
2141         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2142                 ASSERT(rf->is_write());
2143
2144                 if (rf->is_release())
2145                         release_heads->push_back(rf);
2146                 else if (rf->get_last_fence_release())
2147                         release_heads->push_back(rf->get_last_fence_release());
2148                 if (!rf->is_rmw())
2149                         break; /* End of RMW chain */
2150
2151                 /** @todo Need to be smarter here...  In the linux lock
2152                  * example, this will run to the beginning of the program for
2153                  * every acquire. */
2154                 /** @todo The way to be smarter here is to keep going until one
2155                  * thread has a release preceded by an acquire and you've seen
2156                  * both. */
2157
2158                 /* acq_rel RMW is a sufficient stopping condition */
2159                 if (rf->is_acquire() && rf->is_release())
2160                         return true; /* complete */
2161         }
2162         if (!rf) {
2163                 /* read from future: need to settle this later */
2164                 pending->rf = NULL;
2165                 return false; /* incomplete */
2166         }
2167
2168         if (rf->is_release())
2169                 return true; /* complete */
2170
2171         /* else relaxed write
2172          * - check for fence-release in the same thread (29.8, stmt. 3)
2173          * - check modification order for contiguous subsequence
2174          *   -> rf must be same thread as release */
2175
2176         const ModelAction *fence_release = rf->get_last_fence_release();
2177         /* Synchronize with a fence-release unconditionally; we don't need to
2178          * find any more "contiguous subsequence..." for it */
2179         if (fence_release)
2180                 release_heads->push_back(fence_release);
2181
2182         int tid = id_to_int(rf->get_tid());
2183         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2184         action_list_t *list = &(*thrd_lists)[tid];
2185         action_list_t::const_reverse_iterator rit;
2186
2187         /* Find rf in the thread list */
2188         rit = std::find(list->rbegin(), list->rend(), rf);
2189         ASSERT(rit != list->rend());
2190
2191         /* Find the last {write,fence}-release */
2192         for (; rit != list->rend(); rit++) {
2193                 if (fence_release && *(*rit) < *fence_release)
2194                         break;
2195                 if ((*rit)->is_release())
2196                         break;
2197         }
2198         if (rit == list->rend()) {
2199                 /* No write-release in this thread */
2200                 return true; /* complete */
2201         } else if (fence_release && *(*rit) < *fence_release) {
2202                 /* The fence-release is more recent (and so, "stronger") than
2203                  * the most recent write-release */
2204                 return true; /* complete */
2205         } /* else, need to establish contiguous release sequence */
2206         ModelAction *release = *rit;
2207
2208         ASSERT(rf->same_thread(release));
2209
2210         pending->writes.clear();
2211
2212         bool certain = true;
2213         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2214                 if (id_to_int(rf->get_tid()) == (int)i)
2215                         continue;
2216                 list = &(*thrd_lists)[i];
2217
2218                 /* Can we ensure no future writes from this thread may break
2219                  * the release seq? */
2220                 bool future_ordered = false;
2221
2222                 ModelAction *last = get_last_action(int_to_id(i));
2223                 Thread *th = get_thread(int_to_id(i));
2224                 if ((last && rf->happens_before(last)) ||
2225                                 !is_enabled(th) ||
2226                                 th->is_complete())
2227                         future_ordered = true;
2228
2229                 ASSERT(!th->is_model_thread() || future_ordered);
2230
2231                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2232                         const ModelAction *act = *rit;
2233                         /* Reach synchronization -> this thread is complete */
2234                         if (act->happens_before(release))
2235                                 break;
2236                         if (rf->happens_before(act)) {
2237                                 future_ordered = true;
2238                                 continue;
2239                         }
2240
2241                         /* Only non-RMW writes can break release sequences */
2242                         if (!act->is_write() || act->is_rmw())
2243                                 continue;
2244
2245                         /* Check modification order */
2246                         if (mo_graph->checkReachable(rf, act)) {
2247                                 /* rf --mo--> act */
2248                                 future_ordered = true;
2249                                 continue;
2250                         }
2251                         if (mo_graph->checkReachable(act, release))
2252                                 /* act --mo--> release */
2253                                 break;
2254                         if (mo_graph->checkReachable(release, act) &&
2255                                       mo_graph->checkReachable(act, rf)) {
2256                                 /* release --mo-> act --mo--> rf */
2257                                 return true; /* complete */
2258                         }
2259                         /* act may break release sequence */
2260                         pending->writes.push_back(act);
2261                         certain = false;
2262                 }
2263                 if (!future_ordered)
2264                         certain = false; /* This thread is uncertain */
2265         }
2266
2267         if (certain) {
2268                 release_heads->push_back(release);
2269                 pending->writes.clear();
2270         } else {
2271                 pending->release = release;
2272                 pending->rf = rf;
2273         }
2274         return certain;
2275 }
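
/*
 * Illustrative sketch (hypothetical test-program code, not part of the
 * checker): a release sequence and its head, as traced above.
 *
 *   std::atomic<int> x(0);
 *
 *   // Thread 1:
 *   x.store(1, std::memory_order_release);       // release head
 *   x.fetch_add(1, std::memory_order_relaxed);   // same-thread write: continues it
 *
 *   // Thread 2:
 *   x.fetch_add(1, std::memory_order_relaxed);   // RMW from any thread: continues it
 *
 *   // Thread 3:
 *   int r = x.load(std::memory_order_acquire);   // may read the last RMW
 *
 * Reading any write in this chain lets the RMW walk above reach the release
 * store, so the acquire synchronizes with Thread 1. A plain relaxed store to
 * x by another thread, modification-ordered between the head and the RMWs,
 * would break the sequence; such stores are what get recorded in
 * pending->writes when certainty is lacking.
 */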
2276
2277 /**
2278  * An interface for getting the release sequence head(s) with which a
2279  * given ModelAction must synchronize. This function only returns a non-empty
2280  * result when it can locate a release sequence head with certainty. Otherwise,
2281  * it may mark the internal state of the ModelChecker so that it will handle
2282  * the release sequence at a later time, causing @a acquire to update its
2283  * synchronization at some later point in execution.
2284  *
2285  * @param acquire The 'acquire' action that may synchronize with a release
2286  * sequence
2287  * @param read The read action that may read from a release sequence; this may
2288  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2289  * when 'acquire' is a fence-acquire)
2290  * @param release_heads A pass-by-reference return parameter. Will be filled
2291  * with the head(s) of the release sequence(s), if they exist with certainty.
2292  * @see ModelChecker::release_seq_heads
2293  */
2294 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2295                 ModelAction *read, rel_heads_list_t *release_heads)
2296 {
2297         const ModelAction *rf = read->get_reads_from();
2298         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2299         sequence->acquire = acquire;
2300         sequence->read = read;
2301
2302         if (!release_seq_heads(rf, release_heads, sequence)) {
2303                 /* add act to 'lazy checking' list */
2304                 pending_rel_seqs->push_back(sequence);
2305         } else {
2306                 snapshot_free(sequence);
2307         }
2308 }
2309
2310 /**
2311  * Attempt to resolve all stashed operations that might synchronize with a
2312  * release sequence for a given location. This implements the "lazy" portion of
2313  * determining whether or not a release sequence was contiguous, since not all
2314  * modification order information is present at the time an action occurs.
2315  *
2316  * @param location The location/object that should be checked for release
2317  * sequence resolutions. A NULL value means to check all locations.
2318  * @param work_queue The work queue to which to add work items as they are
2319  * generated
2320  * @return True if any updates occurred (new synchronization, new mo_graph
2321  * edges)
2322  */
2323 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2324 {
2325         bool updated = false;
2326         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2327         while (it != pending_rel_seqs->end()) {
2328                 struct release_seq *pending = *it;
2329                 ModelAction *acquire = pending->acquire;
2330                 const ModelAction *read = pending->read;
2331
2332                 /* Only resolve sequences on the given location, if provided */
2333                 if (location && read->get_location() != location) {
2334                         it++;
2335                         continue;
2336                 }
2337
2338                 const ModelAction *rf = read->get_reads_from();
2339                 rel_heads_list_t release_heads;
2340                 bool complete;
2341                 complete = release_seq_heads(rf, &release_heads, pending);
2342                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2343                         if (!acquire->has_synchronized_with(release_heads[i])) {
2344                                 if (acquire->synchronize_with(release_heads[i]))
2345                                         updated = true;
2346                                 else
2347                                         set_bad_synchronization();
2348                         }
2349                 }
2350
2351                 if (updated) {
2352                         /* Re-check all pending release sequences */
2353                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2354                         /* Re-check read-acquire for mo_graph edges */
2355                         if (acquire->is_read())
2356                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2357
2358                         /* propagate synchronization to later actions */
2359                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2360                         for (; (*rit) != acquire; rit++) {
2361                                 ModelAction *propagate = *rit;
2362                                 if (acquire->happens_before(propagate)) {
2363                                         propagate->synchronize_with(acquire);
2364                                         /* Re-check 'propagate' for mo_graph edges */
2365                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2366                                 }
2367                         }
2368                 }
2369                 if (complete) {
2370                         it = pending_rel_seqs->erase(it);
2371                         snapshot_free(pending);
2372                 } else {
2373                         it++;
2374                 }
2375         }
2376
2377         // If we resolved promises or release sequences, see if we have realized a data race.
2378         checkDataRaces();
2379
2380         return updated;
2381 }
2382
2383 /**
2384  * Performs various bookkeeping operations for the current ModelAction. For
2385  * instance, adds action to the per-object, per-thread action vector and to the
2386  * action trace list of all thread actions.
2387  *
2388  * @param act is the ModelAction to add.
2389  */
2390 void ModelChecker::add_action_to_lists(ModelAction *act)
2391 {
2392         int tid = id_to_int(act->get_tid());
2393         ModelAction *uninit = NULL;
2394         int uninit_id = -1;
2395         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2396         if (list->empty() && act->is_atomic_var()) {
2397                 uninit = get_uninitialized_action(act);
2398                 uninit_id = id_to_int(uninit->get_tid());
2399                 list->push_front(uninit);
2400         }
2401         list->push_back(act);
2402
2403         action_trace->push_back(act);
2404         if (uninit)
2405                 action_trace->push_front(uninit);
2406
2407         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2408         if (tid >= (int)vec->size())
2409                 vec->resize(priv->next_thread_id);
2410         (*vec)[tid].push_back(act);
2411         if (uninit)
2412                 (*vec)[uninit_id].push_front(uninit);
2413
2414         if ((int)thrd_last_action->size() <= tid)
2415                 thrd_last_action->resize(get_num_threads());
2416         (*thrd_last_action)[tid] = act;
2417         if (uninit)
2418                 (*thrd_last_action)[uninit_id] = uninit;
2419
2420         if (act->is_fence() && act->is_release()) {
2421                 if ((int)thrd_last_fence_release->size() <= tid)
2422                         thrd_last_fence_release->resize(get_num_threads());
2423                 (*thrd_last_fence_release)[tid] = act;
2424         }
2425
2426         if (act->is_wait()) {
2427                 void *mutex_loc = (void *) act->get_value();
2428                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2429
2430                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2431                 if (tid >= (int)vec->size())
2432                         vec->resize(priv->next_thread_id);
2433                 (*vec)[tid].push_back(act);
2434         }
2435 }
2436
2437 /**
2438  * @brief Get the last action performed by a particular Thread
2439  * @param tid The thread ID of the Thread in question
2440  * @return The last action in the thread
2441  */
2442 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2443 {
2444         int threadid = id_to_int(tid);
2445         if (threadid < (int)thrd_last_action->size())
2446                 return (*thrd_last_action)[id_to_int(tid)];
2447         else
2448                 return NULL;
2449 }
2450
2451 /**
2452  * @brief Get the last fence release performed by a particular Thread
2453  * @param tid The thread ID of the Thread in question
2454  * @return The last fence release in the thread, if one exists; NULL otherwise
2455  */
2456 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2457 {
2458         int threadid = id_to_int(tid);
2459         if (threadid < (int)thrd_last_fence_release->size())
2460                 return (*thrd_last_fence_release)[id_to_int(tid)];
2461         else
2462                 return NULL;
2463 }
2464
2465 /**
2466  * Gets the last memory_order_seq_cst write (in the total global sequence)
2467  * performed on a particular object (i.e., memory location), not including the
2468  * current action.
2469  * @param curr The current ModelAction; also denotes the object location to
2470  * check
2471  * @return The last seq_cst write
2472  */
2473 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2474 {
2475         void *location = curr->get_location();
2476         action_list_t *list = get_safe_ptr_action(obj_map, location);
2477         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2478         action_list_t::reverse_iterator rit;
2479         for (rit = list->rbegin(); (*rit) != curr; rit++)
2480                 ;
2481         rit++; /* Skip past curr */
2482         for ( ; rit != list->rend(); rit++)
2483                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2484                         return *rit;
2485         return NULL;
2486 }
2487
2488 /**
2489  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2490  * performed in a particular thread, prior to a particular fence.
2491  * @param tid The ID of the thread to check
2492  * @param before_fence The fence from which to begin the search; if NULL, then
2493  * search for the most recent fence in the thread.
2494  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2495  */
2496 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2497 {
2498         /* All fences should have NULL location */
2499         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2500         action_list_t::reverse_iterator rit = list->rbegin();
2501
2502         if (before_fence) {
2503                 for (; rit != list->rend(); rit++)
2504                         if (*rit == before_fence)
2505                                 break;
2506
2507                 ASSERT(*rit == before_fence);
2508                 rit++;
2509         }
2510
2511         for (; rit != list->rend(); rit++)
2512                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2513                         return *rit;
2514         return NULL;
2515 }
2516
2517 /**
2518  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2519  * location). This function identifies the mutex according to the current
2520  * action, which is presumed to operate on the same mutex.
2521  * @param curr The current ModelAction; also denotes the object location to
2522  * check
2523  * @return The last unlock operation
2524  */
2525 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2526 {
2527         void *location = curr->get_location();
2528         action_list_t *list = get_safe_ptr_action(obj_map, location);
2529         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2530         action_list_t::reverse_iterator rit;
2531         for (rit = list->rbegin(); rit != list->rend(); rit++)
2532                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2533                         return *rit;
2534         return NULL;
2535 }
2536
2537 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2538 {
2539         ModelAction *parent = get_last_action(tid);
2540         if (!parent)
2541                 parent = get_thread(tid)->get_creation();
2542         return parent;
2543 }
2544
2545 /**
2546  * Returns the clock vector for a given thread.
2547  * @param tid The thread whose clock vector we want
2548  * @return Desired clock vector
2549  */
2550 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2551 {
2552         return get_parent_action(tid)->get_cv();
2553 }
2554
2555 /**
2556  * @brief Find the promise (if any) to resolve for the current action and
2557  * remove it from the pending promise vector
2558  * @param curr The current ModelAction. Should be a write.
2559  * @return The Promise to resolve, if any; otherwise NULL
2560  */
2561 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2562 {
2563         for (unsigned int i = 0; i < promises->size(); i++)
2564                 if (curr->get_node()->get_promise(i)) {
2565                         Promise *ret = (*promises)[i];
2566                         promises->erase(promises->begin() + i);
2567                         return ret;
2568                 }
2569         return NULL;
2570 }
2571
2572 /**
2573  * Resolve a Promise with a current write.
2574  * @param write The ModelAction that is fulfilling Promises
2575  * @param promise The Promise to resolve
2576  * @return True if the Promise was successfully resolved; false otherwise
2577  */
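/*
 * Illustrative sketch (an assumption about the write-processing caller, not
 * shown in this excerpt): a write chosen to satisfy a promise is handled as
 *
 *   Promise *promise = pop_promise_to_resolve(curr);
 *   if (promise != NULL)
 *           resolve_promise(curr, promise);
 */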
2578 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2579 {
2580         ModelVector<ModelAction *> actions_to_check;
2581
2582         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2583                 ModelAction *read = promise->get_reader(i);
2584                 read_from(read, write);
2585                 actions_to_check.push_back(read);
2586         }
2587         /* Make sure the promise's value matches the write's value */
2588         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2589         if (!mo_graph->resolvePromise(promise, write))
2590                 priv->failed_promise = true;
2591
2592         /**
2593          * @todo  It is possible to end up in an inconsistent state, where a
2594          * "resolved" promise may still be referenced if
2595          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2596          *
2597          * Note that the inconsistency only matters when dumping mo_graph to
2598          * file.
2599          *
2600          * delete promise;
2601          */
2602
	// Check whether reading these writes has made threads unable to
	// resolve promises
2605         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2606                 ModelAction *read = actions_to_check[i];
2607                 mo_check_promises(read, true);
2608         }
2609
2610         return true;
2611 }
2612
2613 /**
2614  * Compute the set of promises that could potentially be satisfied by this
2615  * action. Note that the set computation actually appears in the Node, not in
2616  * ModelChecker.
2617  * @param curr The ModelAction that may satisfy promises
2618  */
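/*
 * Illustrative sketch (ties this function to pop_promise_to_resolve() above;
 * the exploration step between the two calls is assumed): the candidate set
 * is stored as per-index flags on the Node:
 *
 *   compute_promises(curr);                      // mark candidate promise indices
 *   ...                                          // exploration picks a candidate
 *   Promise *p = pop_promise_to_resolve(curr);   // first marked index, if any
 */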
2619 void ModelChecker::compute_promises(ModelAction *curr)
2620 {
2621         for (unsigned int i = 0; i < promises->size(); i++) {
2622                 Promise *promise = (*promises)[i];
2623                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2624                         continue;
2625
2626                 bool satisfy = true;
2627                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2628                         const ModelAction *act = promise->get_reader(j);
2629                         if (act->happens_before(curr) ||
2630                                         act->could_synchronize_with(curr)) {
2631                                 satisfy = false;
2632                                 break;
2633                         }
2634                 }
2635                 if (satisfy)
2636                         curr->get_node()->set_promise(i);
2637         }
2638 }
2639
/** Checks pending promises in response to a change in a thread's ClockVector (e.g., after it synchronizes with another thread). */
2641 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2642 {
2643         for (unsigned int i = 0; i < promises->size(); i++) {
2644                 Promise *promise = (*promises)[i];
2645                 if (!promise->thread_is_available(tid))
2646                         continue;
2647                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2648                         const ModelAction *act = promise->get_reader(j);
2649                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2650                                         merge_cv->synchronized_since(act)) {
2651                                 if (promise->eliminate_thread(tid)) {
2652                                         /* Promise has failed */
2653                                         priv->failed_promise = true;
2654                                         return;
2655                                 }
2656                         }
2657                 }
2658         }
2659 }
2660
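/**
 * @brief Re-check all pending promises after a thread has been disabled
 *
 * A promise fails once no remaining thread can perform the store that would
 * satisfy it; if any pending promise has reached that state, flag the
 * execution as having a failed promise.
 */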
2661 void ModelChecker::check_promises_thread_disabled()
2662 {
2663         for (unsigned int i = 0; i < promises->size(); i++) {
2664                 Promise *promise = (*promises)[i];
2665                 if (promise->has_failed()) {
2666                         priv->failed_promise = true;
2667                         return;
2668                 }
2669         }
2670 }
2671
2672 /**
2673  * @brief Checks promises in response to addition to modification order for
2674  * threads.
2675  *
2676  * We test whether threads are still available for satisfying promises after an
2677  * addition to our modification order constraints. Those that are unavailable
2678  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2679  * that promise has failed.
2680  *
2681  * @param act The ModelAction which updated the modification order
2682  * @param is_read_check Should be true if act is a read and we must check for
2683  * updates to the store from which it read (there is a distinction here for
 * RMWs, which are both a load and a store)
2685  */
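/*
 * Illustrative sketch (an assumption about the call sites; resolve_promise()
 * above shows the read-side form): the check runs after modification-order
 * updates for both stores and loads:
 *
 *   mo_check_promises(curr, false);   // 'curr' is the store that was ordered
 *   mo_check_promises(read, true);    // check the store that 'read' reads from
 */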
2686 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2687 {
2688         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2689
2690         for (unsigned int i = 0; i < promises->size(); i++) {
2691                 Promise *promise = (*promises)[i];
2692
2693                 // Is this promise on the same location?
2694                 if (!promise->same_location(write))
2695                         continue;
2696
2697                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2698                         const ModelAction *pread = promise->get_reader(j);
2699                         if (!pread->happens_before(act))
2700                                continue;
2701                         if (mo_graph->checkPromise(write, promise)) {
2702                                 priv->failed_promise = true;
2703                                 return;
2704                         }
2705                         break;
2706                 }
2707
2708                 // Don't do any lookups twice for the same thread
2709                 if (!promise->thread_is_available(act->get_tid()))
2710                         continue;
2711
2712                 if (mo_graph->checkReachable(promise, write)) {
2713                         if (mo_graph->checkPromise(write, promise)) {
2714                                 priv->failed_promise = true;
2715                                 return;
2716                         }
2717                 }
2718         }
2719 }
2720
2721 /**
2722  * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
2724  * calculations.
2725  *
2726  * @param curr The current ModelAction. Must be a release sequence fixup
2727  * action.
2728  */
2729 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2730 {
2731         if (pending_rel_seqs->empty())
2732                 return;
2733
2734         struct release_seq *pending = pending_rel_seqs->back();
2735         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2736                 const ModelAction *write = pending->writes[i];
2737                 curr->get_node()->add_relseq_break(write);
2738         }
2739
2740         /* NULL means don't break the sequence; just synchronize */
2741         curr->get_node()->add_relseq_break(NULL);
2742 }
2743
2744 /**
2745  * Build up an initial set of all past writes that this 'read' action may read
2746  * from, as well as any previously-observed future values that must still be valid.
2747  *
2748  * @param curr is the current ModelAction that we are exploring; it must be a
2749  * 'read' operation.
2750  */
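/*
 * Worked example (illustrative): consider
 *
 *   T1: W_relaxed(x, 1); W_seqcst(x, 2)
 *   T2: R_seqcst(x)
 *
 * For the seq_cst read, the last seq_cst write is W_seqcst(x, 2), so
 * W_relaxed(x, 1) is excluded from the may-read-from set: it happens before
 * the last seq_cst write and is not that write itself. Each surviving
 * candidate is then kept only if reading from it leaves the execution
 * feasible.
 */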
2751 void ModelChecker::build_may_read_from(ModelAction *curr)
2752 {
2753         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2754         unsigned int i;
2755         ASSERT(curr->is_read());
2756
2757         ModelAction *last_sc_write = NULL;
2758
2759         if (curr->is_seqcst())
2760                 last_sc_write = get_last_seq_cst_write(curr);
2761
2762         /* Iterate over all threads */
2763         for (i = 0; i < thrd_lists->size(); i++) {
2764                 /* Iterate over actions in thread, starting from most recent */
2765                 action_list_t *list = &(*thrd_lists)[i];
2766                 action_list_t::reverse_iterator rit;
2767                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2768                         ModelAction *act = *rit;
2769
2770                         /* Only consider 'write' actions */
2771                         if (!act->is_write() || act == curr)
2772                                 continue;
2773
2774                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2775                         bool allow_read = true;
2776
2777                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2778                                 allow_read = false;
2779                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2780                                 allow_read = false;
2781
2782                         if (allow_read) {
2783                                 /* Only add feasible reads */
2784                                 mo_graph->startChanges();
2785                                 r_modification_order(curr, act);
2786                                 if (!is_infeasible())
2787                                         curr->get_node()->add_read_from_past(act);
2788                                 mo_graph->rollbackChanges();
2789                         }
2790
2791                         /* Include at most one act per-thread that "happens before" curr */
2792                         if (act->happens_before(curr))
2793                                 break;
2794                 }
2795         }
2796
2797         /* Inherit existing, promised future values */
2798         for (i = 0; i < promises->size(); i++) {
2799                 const Promise *promise = (*promises)[i];
2800                 const ModelAction *promise_read = promise->get_reader(0);
2801                 if (promise_read->same_var(curr)) {
2802                         /* Only add feasible future-values */
2803                         mo_graph->startChanges();
2804                         r_modification_order(curr, promise);
2805                         if (!is_infeasible())
2806                                 curr->get_node()->add_read_from_promise(promise_read);
2807                         mo_graph->rollbackChanges();
2808                 }
2809         }
2810
	/* We may find no valid may-read-from set only if the execution is doomed */
2812         if (!curr->get_node()->read_from_size()) {
2813                 priv->no_valid_reads = true;
2814                 set_assert();
2815         }
2816
2817         if (DBG_ENABLED()) {
2818                 model_print("Reached read action:\n");
2819                 curr->print();
2820                 model_print("Printing read_from_past\n");
2821                 curr->get_node()->print_read_from_past();
2822                 model_print("End printing read_from_past\n");
2823         }
2824 }
2825
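/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Walks backward along the reads-from chain of @a write: the read is allowed
 * if the chain reaches an uninitialized value or a release store that was
 * performed while the reading thread was on the sleep set; it is rejected as
 * soon as a non-RMW store is reached without satisfying either condition.
 *
 * @param curr The read performed by the (sleeping) thread
 * @param write The candidate store
 * @return True if the read may read from @a write
 */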
2826 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2827 {
2828         for ( ; write != NULL; write = write->get_reads_from()) {
2829                 /* UNINIT actions don't have a Node, and they never sleep */
2830                 if (write->is_uninitialized())
2831                         return true;
2832                 Node *prevnode = write->get_node()->get_parent();
2833
2834                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2835                 if (write->is_release() && thread_sleep)
2836                         return true;
2837                 if (!write->is_rmw())
2838                         return false;
2839         }
2840         return true;
2841 }
2842
2843 /**
2844  * @brief Get an action representing an uninitialized atomic
2845  *
2846  * This function may create a new one or try to retrieve one from the NodeStack
2847  *
2848  * @param curr The current action, which prompts the creation of an UNINIT action
2849  * @return A pointer to the UNINIT ModelAction
2850  */
2851 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2852 {
2853         Node *node = curr->get_node();
2854         ModelAction *act = node->get_uninit_action();
2855         if (!act) {
2856                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2857                 node->set_uninit_action(act);
2858         }
2859         act->create_cv(NULL);
2860         return act;
2861 }
2862
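/** @brief Print each action in a list (skipping those without a positive
 *  sequence number), followed by a hash computed over the whole list */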
2863 static void print_list(action_list_t *list)
2864 {
2865         action_list_t::iterator it;
2866
2867         model_print("---------------------------------------------------------------------\n");
2868
2869         unsigned int hash = 0;
2870
2871         for (it = list->begin(); it != list->end(); it++) {
2872                 const ModelAction *act = *it;
2873                 if (act->get_seq_number() > 0)
2874                         act->print();
2875                 hash = hash^(hash<<3)^((*it)->hash());
2876         }
2877         model_print("HASH %u\n", hash);
2878         model_print("---------------------------------------------------------------------\n");
2879 }
2880
2881 #if SUPPORT_MOD_ORDER_DUMP
2882 void ModelChecker::dumpGraph(char *filename) const
2883 {
	char buffer[200];
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return;
	fprintf(file, "digraph %s {\n", filename);
2888         mo_graph->dumpNodes(file);
2889         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2890
2891         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2892                 ModelAction *act = *it;
2893                 if (act->is_read()) {
2894                         mo_graph->dot_print_node(file, act);
2895                         if (act->get_reads_from())
2896                                 mo_graph->dot_print_edge(file,
2897                                                 act->get_reads_from(),
2898                                                 act,
2899                                                 "label=\"rf\", color=red, weight=2");
2900                         else
2901                                 mo_graph->dot_print_edge(file,
2902                                                 act->get_reads_from_promise(),
2903                                                 act,
2904                                                 "label=\"rf\", color=red");
2905                 }
		if (thread_array[id_to_int(act->get_tid())]) {
2907                         mo_graph->dot_print_edge(file,
2908                                         thread_array[id_to_int(act->get_tid())],
2909                                         act,
2910                                         "label=\"sb\", color=blue, weight=400");
2911                 }
2912
		thread_array[id_to_int(act->get_tid())] = act;
2914         }
2915         fprintf(file, "}\n");
2916         model_free(thread_array);
2917         fclose(file);
2918 }
2919 #endif
2920
2921 /** @brief Prints an execution trace summary. */
2922 void ModelChecker::print_summary() const
2923 {
2924 #if SUPPORT_MOD_ORDER_DUMP
2925         char buffername[100];
	snprintf(buffername, sizeof(buffername), "exec%04u", stats.num_total);
	mo_graph->dumpGraphToFile(buffername);
	snprintf(buffername, sizeof(buffername), "graph%04u", stats.num_total);
2929         dumpGraph(buffername);
2930 #endif
2931
	model_print("Execution %u:", stats.num_total);
2933         if (isfeasibleprefix()) {
2934                 if (scheduler->all_threads_sleeping())
2935                         model_print(" SLEEP-SET REDUNDANT");
2936                 model_print("\n");
2937         } else
2938                 print_infeasibility(" INFEASIBLE");
2939         print_list(action_trace);
2940         model_print("\n");
2941         if (!promises->empty()) {
2942                 model_print("Pending promises:\n");
2943                 for (unsigned int i = 0; i < promises->size(); i++) {
2944                         model_print(" [P%u] ", i);
2945                         (*promises)[i]->print();
2946                 }
2947                 model_print("\n");
2948         }
2949 }
2950
2951 /**
2952  * Add a Thread to the system for the first time. Should only be called once
2953  * per thread.
2954  * @param t The Thread to add
2955  */
2956 void ModelChecker::add_thread(Thread *t)
2957 {
2958         thread_map->put(id_to_int(t->get_id()), t);
2959         scheduler->add_thread(t);
2960 }
2961
2962 /**
2963  * @brief Get a Thread reference by its ID
2964  * @param tid The Thread's ID
2965  * @return A Thread reference
2966  */
2967 Thread * ModelChecker::get_thread(thread_id_t tid) const
2968 {
2969         return thread_map->get(id_to_int(tid));
2970 }
2971
2972 /**
2973  * @brief Get a reference to the Thread in which a ModelAction was executed
2974  * @param act The ModelAction
2975  * @return A Thread reference
2976  */
2977 Thread * ModelChecker::get_thread(const ModelAction *act) const
2978 {
2979         return get_thread(act->get_tid());
2980 }
2981
2982 /**
2983  * @brief Get a Promise's "promise number"
2984  *
2985  * A "promise number" is an index number that is unique to a promise, valid
2986  * only for a specific snapshot of an execution trace. Promises may come and go
 * as they are generated and resolved, so an index only retains meaning for the
2988  * current snapshot.
2989  *
2990  * @param promise The Promise to check
 * @return The promise index, if the promise is still valid; otherwise -1
2992  */
2993 int ModelChecker::get_promise_number(const Promise *promise) const
2994 {
2995         for (unsigned int i = 0; i < promises->size(); i++)
2996                 if ((*promises)[i] == promise)
2997                         return i;
2998         /* Not found */
2999         return -1;
3000 }
3001
3002 /**
3003  * @brief Check if a Thread is currently enabled
3004  * @param t The Thread to check
3005  * @return True if the Thread is currently enabled
3006  */
3007 bool ModelChecker::is_enabled(Thread *t) const
3008 {
3009         return scheduler->is_enabled(t);
3010 }
3011
3012 /**
3013  * @brief Check if a Thread is currently enabled
3014  * @param tid The ID of the Thread to check
3015  * @return True if the Thread is currently enabled
3016  */
3017 bool ModelChecker::is_enabled(thread_id_t tid) const
3018 {
3019         return scheduler->is_enabled(tid);
3020 }
3021
3022 /**
3023  * Switch from a model-checker context to a user-thread context. This is the
3024  * complement of ModelChecker::switch_to_master and must be called from the
3025  * model-checker context
3026  *
3027  * @param thread The user-thread to switch to
3028  */
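/*
 * Illustrative sketch (an assumption; the user-thread side lives in the
 * threading/atomics wrappers, the model-checker side in run(); 'addr' and
 * 'next_thr' are placeholders): the two switches pair up roughly as
 *
 *   // user-thread context: trap into the model checker with a new action
 *   uint64_t val = model->switch_to_master(
 *                   new ModelAction(ATOMIC_READ, std::memory_order_seq_cst, addr));
 *
 *   // model-checker context: resume the thread chosen by the scheduler
 *   switch_from_master(next_thr);
 */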
3029 void ModelChecker::switch_from_master(Thread *thread)
3030 {
3031         scheduler->set_current_thread(thread);
3032         Thread::swap(&system_context, thread);
3033 }
3034
3035 /**
 * Switch from a user-thread context to the "master thread" context (a.k.a. system
3037  * context). This switch is made with the intention of exploring a particular
3038  * model-checking action (described by a ModelAction object). Must be called
3039  * from a user-thread context.
3040  *
3041  * @param act The current action that will be explored. May be NULL only if
 * the trace is exiting via an assertion (see ModelChecker::set_assert and
3043  * ModelChecker::has_asserted).
 * @return The value returned by the current action
3045  */
3046 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3047 {
3048         DBG();
3049         Thread *old = thread_current();
3050         scheduler->set_current_thread(NULL);
3051         ASSERT(!old->get_pending());
3052         old->set_pending(act);
3053         if (Thread::swap(old, &system_context) < 0) {
3054                 perror("swap threads");
3055                 exit(EXIT_FAILURE);
3056         }
3057         return old->get_return_value();
3058 }
3059
3060 /**
3061  * Takes the next step in the execution, if possible.
3062  * @param curr The current step to take
3063  * @return Returns the next Thread to run, if any; NULL if this execution
3064  * should terminate
3065  */
3066 Thread * ModelChecker::take_step(ModelAction *curr)
3067 {
3068         Thread *curr_thrd = get_thread(curr);
3069         ASSERT(curr_thrd->get_state() == THREAD_READY);
3070
3071         if (!check_action_enabled(curr)) {
3072                 /* Make the execution look like we chose to run this action
3073                  * much later, when a lock/join can succeed */
3074                 get_thread(curr)->set_pending(curr);
3075                 scheduler->sleep(curr_thrd);
3076                 curr = NULL;
3077         } else {
3078                 curr = check_current_action(curr);
3079                 ASSERT(curr);
3080         }
3081
3082         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3083                 scheduler->remove_thread(curr_thrd);
3084
3085         if (curr)
3086                 return action_select_next_thread(curr);
3087         return NULL;
3088 }
3089
3090 /** Wrapper to run the user's main function, with appropriate arguments */
3091 void user_main_wrapper(void *)
3092 {
3093         user_main(model->params.argc, model->params.argv);
3094 }
3095
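/**
 * @brief Decide whether the current execution should stop taking steps
 * @return True if the execution is infeasible, if it is a feasible prefix
 * that already has bug reports, or if the optional step bound (params.bound)
 * has been exceeded; false otherwise
 */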
3096 bool ModelChecker::should_terminate_execution()
3097 {
3098         /* Infeasible -> don't take any more steps */
3099         if (is_infeasible())
3100                 return true;
3101         else if (isfeasibleprefix() && have_bug_reports()) {
3102                 set_assert();
3103                 return true;
3104         }
3105
3106         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3107                 return true;
3108         return false;
3109 }
3110
3111 /** @brief Run ModelChecker for the user program */
3112 void ModelChecker::run()
3113 {
3114         do {
3115                 thrd_t user_thread;
3116                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3117                 add_thread(t);
3118
3119                 do {
3120                         /*
			 * Stash the next pending action(s) for the thread(s). We
			 * should only need to stash one action for the thread
			 * that just took a step, plus the first step for any
			 * newly-created thread.
3125                          */
3126                         for (unsigned int i = 0; i < get_num_threads(); i++) {
3127                                 thread_id_t tid = int_to_id(i);
3128                                 Thread *thr = get_thread(tid);
3129                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3130                                         switch_from_master(thr);
3131                                         if (thr->is_waiting_on(thr))
3132                                                 assert_bug("Deadlock detected");
3133                                 }
3134                         }
3135
3136                         /* Catch assertions from prior take_step or from
3137                          * between-ModelAction bugs (e.g., data races) */
3138                         if (has_asserted())
3139                                 break;
3140
3141                         if (!t)
3142                                 t = get_next_thread();
3143                         if (!t || t->is_model_thread())
3144                                 break;
3145
3146                         /* Consume the next action for a Thread */
3147                         ModelAction *curr = t->get_pending();
3148                         t->set_pending(NULL);
3149                         t = take_step(curr);
3150                 } while (!should_terminate_execution());
3151
3152                 /*
3153                  * Launch end-of-execution release sequence fixups only when
		 * the execution is otherwise feasible AND:
		 *
		 * (1) there are pending release sequences,
		 * (2) there are pending assertions that could be invalidated
		 * by a change in clock vectors (i.e., data races), and
		 * (3) there are no pending promises.
3160                  */
3161                 while (!pending_rel_seqs->empty() &&
3162                                 is_feasible_prefix_ignore_relseq() &&
3163                                 !unrealizedraces.empty()) {
3164                         model_print("*** WARNING: release sequence fixup action "
					"(%zu pending release sequence(s)) ***\n",
3166                                         pending_rel_seqs->size());
3167                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3168                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3169                                         model_thread);
3170                         take_step(fixup);
		}
3172         } while (next_execution());
3173
3174         model_print("******* Model-checking complete: *******\n");
3175         print_stats();
3176 }