/* c11tester: execution.cc */
#include <stdio.h>
#include <algorithm>
#include <mutex>
#include <new>
#include <stdarg.h>

#include "execution.h"
#include "action.h"
#include "nodestack.h"
#include "schedule.h"
#include "common.h"
#include "clockvector.h"
#include "cyclegraph.h"
#include "promise.h"
#include "datarace.h"
#include "threads-model.h"
#include "bugmessage.h"

#define INITIAL_THREAD_ID       0

/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	unsigned int next_thread_id;
	modelclock_t used_sequence_numbers;
	ModelAction *next_backtrack;
	SnapVector<bug_message *> bugs;
	bool failed_promise;
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	bool asserted;

	SNAPSHOTALLOC
};

/** @brief Constructor */
ModelExecution::ModelExecution(struct model_params *params, Scheduler *scheduler, NodeStack *node_stack) :
	params(params),
	scheduler(scheduler),
	action_trace(new action_list_t()),
	thread_map(new HashTable<int, Thread *, int>()),
	obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4>()),
	promises(new SnapVector<Promise *>()),
	futurevalues(new SnapVector<struct PendingFutureValue>()),
	pending_rel_seqs(new SnapVector<struct release_seq *>()),
	thrd_last_action(new SnapVector<ModelAction *>(1)),
	thrd_last_fence_release(new SnapVector<ModelAction *>()),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph()),
	execution_number(1)
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	thread_map->put(id_to_int(model_thread->get_id()), model_thread);
	scheduler->register_engine(this);
}

/** @brief Destructor */
ModelExecution::~ModelExecution()
{
	for (unsigned int i = 0; i < get_num_threads(); i++)
		delete thread_map->get(i);
	delete thread_map;

	delete obj_thrd_map;
	delete obj_map;
	delete condvar_waiters_map;
	delete action_trace;

	for (unsigned int i = 0; i < promises->size(); i++)
		delete (*promises)[i];
	delete promises;

	delete pending_rel_seqs;

	delete thrd_last_action;
	delete thrd_last_fence_release;
	delete mo_graph;
	delete priv;
}

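/**
 * @brief Look up (or lazily create) the action list for a memory location
 *
 * Helper for the per-location HashTable: if no list exists yet for the given
 * pointer, an empty list is allocated and inserted, so callers always receive
 * a valid list.
 */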
static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
{
	action_list_t *tmp = hash->get(ptr);
	if (tmp == NULL) {
		tmp = new action_list_t();
		hash->put(ptr, tmp);
	}
	return tmp;
}

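/**
 * @brief Look up (or lazily create) the per-thread action-list vector for a location
 *
 * Same lazy-allocation pattern as get_safe_ptr_action(), but for the map from
 * objects to per-thread action lists.
 */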
static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
{
	SnapVector<action_list_t> *tmp = hash->get(ptr);
	if (tmp == NULL) {
		tmp = new SnapVector<action_list_t>();
		hash->put(ptr, tmp);
	}
	return tmp;
}

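/**
 * @brief Get the list of actions a given thread has performed on a given object
 * @param obj The memory location of interest
 * @param tid The thread of interest
 * @return The thread's action list for the object, or NULL if none exists
 */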
action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
{
	SnapVector<action_list_t> *wrv = obj_thrd_map->get(obj);
	if (wrv == NULL)
		return NULL;
	unsigned int thread = id_to_int(tid);
	if (thread < wrv->size())
		return &(*wrv)[thread];
	else
		return NULL;
}

/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
	return priv->next_thread_id++;
}

/** @return the number of user threads created during this execution */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}

/** @return a sequence number for a new ModelAction */
modelclock_t ModelExecution::get_next_seq_num()
{
	return ++priv->used_sequence_numbers;
}

/**
 * @brief Should the current action wake up a given thread?
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	if (curr->is_rmwr())
		return false;
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
		return true;
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
		return true;
	/* Fence-release + store can wake a load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
			return true;
	}
	return false;
}

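/**
 * @brief Wake any sleeping threads that the current action may unblock
 *
 * Scans every thread in the scheduler's sleep set and removes those for which
 * should_wake_up() reports that the current action could wake them.
 *
 * @param curr The current action being executed
 */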
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
{
	for (unsigned int i = 0; i < get_num_threads(); i++) {
		Thread *thr = get_thread(int_to_id(i));
		if (scheduler->is_sleep_set(thr)) {
			if (should_wake_up(curr, thr))
				/* Remove this thread from sleep set */
				scheduler->remove_sleep(thr);
		}
	}
}

/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}

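/**
 * @brief Record a bug report for the current execution
 *
 * The execution is halted (via set_assert()) only if the current trace is a
 * feasible prefix; otherwise the report is kept but execution continues.
 *
 * @param msg A description of the bug
 * @return True if the execution was halted because of this bug; false otherwise
 */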
bool ModelExecution::assert_bug(const char *msg)
{
	priv->bugs.push_back(new bug_message(msg));

	if (isfeasibleprefix()) {
		set_assert();
		return true;
	}
	return false;
}

/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}

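/** @return The list of bug reports collected during this execution */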
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}

/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}

/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}

/**
 * Check if we are in a deadlock. Should only be called at the end of an
 * execution, although it should not give false positives in the middle of an
 * execution (there should be some ENABLED thread).
 *
 * @return True if program is in a deadlock; false otherwise
 */
bool ModelExecution::is_deadlocked() const
{
	bool blocking_threads = false;
	for (unsigned int i = 0; i < get_num_threads(); i++) {
		thread_id_t tid = int_to_id(i);
		if (is_enabled(tid))
			return false;
		Thread *t = get_thread(tid);
		if (!t->is_model_thread() && t->get_pending())
			blocking_threads = true;
	}
	return blocking_threads;
}

/**
 * Check if this is a complete execution. That is, have all threads completed
 * execution (rather than exiting because sleep sets have forced a redundant
 * execution)?
 *
 * @return True if the execution is complete.
 */
bool ModelExecution::is_complete_execution() const
{
	for (unsigned int i = 0; i < get_num_threads(); i++)
		if (is_enabled(int_to_id(i)))
			return false;
	return true;
}

/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	action_list_t *list = action_trace;
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}

/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primarily includes pairs
 * of synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	/* case ATOMIC_FENCE: fences don't directly cause backtracking */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}

/** This method finds backtracking points against which we should try to
 * reorder the given ModelAction.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
				id_to_int(prev->get_tid()),
				id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}

/**
 * @brief Cache a backtracking point as the "most recent", if eligible
 *
 * Note that this does not prepare the NodeStack for this backtracking
 * operation, it only caches the action on a per-execution basis
 *
 * @param act The operation at which we should explore a different next action
 * (i.e., backtracking point)
 * @return True, if this action is now the most recent backtracking point;
 * false otherwise
 */
bool ModelExecution::set_latest_backtrack(ModelAction *act)
{
	if (!priv->next_backtrack || *act > *priv->next_backtrack) {
		priv->next_backtrack = act;
		return true;
	}
	return false;
}

/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelExecution::get_next_backtrack()
{
	ModelAction *next = priv->next_backtrack;
	priv->next_backtrack = NULL;
	return next;
}

/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					mo_graph->rollbackChanges();
					continue;
				} else {
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises->push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}

/**
 * Processes a lock, trylock, or unlock model action.
 * @param curr The mutex model action to process.
 *
 * The trylock operation checks whether the lock is already taken.  If not,
 * it falls through to the normal lock operation case.  If so, it returns
 * failure.
 *
 * The lock operation has already been checked to be enabled, so it just
 * grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}

/**
 * @brief Check if the current pending promises allow a future value to be sent
 *
 * If one of the following is true:
 *  (a) there are no pending promises
 *  (b) the reader and writer do not cross any promises
 * Then, it is safe to pass a future value back now.
 *
 * Otherwise, we must save the pending future value until (a) or (b) is true
 *
 * @param writer The operation which sends the future value. Must be a write.
 * @param reader The operation which will observe the value. Must be a read.
 * @return True if the future value can be sent now; false if it must wait.
 */
bool ModelExecution::promises_may_allow(const ModelAction *writer,
		const ModelAction *reader) const
{
	if (promises->empty())
		return true;
	for (int i = promises->size() - 1; i >= 0; i--) {
		ModelAction *pr = (*promises)[i]->get_reader(0);
		//reader is after promise...doesn't cross any promise
		if (*reader > *pr)
			return true;
		//writer is after promise, reader before...bad...
		if (*writer > *pr)
			return false;
	}
	return true;
}

/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}

/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	bool updated_mod_order = w_modification_order(curr, &send_fv);
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				futurevalues->push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values */
	for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = (*futurevalues)[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues->erase(futurevalues->begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}

/**
 * Process a fence ModelAction
 * @param curr The ModelAction to process
 * @return True if synchronization was updated
 */
bool ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurrence (not in this function), for
	 *   use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 *   sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = action_trace;
		action_list_t::reverse_iterator rit;
		/* Find X : is_read(X) && X --sb-> curr */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr)
				continue;
			if (act->get_tid() != curr->get_tid())
				continue;
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
				break;
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
				break;
			if (!act->is_read())
				continue;
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
				continue;

			/* Establish hypothetical release sequences */
			rel_heads_list_t release_heads;
			get_release_seq_heads(curr, act, &release_heads);
			for (unsigned int i = 0; i < release_heads.size(); i++)
				synchronize(release_heads[i], curr);
			if (release_heads.size() != 0)
				updated = true;
		}
	}
	return updated;
}

/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc.  This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises->size(); i++) {
			Promise *promise = (*promises)[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises->size(); i++) {
			Promise *promise = (*promises)[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}

/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	const ModelAction *write = curr->get_node()->get_relseq_break();
	struct release_seq *sequence = pending_rel_seqs->back();
	pending_rel_seqs->pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check act for mo_graph edges */
		work_queue->push_back(MOEdgeWorkEntry(acquire));

		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				synchronize(acquire, propagate);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));
			}
		}
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}

/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" in its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}

/**
 * @brief Establish reads-from relation between two actions
 *
 * Perform basic operations involved with establishing a concrete rf relation,
 * including setting the ModelAction data and checking for release sequences.
 *
 * @param act The action that is reading (must be a read)
 * @param rf The action from which we are reading (must be a write)
 *
 * @return True if this read established synchronization
 */
bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
{
	ASSERT(rf);
	ASSERT(rf->is_write());

	act->set_read_from(rf);
	if (act->is_acquire()) {
		rel_heads_list_t release_heads;
		get_release_seq_heads(act, act, &release_heads);
		int num_heads = release_heads.size();
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!synchronize(release_heads[i], act))
				num_heads--;
		return num_heads > 0;
	}
	return false;
}

/**
 * @brief Synchronizes two actions
 *
 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
 * This function performs the synchronization as well as providing other hooks
 * for other checks along with synchronization.
 *
 * @param first The left-hand side of the synchronizes-with relation
 * @param second The right-hand side of the synchronizes-with relation
 * @return True if the synchronization was successful (i.e., was consistent
 * with the execution order); false otherwise
 */
bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
{
	if (*second < *first) {
		set_bad_synchronization();
		return false;
	}
	check_promises(first->get_tid(), second->get_cv(), first->get_cv());
	return second->synchronize_with(first);
}

/**
 * Check promises and eliminate potentially-satisfying threads when a thread is
 * blocked (e.g., join, lock). A thread which is waiting on another thread can
 * no longer satisfy a promise generated from that thread.
 *
 * @param blocker The thread on which a thread is waiting
 * @param waiting The waiting thread
 */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}

/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether a lock or join operation would be successful (i.e., is the
 * lock already locked, or is the joined thread already complete). If not, put
 * the action in a waiter list.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelExecution::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		std::mutex *lock = curr->get_mutex();
		struct std::mutex_state *state = lock->get_state();
		if (state->locked)
			return false;
	} else if (curr->is_thread_join()) {
		Thread *blocking = curr->get_thread_operand();
		if (!blocking->is_complete()) {
			thread_blocking_check_promises(blocking, get_thread(curr));
			return false;
		}
	}

	return true;
}

/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr; may be NULL, if the current action is not enabled to run
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}

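/**
 * @brief Record the current action as a backtracking point, if needed
 *
 * If the current node's parent still has unexplored backtracking choices, or
 * the current node has unexplored misc, reads-from, promise, or release-
 * sequence-break choices, cache the current action as the latest backtracking
 * point.
 */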
void ModelExecution::check_curr_backtracking(ModelAction *curr)
{
	Node *currnode = curr->get_node();
	Node *parnode = currnode->get_parent();

	if ((parnode && !parnode->backtrack_empty()) ||
			!currnode->misc_empty() ||
			!currnode->read_from_empty() ||
			!currnode->promise_empty() ||
			!currnode->relseq_break_empty()) {
		set_latest_backtrack(curr);
	}
}

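/** @return True if any outstanding promise has passed its expiration time */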
bool ModelExecution::promises_expired() const
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (promise->get_expiration() < priv->used_sequence_numbers)
			return true;
	}
	return false;
}

1338 /**
1339  * This is the strongest feasibility check available.
1340  * @return whether the current trace (partial or complete) must be a prefix of
1341  * a feasible trace.
1342  */
1343 bool ModelExecution::isfeasibleprefix() const
1344 {
1345         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1346 }
1347
1348 /**
1349  * Print diagnostic information about an infeasible execution
1350  * @param prefix A string to prefix the output with; if NULL, then a default
1351  * message prefix will be provided
1352  */
1353 void ModelExecution::print_infeasibility(const char *prefix) const
1354 {
1355         char buf[200];  /* large enough to hold every infeasibility message below */
1356         char *ptr = buf;
1357         if (mo_graph->checkForCycles())
1358                 ptr += sprintf(ptr, "[mo cycle]");
1359         if (priv->failed_promise)
1360                 ptr += sprintf(ptr, "[failed promise]");
1361         if (priv->too_many_reads)
1362                 ptr += sprintf(ptr, "[too many reads]");
1363         if (priv->no_valid_reads)
1364                 ptr += sprintf(ptr, "[no valid reads-from]");
1365         if (priv->bad_synchronization)
1366                 ptr += sprintf(ptr, "[bad sw ordering]");
1367         if (promises_expired())
1368                 ptr += sprintf(ptr, "[promise expired]");
1369         if (promises->size() != 0)
1370                 ptr += sprintf(ptr, "[unresolved promise]");
1371         if (ptr != buf)
1372                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1373 }
1374
1375 /**
1376  * Returns whether the current completed trace is feasible, except for pending
1377  * release sequences.
1378  */
1379 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1380 {
1381         return !is_infeasible() && promises->size() == 0;
1382 }
1383
1384 /**
1385  * Check if the current partial trace is infeasible. Does not check any
1386  * end-of-execution flags, which might rule out the execution. Thus, this is
1387  * useful only for ruling an execution infeasible, not for certifying it feasible.
1388  * @return whether the current partial trace is infeasible.
1389  */
1390 bool ModelExecution::is_infeasible() const
1391 {
1392         return mo_graph->checkForCycles() ||
1393                 priv->no_valid_reads ||
1394                 priv->failed_promise ||
1395                 priv->too_many_reads ||
1396                 priv->bad_synchronization ||
1397                 promises_expired();
1398 }
1399
1400 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1401 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1402         ModelAction *lastread = get_last_action(act->get_tid());
1403         lastread->process_rmw(act);
1404         if (act->is_rmw()) {
1405                 if (lastread->get_reads_from())
1406                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1407                 else
1408                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1409                 mo_graph->commitChanges();
1410         }
1411         return lastread;
1412 }
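
/*
 * A minimal client-code sketch (hypothetical thread function, not part of
 * this file) of the pattern the RMW edge above enforces: each successful
 * atomic RMW must read the store immediately preceding its own write in
 * modification order, so two concurrent increments can never both read the
 * same store and lose an update.
 *
 *   #include <atomic>
 *   std::atomic<int> counter(0);
 *   void worker()
 *   {
 *           counter.fetch_add(1, std::memory_order_relaxed);
 *   }
 *   // With two worker() threads, the addRMWEdge() calls above force the two
 *   // RMWs into a total modification order, so counter ends at 2.
 */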
1413
1414 /**
1415  * A helper function for ModelExecution::check_recency, to check if the current
1416  * thread is able to read from a different write/promise for 'params.maxreads'
1417  * number of steps and if that write/promise should become visible (i.e., is
1418  * ordered later in the modification order). This helps model memory liveness.
1419  *
1420  * @param curr The current action. Must be a read.
1421  * @param rf The write/promise from which we plan to read
1422  * @param other_rf The write/promise from which we may read
1423  * @return True if we were able to read from other_rf for params.maxreads steps
1424  */
1425 template <typename T, typename U>
1426 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1427 {
1428         /* Need a different write/promise */
1429         if (other_rf->equals(rf))
1430                 return false;
1431
1432         /* Only look for "newer" writes/promises */
1433         if (!mo_graph->checkReachable(rf, other_rf))
1434                 return false;
1435
1436         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1437         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1438         action_list_t::reverse_iterator rit = list->rbegin();
1439         ASSERT((*rit) == curr);
1440         /* Skip past curr */
1441         rit++;
1442
1443         /* Does this write/promise work for everyone? */
1444         for (int i = 0; i < params->maxreads; i++, rit++) {
1445                 ModelAction *act = *rit;
1446                 if (!act->may_read_from(other_rf))
1447                         return false;
1448         }
1449         return true;
1450 }
1451
1452 /**
1453  * Checks whether a thread has read from the same write or Promise too many
1454  * times without seeing the effects of a later write/Promise.
1455  *
1456  * Basic idea:
1457  * 1) there must be a different write/promise that we could read from,
1458  * 2) we must have read from the same write/promise in excess of maxreads times,
1459  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1460  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1461  *
1462  * If so, we decide that the execution is no longer feasible.
1463  *
1464  * @param curr The current action. Must be a read.
1465  * @param rf The ModelAction/Promise from which we might read.
1466  * @return True if the read should succeed; false otherwise
1467  */
1468 template <typename T>
1469 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1470 {
1471         if (!params->maxreads)
1472                 return true;
1473
1474         //NOTE: The next check is just an optimization; it is not strictly necessary.
1475         if (curr->get_node()->get_read_from_past_size() +
1476                         curr->get_node()->get_read_from_promise_size() <= 1)
1477                 return true;
1478
1479         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1480         int tid = id_to_int(curr->get_tid());
1481         ASSERT(tid < (int)thrd_lists->size());
1482         action_list_t *list = &(*thrd_lists)[tid];
1483         action_list_t::reverse_iterator rit = list->rbegin();
1484         ASSERT((*rit) == curr);
1485         /* Skip past curr */
1486         rit++;
1487
1488         action_list_t::reverse_iterator ritcopy = rit;
1489         /* See if we have enough reads from the same value */
1490         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1491                 if (ritcopy == list->rend())
1492                         return true;
1493                 ModelAction *act = *ritcopy;
1494                 if (!act->is_read())
1495                         return true;
1496                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1497                         return true;
1498                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1499                         return true;
1500                 if (act->get_node()->get_read_from_past_size() +
1501                                 act->get_node()->get_read_from_promise_size() <= 1)
1502                         return true;
1503         }
1504         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1505                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1506                 if (should_read_instead(curr, rf, write))
1507                         return false; /* liveness failure */
1508         }
1509         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1510                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1511                 if (should_read_instead(curr, rf, promise))
1512                         return false; /* liveness failure */
1513         }
1514         return true;
1515 }
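
/*
 * A minimal client-code sketch (hypothetical thread functions, not part of
 * this file) of the liveness problem check_recency() targets: a relaxed spin
 * loop may legally keep reading the stale initial store forever, so after
 * params->maxreads identical reads the checker insists on trying the newer,
 * mo-later store if one is available.
 *
 *   #include <atomic>
 *   std::atomic<int> flag(0);
 *   void reader()
 *   {
 *           while (!flag.load(std::memory_order_relaxed))
 *                   ;       // may keep re-reading the initial 0
 *   }
 *   void writer()
 *   {
 *           flag.store(1, std::memory_order_release);
 *   }
 */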
1516
1517 /**
1518  * @brief Updates the mo_graph with the constraints imposed from the current
1519  * read.
1520  *
1521  * Basic idea is the following: Go through each other thread and find
1522  * the last action that happened before our read.  Two cases:
1523  *
1524  * -# The action is a write: that write must either occur before
1525  * the write we read from or be the write we read from.
1526  * -# The action is a read: the write that that action read from
1527  * must occur before the write we read from or be the same write.
1528  *
1529  * @param curr The current action. Must be a read.
1530  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1531  * @return True if modification order edges were added; false otherwise
1532  */
1533 template <typename rf_type>
1534 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1535 {
1536         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1537         unsigned int i;
1538         bool added = false;
1539         ASSERT(curr->is_read());
1540
1541         /* Last SC fence in the current thread */
1542         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1543         ModelAction *last_sc_write = NULL;
1544         if (curr->is_seqcst())
1545                 last_sc_write = get_last_seq_cst_write(curr);
1546
1547         /* Iterate over all threads */
1548         for (i = 0; i < thrd_lists->size(); i++) {
1549                 /* Last SC fence in thread i */
1550                 ModelAction *last_sc_fence_thread_local = NULL;
1551                 if (int_to_id((int)i) != curr->get_tid())
1552                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1553
1554                 /* Last SC fence in thread i, before last SC fence in current thread */
1555                 ModelAction *last_sc_fence_thread_before = NULL;
1556                 if (last_sc_fence_local)
1557                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1558
1559                 /* Iterate over actions in thread, starting from most recent */
1560                 action_list_t *list = &(*thrd_lists)[i];
1561                 action_list_t::reverse_iterator rit;
1562                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1563                         ModelAction *act = *rit;
1564
1565                         /* Skip curr */
1566                         if (act == curr)
1567                                 continue;
1568                         /* Don't want to add reflexive edges on 'rf' */
1569                         if (act->equals(rf)) {
1570                                 if (act->happens_before(curr))
1571                                         break;
1572                                 else
1573                                         continue;
1574                         }
1575
1576                         if (act->is_write()) {
1577                                 /* C++, Section 29.3 statement 5 */
1578                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1579                                                 *act < *last_sc_fence_thread_local) {
1580                                         added = mo_graph->addEdge(act, rf) || added;
1581                                         break;
1582                                 }
1583                                 /* C++, Section 29.3 statement 4 */
1584                                 else if (act->is_seqcst() && last_sc_fence_local &&
1585                                                 *act < *last_sc_fence_local) {
1586                                         added = mo_graph->addEdge(act, rf) || added;
1587                                         break;
1588                                 }
1589                                 /* C++, Section 29.3 statement 6 */
1590                                 else if (last_sc_fence_thread_before &&
1591                                                 *act < *last_sc_fence_thread_before) {
1592                                         added = mo_graph->addEdge(act, rf) || added;
1593                                         break;
1594                                 }
1595                         }
1596
1597                         /* C++, Section 29.3 statement 3 (second subpoint) */
1598                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1599                                 added = mo_graph->addEdge(act, rf) || added;
1600                                 break;
1601                         }
1602
1603                         /*
1604                          * Include at most one act per-thread that "happens
1605                          * before" curr
1606                          */
1607                         if (act->happens_before(curr)) {
1608                                 if (act->is_write()) {
1609                                         added = mo_graph->addEdge(act, rf) || added;
1610                                 } else {
1611                                         const ModelAction *prevrf = act->get_reads_from();
1612                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1613                                         if (prevrf) {
1614                                                 if (!prevrf->equals(rf))
1615                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1616                                         } else if (!prevrf_promise->equals(rf)) {
1617                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1618                                         }
1619                                 }
1620                                 break;
1621                         }
1622                 }
1623         }
1624
1625         /*
1626          * All compatible, thread-exclusive promises must be ordered after any
1627          * concrete loads from the same thread
1628          */
1629         for (unsigned int i = 0; i < promises->size(); i++)
1630                 if ((*promises)[i]->is_compatible_exclusive(curr))
1631                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1632
1633         return added;
1634 }
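
/*
 * A minimal client-code sketch (hypothetical thread functions, not part of
 * this file) of the "happens before" case handled above: if thread_a's store
 * to x happens before thread_b's load of x (via release/acquire on flag),
 * the write that the load reads from must be that store or one ordered
 * after it in modification order, which is exactly the addEdge(act, rf)
 * constraint.
 *
 *   #include <atomic>
 *   std::atomic<int> x(0), flag(0);
 *   void thread_a()
 *   {
 *           x.store(1, std::memory_order_relaxed);
 *           flag.store(1, std::memory_order_release);
 *   }
 *   void thread_b()
 *   {
 *           if (flag.load(std::memory_order_acquire) == 1)
 *                   x.load(std::memory_order_relaxed);   // must see x==1 (or later)
 *   }
 */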
1635
1636 /**
1637  * Updates the mo_graph with the constraints imposed from the current write.
1638  *
1639  * Basic idea is the following: Go through each other thread and find
1640  * the latest action that happened before our write.  Two cases:
1641  *
1642  * (1) The action is a write => that write must occur before
1643  * the current write
1644  *
1645  * (2) The action is a read => the write that that action read from
1646  * must occur before the current write.
1647  *
1648  * This method also handles two other issues:
1649  *
1650  * (I) Sequential Consistency: Making sure that if the current write is
1651  * seq_cst, that it occurs after the previous seq_cst write.
1652  *
1653  * (II) Sending the write back to non-synchronizing reads.
1654  *
1655  * @param curr The current action. Must be a write.
1656  * @param send_fv A vector for stashing reads to which we may pass our future
1657  * value. If NULL, then don't record any future values.
1658  * @return True if modification order edges were added; false otherwise
1659  */
1660 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1661 {
1662         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1663         unsigned int i;
1664         bool added = false;
1665         ASSERT(curr->is_write());
1666
1667         if (curr->is_seqcst()) {
1668                 /* We have to at least see the last sequentially consistent write,
1669                          so we are initialized. */
1670                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1671                 if (last_seq_cst != NULL) {
1672                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1673                 }
1674         }
1675
1676         /* Last SC fence in the current thread */
1677         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1678
1679         /* Iterate over all threads */
1680         for (i = 0; i < thrd_lists->size(); i++) {
1681                 /* Last SC fence in thread i, before last SC fence in current thread */
1682                 ModelAction *last_sc_fence_thread_before = NULL;
1683                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1684                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1685
1686                 /* Iterate over actions in thread, starting from most recent */
1687                 action_list_t *list = &(*thrd_lists)[i];
1688                 action_list_t::reverse_iterator rit;
1689                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1690                         ModelAction *act = *rit;
1691                         if (act == curr) {
1692                                 /*
1693                                  * 1) If RMW and it actually read from something, then we
1694                                  * already have all relevant edges, so just skip to next
1695                                  * thread.
1696                                  *
1697                                  * 2) If RMW and it didn't read from anything, we should add
1698                                  * whatever edge we can get to speed up convergence.
1699                                  *
1700                                  * 3) If normal write, we need to look at earlier actions, so
1701                                  * continue processing list.
1702                                  */
1703                                 if (curr->is_rmw()) {
1704                                         if (curr->get_reads_from() != NULL)
1705                                                 break;
1706                                         else
1707                                                 continue;
1708                                 } else
1709                                         continue;
1710                         }
1711
1712                         /* C++, Section 29.3 statement 7 */
1713                         if (last_sc_fence_thread_before && act->is_write() &&
1714                                         *act < *last_sc_fence_thread_before) {
1715                                 added = mo_graph->addEdge(act, curr) || added;
1716                                 break;
1717                         }
1718
1719                         /*
1720                          * Include at most one act per-thread that "happens
1721                          * before" curr
1722                          */
1723                         if (act->happens_before(curr)) {
1724                                 /*
1725                                  * Note: if act is RMW, just add edge:
1726                                  *   act --mo--> curr
1727                                  * The following edge should be handled elsewhere:
1728                                  *   readfrom(act) --mo--> act
1729                                  */
1730                                 if (act->is_write())
1731                                         added = mo_graph->addEdge(act, curr) || added;
1732                                 else if (act->is_read()) {
1733                                         //if previous read accessed a null, just keep going
1734                                         if (act->get_reads_from() == NULL)
1735                                                 continue;
1736                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1737                                 }
1738                                 break;
1739                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1740                                                      !act->same_thread(curr)) {
1741                                 /* We have an action that:
1742                                    (1) did not happen before us
1743                                    (2) is a read and we are a write
1744                                    (3) cannot synchronize with us
1745                                    (4) is in a different thread
1746                                    =>
1747                                    that read could potentially read from our write.  Note that
1748                                    these checks are overly conservative at this point; we'll
1749                                    do more checks before actually removing the
1750                                    PendingFutureValue.
1751
1752                                  */
1753                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1754                                         if (!is_infeasible())
1755                                                 send_fv->push_back(act);
1756                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1757                                                 add_future_value(curr, act);
1758                                 }
1759                         }
1760                 }
1761         }
1762
1763         /*
1764          * All compatible, thread-exclusive promises must be ordered after any
1765          * concrete stores from the same thread, or else they can be merged with
1766          * this store later
1767          */
1768         for (unsigned int i = 0; i < promises->size(); i++)
1769                 if ((*promises)[i]->is_compatible_exclusive(curr))
1770                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1771
1772         return added;
1773 }
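
/*
 * A minimal client-code sketch (hypothetical thread function, not part of
 * this file) of the write-write "happens before" case above: two stores to
 * the same location ordered by happens-before (here, simple program order)
 * must appear in that order in the modification order, so the edge
 * x=1 --mo--> x=2 is added even though both stores are relaxed.
 *
 *   #include <atomic>
 *   std::atomic<int> x(0);
 *   void writer()
 *   {
 *           x.store(1, std::memory_order_relaxed);
 *           x.store(2, std::memory_order_relaxed);  // mo-after the store of 1
 *   }
 */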
1774
1775 /** Arbitrary reads from the future are not allowed.  Section 29.3
1776  * part 9 places some constraints.  This method checks one consequence of that
1777  * constraint.  Others require compiler support. */
1778 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1779 {
1780         if (!writer->is_rmw())
1781                 return true;
1782
1783         if (!reader->is_rmw())
1784                 return true;
1785
1786         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1787                 if (search == reader)
1788                         return false;
1789                 if (search->get_tid() == reader->get_tid() &&
1790                                 search->happens_before(reader))
1791                         break;
1792         }
1793
1794         return true;
1795 }
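
/*
 * A minimal client-code sketch (hypothetical usage, not part of this file)
 * of the RMW cycle rejected above: when both operations are RMWs on the
 * same atomic, the writer may not have (transitively) read from the reader,
 * otherwise each RMW would be supplying the value the other one read, out
 * of thin air.
 *
 *   #include <atomic>
 *   std::atomic<int> x(0);
 *   // Thread A:  x.fetch_add(1, std::memory_order_relaxed);
 *   // Thread B:  x.fetch_add(1, std::memory_order_relaxed);
 *   // A's RMW reading from B's RMW while B's RMW reads from A's RMW is
 *   // ruled out; at least one of them must read the initial store of 0.
 */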
1796
1797 /**
1798  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1799  * some constraints. This method checks the following constraint (others
1800  * require compiler support):
1801  *
1802  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1803  */
1804 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1805 {
1806         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1807         unsigned int i;
1808         /* Iterate over all threads */
1809         for (i = 0; i < thrd_lists->size(); i++) {
1810                 const ModelAction *write_after_read = NULL;
1811
1812                 /* Iterate over actions in thread, starting from most recent */
1813                 action_list_t *list = &(*thrd_lists)[i];
1814                 action_list_t::reverse_iterator rit;
1815                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1816                         ModelAction *act = *rit;
1817
1818                         /* Don't disallow due to act == reader */
1819                         if (!reader->happens_before(act) || reader == act)
1820                                 break;
1821                         else if (act->is_write())
1822                                 write_after_read = act;
1823                         else if (act->is_read() && act->get_reads_from() != NULL)
1824                                 write_after_read = act->get_reads_from();
1825                 }
1826
1827                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1828                         return false;
1829         }
1830         return true;
1831 }
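
/*
 * A minimal client-code sketch (hypothetical thread functions, not part of
 * this file) of the X --hb--> Y --mo--> Z rule above: the load X is
 * sequenced before the store Y, so if Y is mo-before Z the load may not
 * read the "future" value written by Z.
 *
 *   #include <atomic>
 *   std::atomic<int> x(0);
 *   void thread_a()
 *   {
 *           int r = x.load(std::memory_order_relaxed);   // X
 *           x.store(1, std::memory_order_relaxed);       // Y (hb-after X)
 *           (void)r;
 *   }
 *   void thread_b()
 *   {
 *           x.store(2, std::memory_order_relaxed);       // Z
 *   }
 *   // If x=1 --mo--> x=2, then r == 2 is disallowed by this check.
 */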
1832
1833 /**
1834  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1835  * The ModelAction under consideration is expected to be taking part in
1836  * release/acquire synchronization as an object of the "reads from" relation.
1837  * Note that this can only provide release sequence support for RMW chains
1838  * which do not read from the future, as those actions cannot be traced until
1839  * their "promise" is fulfilled. Similarly, we may not even establish the
1840  * presence of a release sequence with certainty, as some modification order
1841  * constraints may be decided further in the future. Thus, this function
1842  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1843  * and a boolean representing certainty.
1844  *
1845  * @param rf The action that might be part of a release sequence. Must be a
1846  * write.
1847  * @param release_heads A pass-by-reference style return parameter. After
1848  * execution of this function, release_heads will contain the heads of all the
1849  * relevant release sequences, if any exist with certainty
1850  * @param pending A pass-by-reference style return parameter which is only used
1851  * when returning false (i.e., uncertain). Holds the information gathered about
1852  * an uncertain release sequence, including any write operations that might
1853  * break the sequence.
1854  * @return true, if the ModelExecution is certain that release_heads is complete;
1855  * false otherwise
1856  */
1857 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1858                 rel_heads_list_t *release_heads,
1859                 struct release_seq *pending) const
1860 {
1861         /* Only check for release sequences if there are no cycles */
1862         if (mo_graph->checkForCycles())
1863                 return false;
1864
1865         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1866                 ASSERT(rf->is_write());
1867
1868                 if (rf->is_release())
1869                         release_heads->push_back(rf);
1870                 else if (rf->get_last_fence_release())
1871                         release_heads->push_back(rf->get_last_fence_release());
1872                 if (!rf->is_rmw())
1873                         break; /* End of RMW chain */
1874
1875                 /** @todo Need to be smarter here...  In the linux lock
1876                  * example, this will run to the beginning of the program for
1877                  * every acquire. */
1878                 /** @todo The way to be smarter here is to keep going until 1
1879                  * thread has a release preceded by an acquire and you've seen
1880                  *       both. */
1881
1882                 /* acq_rel RMW is a sufficient stopping condition */
1883                 if (rf->is_acquire() && rf->is_release())
1884                         return true; /* complete */
1885         }
1886         if (!rf) {
1887                 /* read from future: need to settle this later */
1888                 pending->rf = NULL;
1889                 return false; /* incomplete */
1890         }
1891
1892         if (rf->is_release())
1893                 return true; /* complete */
1894
1895         /* else relaxed write
1896          * - check for fence-release in the same thread (29.8, stmt. 3)
1897          * - check modification order for contiguous subsequence
1898          *   -> rf must be same thread as release */
1899
1900         const ModelAction *fence_release = rf->get_last_fence_release();
1901         /* Synchronize with a fence-release unconditionally; we don't need to
1902          * find any more "contiguous subsequence..." for it */
1903         if (fence_release)
1904                 release_heads->push_back(fence_release);
1905
1906         int tid = id_to_int(rf->get_tid());
1907         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1908         action_list_t *list = &(*thrd_lists)[tid];
1909         action_list_t::const_reverse_iterator rit;
1910
1911         /* Find rf in the thread list */
1912         rit = std::find(list->rbegin(), list->rend(), rf);
1913         ASSERT(rit != list->rend());
1914
1915         /* Find the last {write,fence}-release */
1916         for (; rit != list->rend(); rit++) {
1917                 if (fence_release && *(*rit) < *fence_release)
1918                         break;
1919                 if ((*rit)->is_release())
1920                         break;
1921         }
1922         if (rit == list->rend()) {
1923                 /* No write-release in this thread */
1924                 return true; /* complete */
1925         } else if (fence_release && *(*rit) < *fence_release) {
1926                 /* The fence-release is more recent (and so, "stronger") than
1927                  * the most recent write-release */
1928                 return true; /* complete */
1929         } /* else, need to establish contiguous release sequence */
1930         ModelAction *release = *rit;
1931
1932         ASSERT(rf->same_thread(release));
1933
1934         pending->writes.clear();
1935
1936         bool certain = true;
1937         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1938                 if (id_to_int(rf->get_tid()) == (int)i)
1939                         continue;
1940                 list = &(*thrd_lists)[i];
1941
1942                 /* Can we ensure no future writes from this thread may break
1943                  * the release seq? */
1944                 bool future_ordered = false;
1945
1946                 ModelAction *last = get_last_action(int_to_id(i));
1947                 Thread *th = get_thread(int_to_id(i));
1948                 if ((last && rf->happens_before(last)) ||
1949                                 !is_enabled(th) ||
1950                                 th->is_complete())
1951                         future_ordered = true;
1952
1953                 ASSERT(!th->is_model_thread() || future_ordered);
1954
1955                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1956                         const ModelAction *act = *rit;
1957                         /* Reach synchronization -> this thread is complete */
1958                         if (act->happens_before(release))
1959                                 break;
1960                         if (rf->happens_before(act)) {
1961                                 future_ordered = true;
1962                                 continue;
1963                         }
1964
1965                         /* Only non-RMW writes can break release sequences */
1966                         if (!act->is_write() || act->is_rmw())
1967                                 continue;
1968
1969                         /* Check modification order */
1970                         if (mo_graph->checkReachable(rf, act)) {
1971                                 /* rf --mo--> act */
1972                                 future_ordered = true;
1973                                 continue;
1974                         }
1975                         if (mo_graph->checkReachable(act, release))
1976                                 /* act --mo--> release */
1977                                 break;
1978                         if (mo_graph->checkReachable(release, act) &&
1979                                       mo_graph->checkReachable(act, rf)) {
1980                                 /* release --mo-> act --mo--> rf */
1981                                 return true; /* complete */
1982                         }
1983                         /* act may break release sequence */
1984                         pending->writes.push_back(act);
1985                         certain = false;
1986                 }
1987                 if (!future_ordered)
1988                         certain = false; /* This thread is uncertain */
1989         }
1990
1991         if (certain) {
1992                 release_heads->push_back(release);
1993                 pending->writes.clear();
1994         } else {
1995                 pending->release = release;
1996                 pending->rf = rf;
1997         }
1998         return certain;
1999 }
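
/*
 * A minimal client-code sketch (hypothetical thread functions, not part of
 * this file) of a release sequence this function discovers under C11/C++11
 * rules: the acquire load reads from a relaxed RMW, but the RMW continues
 * the release sequence headed by thread_a's store-release, so the acquire
 * still synchronizes with that head.
 *
 *   #include <atomic>
 *   std::atomic<int> x(0);
 *   void thread_a()
 *   {
 *           x.store(1, std::memory_order_release);       // release head
 *   }
 *   void thread_b()
 *   {
 *           x.fetch_add(1, std::memory_order_relaxed);   // continues the sequence
 *   }
 *   void thread_c()
 *   {
 *           if (x.load(std::memory_order_acquire) == 2)
 *                   ;       // synchronizes with thread_a's store-release
 *   }
 */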
2000
2001 /**
2002  * An interface for getting the release sequence head(s) with which a
2003  * given ModelAction must synchronize. This function only returns a non-empty
2004  * result when it can locate a release sequence head with certainty. Otherwise,
2005  * it may mark the internal state of the ModelExecution so that it will handle
2006  * the release sequence at a later time, causing @a acquire to update its
2007  * synchronization at some later point in execution.
2008  *
2009  * @param acquire The 'acquire' action that may synchronize with a release
2010  * sequence
2011  * @param read The read action that may read from a release sequence; this may
2012  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2013  * when 'acquire' is a fence-acquire)
2014  * @param release_heads A pass-by-reference return parameter. Will be filled
2015  * with the head(s) of the release sequence(s), if they exist with certainty.
2016  * @see ModelExecution::release_seq_heads
2017  */
2018 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2019                 ModelAction *read, rel_heads_list_t *release_heads)
2020 {
2021         const ModelAction *rf = read->get_reads_from();
2022         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2023         sequence->acquire = acquire;
2024         sequence->read = read;
2025
2026         if (!release_seq_heads(rf, release_heads, sequence)) {
2027                 /* add act to 'lazy checking' list */
2028                 pending_rel_seqs->push_back(sequence);
2029         } else {
2030                 snapshot_free(sequence);
2031         }
2032 }
2033
2034 /**
2035  * Attempt to resolve all stashed operations that might synchronize with a
2036  * release sequence for a given location. This implements the "lazy" portion of
2037  * determining whether or not a release sequence was contiguous, since not all
2038  * modification order information is present at the time an action occurs.
2039  *
2040  * @param location The location/object that should be checked for release
2041  * sequence resolutions. A NULL value means to check all locations.
2042  * @param work_queue The work queue to which to add work items as they are
2043  * generated
2044  * @return True if any updates occurred (new synchronization, new mo_graph
2045  * edges)
2046  */
2047 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2048 {
2049         bool updated = false;
2050         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2051         while (it != pending_rel_seqs->end()) {
2052                 struct release_seq *pending = *it;
2053                 ModelAction *acquire = pending->acquire;
2054                 const ModelAction *read = pending->read;
2055
2056                 /* Only resolve sequences on the given location, if provided */
2057                 if (location && read->get_location() != location) {
2058                         it++;
2059                         continue;
2060                 }
2061
2062                 const ModelAction *rf = read->get_reads_from();
2063                 rel_heads_list_t release_heads;
2064                 bool complete;
2065                 complete = release_seq_heads(rf, &release_heads, pending);
2066                 for (unsigned int i = 0; i < release_heads.size(); i++)
2067                         if (!acquire->has_synchronized_with(release_heads[i]))
2068                                 if (synchronize(release_heads[i], acquire))
2069                                         updated = true;
2070
2071                 if (updated) {
2072                         /* Re-check all pending release sequences */
2073                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2074                         /* Re-check read-acquire for mo_graph edges */
2075                         if (acquire->is_read())
2076                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2077
2078                         /* propagate synchronization to later actions */
2079                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2080                         for (; (*rit) != acquire; rit++) {
2081                                 ModelAction *propagate = *rit;
2082                                 if (acquire->happens_before(propagate)) {
2083                                         synchronize(acquire, propagate);
2084                                         /* Re-check 'propagate' for mo_graph edges */
2085                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2086                                 }
2087                         }
2088                 }
2089                 if (complete) {
2090                         it = pending_rel_seqs->erase(it);
2091                         snapshot_free(pending);
2092                 } else {
2093                         it++;
2094                 }
2095         }
2096
2097         // If we resolved promises or added synchronization, check whether we have realized a data race.
2098         checkDataRaces();
2099
2100         return updated;
2101 }
2102
2103 /**
2104  * Performs various bookkeeping operations for the current ModelAction. For
2105  * instance, adds action to the per-object, per-thread action vector and to the
2106  * action trace list of all thread actions.
2107  *
2108  * @param act is the ModelAction to add.
2109  */
2110 void ModelExecution::add_action_to_lists(ModelAction *act)
2111 {
2112         int tid = id_to_int(act->get_tid());
2113         ModelAction *uninit = NULL;
2114         int uninit_id = -1;
2115         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2116         if (list->empty() && act->is_atomic_var()) {
2117                 uninit = get_uninitialized_action(act);
2118                 uninit_id = id_to_int(uninit->get_tid());
2119                 list->push_front(uninit);
2120         }
2121         list->push_back(act);
2122
2123         action_trace->push_back(act);
2124         if (uninit)
2125                 action_trace->push_front(uninit);
2126
2127         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2128         if (tid >= (int)vec->size())
2129                 vec->resize(priv->next_thread_id);
2130         (*vec)[tid].push_back(act);
2131         if (uninit)
2132                 (*vec)[uninit_id].push_front(uninit);
2133
2134         if ((int)thrd_last_action->size() <= tid)
2135                 thrd_last_action->resize(get_num_threads());
2136         (*thrd_last_action)[tid] = act;
2137         if (uninit)
2138                 (*thrd_last_action)[uninit_id] = uninit;
2139
2140         if (act->is_fence() && act->is_release()) {
2141                 if ((int)thrd_last_fence_release->size() <= tid)
2142                         thrd_last_fence_release->resize(get_num_threads());
2143                 (*thrd_last_fence_release)[tid] = act;
2144         }
2145
2146         if (act->is_wait()) {
2147                 void *mutex_loc = (void *) act->get_value();
2148                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2149
2150                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2151                 if (tid >= (int)vec->size())
2152                         vec->resize(priv->next_thread_id);
2153                 (*vec)[tid].push_back(act);
2154         }
2155 }
2156
2157 /**
2158  * @brief Get the last action performed by a particular Thread
2159  * @param tid The thread ID of the Thread in question
2160  * @return The last action in the thread
2161  */
2162 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2163 {
2164         int threadid = id_to_int(tid);
2165         if (threadid < (int)thrd_last_action->size())
2166                 return (*thrd_last_action)[id_to_int(tid)];
2167         else
2168                 return NULL;
2169 }
2170
2171 /**
2172  * @brief Get the last fence release performed by a particular Thread
2173  * @param tid The thread ID of the Thread in question
2174  * @return The last fence release in the thread, if one exists; NULL otherwise
2175  */
2176 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2177 {
2178         int threadid = id_to_int(tid);
2179         if (threadid < (int)thrd_last_fence_release->size())
2180                 return (*thrd_last_fence_release)[id_to_int(tid)];
2181         else
2182                 return NULL;
2183 }
2184
2185 /**
2186  * Gets the last memory_order_seq_cst write (in the total global sequence)
2187  * performed on a particular object (i.e., memory location), not including the
2188  * current action.
2189  * @param curr The current ModelAction; also denotes the object location to
2190  * check
2191  * @return The last seq_cst write
2192  */
2193 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2194 {
2195         void *location = curr->get_location();
2196         action_list_t *list = get_safe_ptr_action(obj_map, location);
2197         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2198         action_list_t::reverse_iterator rit;
2199         for (rit = list->rbegin(); (*rit) != curr; rit++)
2200                 ;
2201         rit++; /* Skip past curr */
2202         for ( ; rit != list->rend(); rit++)
2203                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2204                         return *rit;
2205         return NULL;
2206 }
2207
2208 /**
2209  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2210  * performed in a particular thread, prior to a particular fence.
2211  * @param tid The ID of the thread to check
2212  * @param before_fence The fence from which to begin the search; if NULL, then
2213  * search for the most recent fence in the thread.
2214  * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
2215  */
2216 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2217 {
2218         /* All fences should have NULL location */
2219         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2220         action_list_t::reverse_iterator rit = list->rbegin();
2221
2222         if (before_fence) {
2223                 for (; rit != list->rend(); rit++)
2224                         if (*rit == before_fence)
2225                                 break;
2226
2227                 ASSERT(*rit == before_fence);
2228                 rit++;
2229         }
2230
2231         for (; rit != list->rend(); rit++)
2232                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2233                         return *rit;
2234         return NULL;
2235 }
2236
2237 /**
2238  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2239  * location). This function identifies the mutex according to the current
2240  * action, which is presumed to operate on the same mutex.
2241  * @param curr The current ModelAction; also denotes the object location to
2242  * check
2243  * @return The last unlock operation
2244  */
2245 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2246 {
2247         void *location = curr->get_location();
2248         action_list_t *list = get_safe_ptr_action(obj_map, location);
2249         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2250         action_list_t::reverse_iterator rit;
2251         for (rit = list->rbegin(); rit != list->rend(); rit++)
2252                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2253                         return *rit;
2254         return NULL;
2255 }
2256
2257 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2258 {
2259         ModelAction *parent = get_last_action(tid);
2260         if (!parent)
2261                 parent = get_thread(tid)->get_creation();
2262         return parent;
2263 }
2264
2265 /**
2266  * Returns the clock vector for a given thread.
2267  * @param tid The thread whose clock vector we want
2268  * @return Desired clock vector
2269  */
2270 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2271 {
2272         return get_parent_action(tid)->get_cv();
2273 }
2274
2275 /**
2276  * @brief Find the promise (if any) to resolve for the current action and
2277  * remove it from the pending promise vector
2278  * @param curr The current ModelAction. Should be a write.
2279  * @return The Promise to resolve, if any; otherwise NULL
2280  */
2281 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2282 {
2283         for (unsigned int i = 0; i < promises->size(); i++)
2284                 if (curr->get_node()->get_promise(i)) {
2285                         Promise *ret = (*promises)[i];
2286                         promises->erase(promises->begin() + i);
2287                         return ret;
2288                 }
2289         return NULL;
2290 }
2291
2292 /**
2293  * Resolve a Promise with a current write.
2294  * @param write The ModelAction that is fulfilling Promises
2295  * @param promise The Promise to resolve
2296  * @return True if the Promise was successfully resolved; false otherwise
2297  */
2298 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2299 {
2300         ModelVector<ModelAction *> actions_to_check;
2301
2302         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2303                 ModelAction *read = promise->get_reader(i);
2304                 read_from(read, write);
2305                 actions_to_check.push_back(read);
2306         }
2307         /* Make sure the promise's value matches the write's value */
2308         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2309         if (!mo_graph->resolvePromise(promise, write))
2310                 priv->failed_promise = true;
2311
2312         /**
2313          * @todo  It is possible to end up in an inconsistent state, where a
2314          * "resolved" promise may still be referenced if
2315          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2316          *
2317          * Note that the inconsistency only matters when dumping mo_graph to
2318          * file.
2319          *
2320          * delete promise;
2321          */
2322
2323         //Check whether reading these writes has made threads unable to
2324         //resolve promises
2325         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2326                 ModelAction *read = actions_to_check[i];
2327                 mo_check_promises(read, true);
2328         }
2329
2330         return true;
2331 }
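
/*
 * A minimal client-code sketch (hypothetical thread functions, not part of
 * this file) of a promised future value being resolved: with relaxed
 * ordering, thread_a's load of x is allowed to return 42 even though the
 * exploration reaches thread_b's store later; the load initially reads from
 * a Promise, and resolve_promise() later binds it to the concrete store.
 *
 *   #include <atomic>
 *   std::atomic<int> x(0), y(0);
 *   void thread_a()
 *   {
 *           int r1 = x.load(std::memory_order_relaxed);  // may read a promised 42
 *           y.store(r1, std::memory_order_relaxed);
 *   }
 *   void thread_b()
 *   {
 *           y.load(std::memory_order_relaxed);
 *           x.store(42, std::memory_order_relaxed);
 *   }
 */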
2332
2333 /**
2334  * Compute the set of promises that could potentially be satisfied by this
2335  * action. Note that the set computation actually appears in the Node, not in
2336  * ModelExecution.
2337  * @param curr The ModelAction that may satisfy promises
2338  */
2339 void ModelExecution::compute_promises(ModelAction *curr)
2340 {
2341         for (unsigned int i = 0; i < promises->size(); i++) {
2342                 Promise *promise = (*promises)[i];
2343                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2344                         continue;
2345
2346                 bool satisfy = true;
2347                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2348                         const ModelAction *act = promise->get_reader(j);
2349                         if (act->happens_before(curr) ||
2350                                         act->could_synchronize_with(curr)) {
2351                                 satisfy = false;
2352                                 break;
2353                         }
2354                 }
2355                 if (satisfy)
2356                         curr->get_node()->set_promise(i);
2357         }
2358 }
2359
2360 /** Checks promises in response to a change in a thread's ClockVector. */
2361 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2362 {
2363         for (unsigned int i = 0; i < promises->size(); i++) {
2364                 Promise *promise = (*promises)[i];
2365                 if (!promise->thread_is_available(tid))
2366                         continue;
2367                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2368                         const ModelAction *act = promise->get_reader(j);
2369                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2370                                         merge_cv->synchronized_since(act)) {
2371                                 if (promise->eliminate_thread(tid)) {
2372                                         /* Promise has failed */
2373                                         priv->failed_promise = true;
2374                                         return;
2375                                 }
2376                         }
2377                 }
2378         }
2379 }
2380
2381 void ModelExecution::check_promises_thread_disabled()
2382 {
2383         for (unsigned int i = 0; i < promises->size(); i++) {
2384                 Promise *promise = (*promises)[i];
2385                 if (promise->has_failed()) {
2386                         priv->failed_promise = true;
2387                         return;
2388                 }
2389         }
2390 }
2391
2392 /**
2393  * @brief Checks promises in response to addition to modification order for
2394  * threads.
2395  *
2396  * We test whether threads are still available for satisfying promises after an
2397  * addition to our modification order constraints. Those that are unavailable
2398  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2399  * that promise has failed.
2400  *
2401  * @param act The ModelAction which updated the modification order
2402  * @param is_read_check Should be true if act is a read and we must check for
2403  * updates to the store from which it read (there is a distinction here for
2404  * RMW's, which are both a load and a store)
2405  */
2406 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2407 {
2408         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2409
2410         for (unsigned int i = 0; i < promises->size(); i++) {
2411                 Promise *promise = (*promises)[i];
2412
2413                 // Is this promise on the same location?
2414                 if (!promise->same_location(write))
2415                         continue;
2416
2417                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2418                         const ModelAction *pread = promise->get_reader(j);
2419                         if (!pread->happens_before(act))
2420                                continue;
2421                         if (mo_graph->checkPromise(write, promise)) {
2422                                 priv->failed_promise = true;
2423                                 return;
2424                         }
2425                         break;
2426                 }
2427
2428                 // Don't do any lookups twice for the same thread
2429                 if (!promise->thread_is_available(act->get_tid()))
2430                         continue;
2431
2432                 if (mo_graph->checkReachable(promise, write)) {
2433                         if (mo_graph->checkPromise(write, promise)) {
2434                                 priv->failed_promise = true;
2435                                 return;
2436                         }
2437                 }
2438         }
2439 }
2440
2441 /**
2442  * Compute the set of writes that may break the current pending release
2443  * sequence. This information is extracted from previous release sequence
2444  * calculations.
2445  *
2446  * @param curr The current ModelAction. Must be a release sequence fixup
2447  * action.
2448  */
2449 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2450 {
2451         if (pending_rel_seqs->empty())
2452                 return;
2453
2454         struct release_seq *pending = pending_rel_seqs->back();
2455         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2456                 const ModelAction *write = pending->writes[i];
2457                 curr->get_node()->add_relseq_break(write);
2458         }
2459
2460         /* NULL means don't break the sequence; just synchronize */
2461         curr->get_node()->add_relseq_break(NULL);
2462 }
2463
2464 /**
2465  * Build up an initial set of all past writes that this 'read' action may read
2466  * from, as well as any previously-observed future values that must still be valid.
2467  *
2468  * @param curr is the current ModelAction that we are exploring; it must be a
2469  * 'read' operation.
2470  */
2471 void ModelExecution::build_may_read_from(ModelAction *curr)
2472 {
2473         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2474         unsigned int i;
2475         ASSERT(curr->is_read());
2476
2477         ModelAction *last_sc_write = NULL;
2478
2479         if (curr->is_seqcst())
2480                 last_sc_write = get_last_seq_cst_write(curr);
2481
2482         /* Iterate over all threads */
2483         for (i = 0; i < thrd_lists->size(); i++) {
2484                 /* Iterate over actions in thread, starting from most recent */
2485                 action_list_t *list = &(*thrd_lists)[i];
2486                 action_list_t::reverse_iterator rit;
2487                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2488                         ModelAction *act = *rit;
2489
2490                         /* Only consider 'write' actions */
2491                         if (!act->is_write() || act == curr)
2492                                 continue;
2493
2494                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2495                         bool allow_read = true;
2496
2497                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2498                                 allow_read = false;
2499                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2500                                 allow_read = false;
2501
2502                         if (allow_read) {
2503                                 /* Only add feasible reads */
2504                                 mo_graph->startChanges();
2505                                 r_modification_order(curr, act);
2506                                 if (!is_infeasible())
2507                                         curr->get_node()->add_read_from_past(act);
2508                                 mo_graph->rollbackChanges();
2509                         }
2510
2511                         /* Include at most one act per-thread that "happens before" curr */
2512                         if (act->happens_before(curr))
2513                                 break;
2514                 }
2515         }
2516
2517         /* Inherit existing, promised future values */
2518         for (i = 0; i < promises->size(); i++) {
2519                 const Promise *promise = (*promises)[i];
2520                 const ModelAction *promise_read = promise->get_reader(0);
2521                 if (promise_read->same_var(curr)) {
2522                         /* Only add feasible future-values */
2523                         mo_graph->startChanges();
2524                         r_modification_order(curr, promise);
2525                         if (!is_infeasible())
2526                                 curr->get_node()->add_read_from_promise(promise_read);
2527                         mo_graph->rollbackChanges();
2528                 }
2529         }
2530
2531         /* We can only fail to find a valid may-read-from candidate if the execution is doomed */
2532         if (!curr->get_node()->read_from_size()) {
2533                 priv->no_valid_reads = true;
2534                 set_assert();
2535         }
2536
2537         if (DBG_ENABLED()) {
2538                 model_print("Reached read action:\n");
2539                 curr->print();
2540                 model_print("Printing read_from_past\n");
2541                 curr->get_node()->print_read_from_past();
2542                 model_print("End printing read_from_past\n");
2543         }
2544 }
2545
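     /**
      * @brief Check whether a sleeping thread's read should consider a write
      *
      * Used for the sleep-set reduction: when the reading thread is on the sleep
      * set, it should (roughly) only read from a write whose RMW chain includes a
      * release write performed while this thread was asleep, or an UNINIT action,
      * since only such a write could plausibly wake the thread.
      *
      * @param curr The 'read' action of the sleeping thread
      * @param write A 'write' action that curr might read from
      * @return True if reading from this write should be considered
      */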
2546 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2547 {
2548         for ( ; write != NULL; write = write->get_reads_from()) {
2549                 /* UNINIT actions don't have a Node, and they never sleep */
2550                 if (write->is_uninitialized())
2551                         return true;
2552                 Node *prevnode = write->get_node()->get_parent();
2553
2554                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2555                 if (write->is_release() && thread_sleep)
2556                         return true;
2557                 if (!write->is_rmw())
2558                         return false;
2559         }
2560         return true;
2561 }
2562
2563 /**
2564  * @brief Get an action representing an uninitialized atomic
2565  *
2566  * This function may create a new one or try to retrieve one from the NodeStack
2567  *
2568  * @param curr The current action, which prompts the creation of an UNINIT action
2569  * @return A pointer to the UNINIT ModelAction
2570  */
2571 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2572 {
2573         Node *node = curr->get_node();
2574         ModelAction *act = node->get_uninit_action();
2575         if (!act) {
2576                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2577                 node->set_uninit_action(act);
2578         }
2579         act->create_cv(NULL);
2580         return act;
2581 }
2582
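     /** @brief Print a list of ModelActions along with a simple hash of the list */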
2583 static void print_list(action_list_t *list)
2584 {
2585         action_list_t::iterator it;
2586
2587         model_print("---------------------------------------------------------------------\n");
2588
2589         unsigned int hash = 0;
2590
2591         for (it = list->begin(); it != list->end(); it++) {
2592                 const ModelAction *act = *it;
2593                 if (act->get_seq_number() > 0)
2594                         act->print();
2595                 hash = hash^(hash<<3)^((*it)->hash());
2596         }
2597         model_print("HASH %u\n", hash);
2598         model_print("---------------------------------------------------------------------\n");
2599 }
2600
2601 #if SUPPORT_MOD_ORDER_DUMP
2602 void ModelExecution::dumpGraph(char *filename) const
2603 {
2604         char buffer[200];
2605         sprintf(buffer, "%s.dot", filename);
2606         FILE *file = fopen(buffer, "w");
2607         fprintf(file, "digraph %s {\n", filename);
2608         mo_graph->dumpNodes(file);
2609         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2610
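             /* thread_array[tid] holds the most recently printed action from each
              * thread, so that consecutive actions within a thread can be linked
              * with "sb" (sequenced-before) edges */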
2611         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2612                 ModelAction *act = *it;
2613                 if (act->is_read()) {
2614                         mo_graph->dot_print_node(file, act);
2615                         if (act->get_reads_from())
2616                                 mo_graph->dot_print_edge(file,
2617                                                 act->get_reads_from(),
2618                                                 act,
2619                                                 "label=\"rf\", color=red, weight=2");
2620                         else
2621                                 mo_graph->dot_print_edge(file,
2622                                                 act->get_reads_from_promise(),
2623                                                 act,
2624                                                 "label=\"rf\", color=red");
2625                 }
2626                 if (thread_array[id_to_int(act->get_tid())]) {
2627                         mo_graph->dot_print_edge(file,
2628                                         thread_array[id_to_int(act->get_tid())],
2629                                         act,
2630                                         "label=\"sb\", color=blue, weight=400");
2631                 }
2632
2633                 thread_array[id_to_int(act->get_tid())] = act;
2634         }
2635         fprintf(file, "}\n");
2636         model_free(thread_array);
2637         fclose(file);
2638 }
2639 #endif
2640
2641 /** @brief Prints an execution trace summary. */
2642 void ModelExecution::print_summary() const
2643 {
2644 #if SUPPORT_MOD_ORDER_DUMP
2645         char buffername[100];
2646         sprintf(buffername, "exec%04u", execution_number);
2647         mo_graph->dumpGraphToFile(buffername);
2648         sprintf(buffername, "graph%04u", execution_number);
2649         dumpGraph(buffername);
2650 #endif
2651
2652         model_print("Execution %d:", execution_number);
2653         if (isfeasibleprefix()) {
2654                 if (scheduler->all_threads_sleeping())
2655                         model_print(" SLEEP-SET REDUNDANT");
2656                 model_print("\n");
2657         } else
2658                 print_infeasibility(" INFEASIBLE");
2659         print_list(action_trace);
2660         model_print("\n");
2661         if (!promises->empty()) {
2662                 model_print("Pending promises:\n");
2663                 for (unsigned int i = 0; i < promises->size(); i++) {
2664                         model_print(" [P%u] ", i);
2665                         (*promises)[i]->print();
2666                 }
2667                 model_print("\n");
2668         }
2669 }
2670
2671 /**
2672  * Add a Thread to the system for the first time. Should only be called once
2673  * per thread.
2674  * @param t The Thread to add
2675  */
2676 void ModelExecution::add_thread(Thread *t)
2677 {
2678         thread_map->put(id_to_int(t->get_id()), t);
2679         if (!t->is_model_thread())
2680                 scheduler->add_thread(t);
2681 }
2682
2683 /**
2684  * @brief Get a Thread reference by its ID
2685  * @param tid The Thread's ID
2686  * @return A Thread reference
2687  */
2688 Thread * ModelExecution::get_thread(thread_id_t tid) const
2689 {
2690         return thread_map->get(id_to_int(tid));
2691 }
2692
2693 /**
2694  * @brief Get a reference to the Thread in which a ModelAction was executed
2695  * @param act The ModelAction
2696  * @return A Thread reference
2697  */
2698 Thread * ModelExecution::get_thread(const ModelAction *act) const
2699 {
2700         return get_thread(act->get_tid());
2701 }
2702
2703 /**
2704  * @brief Get a Promise's "promise number"
2705  *
2706  * A "promise number" is an index number that is unique to a promise, valid
2707  * only for a specific snapshot of an execution trace. Promises may come and go
2708  * as they are generated and resolved, so an index only retains meaning for the
2709  * current snapshot.
2710  *
2711  * @param promise The Promise to check
2712  * @return The promise index, if the promise still is valid; otherwise -1
2713  */
2714 int ModelExecution::get_promise_number(const Promise *promise) const
2715 {
2716         for (unsigned int i = 0; i < promises->size(); i++)
2717                 if ((*promises)[i] == promise)
2718                         return i;
2719         /* Not found */
2720         return -1;
2721 }
2722
2723 /**
2724  * @brief Check if a Thread is currently enabled
2725  * @param t The Thread to check
2726  * @return True if the Thread is currently enabled
2727  */
2728 bool ModelExecution::is_enabled(Thread *t) const
2729 {
2730         return scheduler->is_enabled(t);
2731 }
2732
2733 /**
2734  * @brief Check if a Thread is currently enabled
2735  * @param tid The ID of the Thread to check
2736  * @return True if the Thread is currently enabled
2737  */
2738 bool ModelExecution::is_enabled(thread_id_t tid) const
2739 {
2740         return scheduler->is_enabled(tid);
2741 }
2742
2743 /**
2744  * @brief Select the next thread to execute based on the current action
2745  *
2746  * RMW actions occur in two parts and cannot be split. Similarly, THREAD_CREATE
2747  * actions should be followed immediately by the execution of their child thread.
2748  * In either case, the current action determines the next thread to schedule.
2749  *
2750  * @param curr The current action
2751  * @return The next thread to run, if the current action will determine this
2752  * selection; otherwise NULL
2753  */
2754 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2755 {
2756         /* Do not split atomic RMW */
2757         if (curr->is_rmwr())
2758                 return get_thread(curr);
2759         /* Follow CREATE with the created thread */
2760         if (curr->get_type() == THREAD_CREATE)
2761                 return curr->get_thread_operand();
2762         return NULL;
2763 }
2764
2765 /** @return True if the execution has taken too many steps */
2766 bool ModelExecution::too_many_steps() const
2767 {
2768         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2769 }
2770
2771 /**
2772  * Takes the next step in the execution, if possible.
2773  * @param curr The current step to take
2774  * @return Returns the next Thread to run, if any; NULL if this execution
2775  * should terminate
2776  */
2777 Thread * ModelExecution::take_step(ModelAction *curr)
2778 {
2779         Thread *curr_thrd = get_thread(curr);
2780         ASSERT(curr_thrd->get_state() == THREAD_READY);
2781
2782         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2783         curr = check_current_action(curr);
2784         ASSERT(curr);
2785
2786         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2787                 scheduler->remove_thread(curr_thrd);
2788
2789         return action_select_next_thread(curr);
2790 }
2791
2792 /**
2793  * Launch end-of-execution release sequence fixups only when the execution is
2794  * otherwise feasible and:
2795  *
2796  * (1) there are pending release sequences,
2797  * (2) there are pending assertions that could be invalidated by a change
2798  * in clock vectors (i.e., data races), and
2799  * (3) there are no pending promises.
2800  */
2801 void ModelExecution::fixup_release_sequences()
2802 {
2803         while (!pending_rel_seqs->empty() &&
2804                         is_feasible_prefix_ignore_relseq() &&
2805                         !unrealizedraces.empty()) {
2806                 model_print("*** WARNING: release sequence fixup action "
2807                                 "(%zu pending release sequence(s)) ***\n",
2808                                 pending_rel_seqs->size());
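                     /* Execute a special fixup action on behalf of the model thread */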
2809                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2810                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2811                                 model_thread);
2812                 take_step(fixup);
2813         }
2814 }