BROKEN: restructure much of ModelChecker as ModelExecution class
[model-checker.git] / execution.cc
#include <stdio.h>
#include <algorithm>
#include <mutex>
#include <new>
#include <stdarg.h>

#include "execution.h"
#include "model.h"
#include "action.h"
#include "nodestack.h"
#include "schedule.h"
#include "snapshot-interface.h"
#include "common.h"
#include "clockvector.h"
#include "cyclegraph.h"
#include "promise.h"
#include "datarace.h"
#include "threads-model.h"
#include "output.h"
#include "bugmessage.h"

#define INITIAL_THREAD_ID	0

/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		stats(),
		failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	unsigned int next_thread_id;
	modelclock_t used_sequence_numbers;
	ModelAction *next_backtrack;
	SnapVector<bug_message *> bugs;
	struct execution_stats stats;
	bool failed_promise;
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	bool asserted;

	SNAPSHOTALLOC
};

/** @brief Constructor */
ModelExecution::ModelExecution(struct model_params *params, Scheduler *scheduler, NodeStack *node_stack) :
	params(params),
	scheduler(scheduler),
	action_trace(new action_list_t()),
	thread_map(new HashTable<int, Thread *, int>()),
	obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4>()),
	promises(new SnapVector<Promise *>()),
	futurevalues(new SnapVector<struct PendingFutureValue>()),
	pending_rel_seqs(new SnapVector<struct release_seq *>()),
	thrd_last_action(new SnapVector<ModelAction *>(1)),
	thrd_last_fence_release(new SnapVector<ModelAction *>()),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph()),
	execution_number(1)
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	thread_map->put(id_to_int(model_thread->get_id()), model_thread);
}
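
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): the
 * owning ModelChecker would construct one ModelExecution per exploration,
 * roughly as
 *
 *   ModelExecution *execution =
 *           new ModelExecution(&params, scheduler, node_stack);
 *
 * where 'params', 'scheduler' and 'node_stack' outlive the execution; the
 * snapshotted per-execution state lives in the 'priv' structure above.
 */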

/** @brief Destructor */
ModelExecution::~ModelExecution()
{
	for (unsigned int i = 0; i < get_num_threads(); i++)
		delete thread_map->get(i);
	delete thread_map;

	delete obj_thrd_map;
	delete obj_map;
	delete condvar_waiters_map;
	delete action_trace;

	for (unsigned int i = 0; i < promises->size(); i++)
		delete (*promises)[i];
	delete promises;

	delete pending_rel_seqs;

	delete thrd_last_action;
	delete thrd_last_fence_release;
	delete mo_graph;
	delete priv;
}

static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
{
	action_list_t *tmp = hash->get(ptr);
	if (tmp == NULL) {
		tmp = new action_list_t();
		hash->put(ptr, tmp);
	}
	return tmp;
}

static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
{
	SnapVector<action_list_t> *tmp = hash->get(ptr);
	if (tmp == NULL) {
		tmp = new SnapVector<action_list_t>();
		hash->put(ptr, tmp);
	}
	return tmp;
}
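
/*
 * Illustrative sketch (assumption, not part of this file): both helpers above
 * implement the same get-or-create idiom, e.g.
 *
 *   action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
 *   list->push_back(act);
 *
 * so callers never need to distinguish the first access to a location from
 * later ones.
 */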

action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) {
	SnapVector<action_list_t> *wrv = obj_thrd_map->get(obj);
	if (wrv == NULL)
		return NULL;
	unsigned int thread = id_to_int(tid);
	if (thread < wrv->size())
		return &(*wrv)[thread];
	else
		return NULL;
}

/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
	return priv->next_thread_id++;
}

/** @return the number of user threads created during this execution */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}

/** @return a sequence number for a new ModelAction */
modelclock_t ModelExecution::get_next_seq_num()
{
	return ++priv->used_sequence_numbers;
}

/**
 * @brief Should the current action wake up a given thread?
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	if (curr->is_rmwr())
		return false;
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
		return true;
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
		return true;
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
			return true;
	}
	return false;
}
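
/*
 * Illustrative sketch of the last case above (hypothetical user code, not
 * part of this file):
 *
 *   Sleeping thread:  r = x.load(memory_order_acquire);   // blocked/asleep
 *   Current thread:   atomic_thread_fence(memory_order_release);
 *                     x.store(1, memory_order_relaxed);   // curr
 *
 * The relaxed store alone cannot synchronize with the pending load-acquire,
 * but together with the earlier fence-release it can, so the sleeping thread
 * must be woken up and reconsidered.
 */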

void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
{
	for (unsigned int i = 0; i < get_num_threads(); i++) {
		Thread *thr = get_thread(int_to_id(i));
		if (scheduler->is_sleep_set(thr)) {
			if (should_wake_up(curr, thr))
				/* Remove this thread from sleep set */
				scheduler->remove_sleep(thr);
		}
	}
}

/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}

bool ModelExecution::assert_bug(const char *msg)
{
	priv->bugs.push_back(new bug_message(msg));

	if (isfeasibleprefix()) {
		set_assert();
		return true;
	}
	return false;
}

/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}

SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}

/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}

/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}

/**
 * Check if we are in a deadlock. Should only be called at the end of an
 * execution, although it should not give false positives in the middle of an
 * execution (there should be some ENABLED thread).
 *
 * @return True if program is in a deadlock; false otherwise
 */
bool ModelExecution::is_deadlocked() const
{
	bool blocking_threads = false;
	for (unsigned int i = 0; i < get_num_threads(); i++) {
		thread_id_t tid = int_to_id(i);
		if (is_enabled(tid))
			return false;
		Thread *t = get_thread(tid);
		if (!t->is_model_thread() && t->get_pending())
			blocking_threads = true;
	}
	return blocking_threads;
}
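
/*
 * Illustrative sketch of a deadlock this would report (hypothetical user
 * code, not part of this file):
 *
 *   Thread 1:  a.lock(); b.lock();
 *   Thread 2:  b.lock(); a.lock();
 *
 * Once each thread holds one mutex and blocks on the other, no thread is
 * enabled but both still have pending actions, so this returns true.
 */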

/**
 * Check if this is a complete execution. That is, have all threads completed
 * execution (rather than exiting because sleep sets have forced a redundant
 * execution).
 *
 * @return True if the execution is complete.
 */
bool ModelExecution::is_complete_execution() const
{
	for (unsigned int i = 0; i < get_num_threads(); i++)
		if (is_enabled(int_to_id(i)))
			return false;
	return true;
}

/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	action_list_t *list = action_trace;
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
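
/*
 * Illustrative sketch (hypothetical user code, not part of this file):
 *
 *   Thread 1:  r = x.load(memory_order_relaxed);
 *              atomic_thread_fence(memory_order_acquire);
 *   Thread 2:  atomic_thread_fence(memory_order_release);
 *              x.store(1, memory_order_relaxed);   // act
 *
 * The store and the earlier load could synchronize through the two fences,
 * so the pair should also be explored in the opposite order; the prior
 * fence-acquire is the conflict this function returns for the store.
 */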

/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primarily includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	/* case ATOMIC_FENCE: fences don't directly cause backtracking */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}

/** This method finds backtracking points against which we should try to
 * reorder the parameter ModelAction.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
				id_to_int(prev->get_tid()),
				id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
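
/*
 * Illustrative sketch (hypothetical user code, not part of this file): with
 *
 *   Thread 1:  x.store(1, memory_order_release);
 *   Thread 2:  r = x.load(memory_order_acquire);
 *
 * if the current execution happened to run the load before the store, the
 * store's conflict with that earlier load causes a backtracking point to be
 * recorded there, so a later execution explores the store first, which is
 * the classic DPOR treatment of potentially-synchronizing pairs.
 */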

/**
 * @brief Cache a backtracking point as the "most recent", if eligible
 *
 * Note that this does not prepare the NodeStack for this backtracking
 * operation, it only caches the action on a per-execution basis
 *
 * @param act The operation at which we should explore a different next action
 * (i.e., backtracking point)
 * @return True, if this action is now the most recent backtracking point;
 * false otherwise
 */
bool ModelExecution::set_latest_backtrack(ModelAction *act)
{
	if (!priv->next_backtrack || *act > *priv->next_backtrack) {
		priv->next_backtrack = act;
		return true;
	}
	return false;
}

/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelExecution::get_next_backtrack()
{
	ModelAction *next = priv->next_backtrack;
	priv->next_backtrack = NULL;
	return next;
}

/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					mo_graph->rollbackChanges();
					continue;
				} else {
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			Promise *promise = new Promise(curr, fv);
			curr->set_read_from_promise(promise);
			promises->push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}

/**
 * Processes a lock, trylock, unlock, wait, or notify model action.
 * @param curr is the mutex/condition-variable model action to process.
 *
 * The trylock operation checks whether the lock is already held. If not,
 * it falls through to the normal lock case. If so, it fails and returns.
 *
 * The lock operation has already been checked to be enabled, so it
 * simply grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall through to the lock case
	case ATOMIC_LOCK: {
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
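
/*
 * Illustrative sketch of the wait/notify handling above (hypothetical user
 * code, not part of this file):
 *
 *   Waiter:    m.lock(); cv.wait(m); m.unlock();
 *   Notifier:  m.lock(); cv.notify_one(); m.unlock();
 *
 * ATOMIC_WAIT releases the mutex and, unless a spurious wake-up is being
 * simulated, queues the waiter in condvar_waiters_map; ATOMIC_NOTIFY_ONE
 * picks one queued waiter (selected by the Node's "misc" value so that every
 * choice is eventually explored) and wakes it.
 */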

/**
 * @brief Check if the current pending promises allow a future value to be sent
 *
 * If one of the following is true:
 *  (a) there are no pending promises
 *  (b) the reader and writer do not cross any promises
 * Then, it is safe to pass a future value back now.
 *
 * Otherwise, we must save the pending future value until (a) or (b) is true
 *
 * @param writer The operation which sends the future value. Must be a write.
 * @param reader The operation which will observe the value. Must be a read.
 * @return True if the future value can be sent now; false if it must wait.
 */
bool ModelExecution::promises_may_allow(const ModelAction *writer,
		const ModelAction *reader) const
{
	if (promises->empty())
		return true;
	for (int i = promises->size() - 1; i >= 0; i--) {
		ModelAction *pr = (*promises)[i]->get_reader(0);
		//reader is after promise...doesn't cross any promise
		if (*reader > *pr)
			return true;
		//writer is after promise, reader before...bad...
		if (*writer > *pr)
			return false;
	}
	return true;
}
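
/*
 * Illustrative sketch (sequence numbers are made up): suppose the first
 * reader of some pending promise has sequence number 10. A writer at 15
 * sending a future value to a reader at 20 does not cross that promise
 * (the reader is after it), so the value may be sent now; the same writer
 * sending to a reader at 5 would cross it (writer after, reader before),
 * so the value must be deferred.
 */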

/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}

/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	bool updated_mod_order = w_modification_order(curr, &send_fv);
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				futurevalues->push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values */
	for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = (*futurevalues)[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues->erase(futurevalues->begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}

/**
 * Process a fence ModelAction
 * @param curr The ModelAction to process
 * @return True if synchronization was updated
 */
bool ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurrence (not in this function), for
	 *   use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 *   sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = action_trace;
		action_list_t::reverse_iterator rit;
		/* Find X : is_read(X) && X --sb-> curr */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr)
				continue;
			if (act->get_tid() != curr->get_tid())
				continue;
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
				break;
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
				break;
			if (!act->is_read())
				continue;
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
				continue;

			/* Establish hypothetical release sequences */
			rel_heads_list_t release_heads;
			get_release_seq_heads(curr, act, &release_heads);
			for (unsigned int i = 0; i < release_heads.size(); i++)
				synchronize(release_heads[i], curr);
			if (release_heads.size() != 0)
				updated = true;
		}
	}
	return updated;
}
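
/*
 * Illustrative sketch of the fence-acquire case (hypothetical user code, not
 * part of this file):
 *
 *   Thread 1:  data = 42;
 *              x.store(1, memory_order_release);
 *   Thread 2:  r = x.load(memory_order_relaxed);
 *              atomic_thread_fence(memory_order_acquire);   // curr
 *
 * The relaxed load does not synchronize by itself, but the later fence-acquire
 * lets it act as the acquire side; this function walks back over such
 * sequenced-before loads and synchronizes curr with their release heads.
 */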

/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc.  This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises->size(); i++) {
			Promise *promise = (*promises)[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises->size(); i++) {
			Promise *promise = (*promises)[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}

/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	const ModelAction *write = curr->get_node()->get_relseq_break();
	struct release_seq *sequence = pending_rel_seqs->back();
	pending_rel_seqs->pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check act for mo_graph edges */
		work_queue->push_back(MOEdgeWorkEntry(acquire));

		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				synchronize(acquire, propagate);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));
			}
		}
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}

/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" in its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}

/**
 * @brief Establish reads-from relation between two actions
 *
 * Perform basic operations involved with establishing a concrete rf relation,
 * including setting the ModelAction data and checking for release sequences.
 *
 * @param act The action that is reading (must be a read)
 * @param rf The action from which we are reading (must be a write)
 *
 * @return True if this read established synchronization
 */
bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
{
	ASSERT(rf);
	ASSERT(rf->is_write());

	act->set_read_from(rf);
	if (act->is_acquire()) {
		rel_heads_list_t release_heads;
		get_release_seq_heads(act, act, &release_heads);
		int num_heads = release_heads.size();
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!synchronize(release_heads[i], act))
				num_heads--;
		return num_heads > 0;
	}
	return false;
}

/**
 * @brief Synchronizes two actions
 *
 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
 * This function performs the synchronization as well as providing other hooks
 * for other checks along with synchronization.
 *
 * @param first The left-hand side of the synchronizes-with relation
 * @param second The right-hand side of the synchronizes-with relation
 * @return True if the synchronization was successful (i.e., was consistent
 * with the execution order); false otherwise
 */
bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
{
	if (*second < *first) {
		set_bad_synchronization();
		return false;
	}
	check_promises(first->get_tid(), second->get_cv(), first->get_cv());
	return second->synchronize_with(first);
}
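
/*
 * Illustrative sketch of the clock-vector merge performed by
 * synchronize_with() (the numbers are made up): if 'first' carries clock
 * vector [2, 5, 1] and the vector of 'second' is currently [3, 1, 4], a
 * successful synchronization leaves 'second' with the element-wise maximum
 * [3, 5, 4], so everything ordered before 'first' is now also ordered
 * before 'second'.
 */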

/**
 * Check promises and eliminate potentially-satisfying threads when a thread is
 * blocked (e.g., join, lock). A thread which is waiting on another thread can
 * no longer satisfy a promise generated from that thread.
 *
 * @param blocker The thread on which a thread is waiting
 * @param waiting The waiting thread
 */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}

/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether a lock or join operation would be successful (i.e., is the
 * lock already locked, or is the joined thread already complete). If not, put
 * the action in a waiter list.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelExecution::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		std::mutex *lock = curr->get_mutex();
		struct std::mutex_state *state = lock->get_state();
		if (state->locked)
			return false;
	} else if (curr->is_thread_join()) {
		Thread *blocking = curr->get_thread_operand();
		if (!blocking->is_complete()) {
			thread_blocking_check_promises(blocking, get_thread(curr));
			return false;
		}
	}

	return true;
}

/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr; may be NULL, if the current action is not enabled to run
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}

void ModelExecution::check_curr_backtracking(ModelAction *curr)
{
	Node *currnode = curr->get_node();
	Node *parnode = currnode->get_parent();

	if ((parnode && !parnode->backtrack_empty()) ||
			!currnode->misc_empty() ||
			!currnode->read_from_empty() ||
			!currnode->promise_empty() ||
			!currnode->relseq_break_empty()) {
		set_latest_backtrack(curr);
	}
}

bool ModelExecution::promises_expired() const
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (promise->get_expiration() < priv->used_sequence_numbers)
			return true;
	}
	return false;
}

/**
 * This is the strongest feasibility check available.
 * @return whether the current trace (partial or complete) must be a prefix of
 * a feasible trace.
 */
bool ModelExecution::isfeasibleprefix() const
{
	return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
}

/**
1352  * Print diagnostic information about an infeasible execution
1353  * @param prefix A string to prefix the output with; if NULL, then a default
1354  * message prefix will be provided
1355  */
1356 void ModelExecution::print_infeasibility(const char *prefix) const
1357 {
1358         char buf[100];
1359         char *ptr = buf;
1360         if (mo_graph->checkForCycles())
1361                 ptr += sprintf(ptr, "[mo cycle]");
1362         if (priv->failed_promise)
1363                 ptr += sprintf(ptr, "[failed promise]");
1364         if (priv->too_many_reads)
1365                 ptr += sprintf(ptr, "[too many reads]");
1366         if (priv->no_valid_reads)
1367                 ptr += sprintf(ptr, "[no valid reads-from]");
1368         if (priv->bad_synchronization)
1369                 ptr += sprintf(ptr, "[bad sw ordering]");
1370         if (promises_expired())
1371                 ptr += sprintf(ptr, "[promise expired]");
1372         if (promises->size() != 0)
1373                 ptr += sprintf(ptr, "[unresolved promise]");
1374         if (ptr != buf)
1375                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1376 }
1377
1378 /**
1379  * Returns whether the current completed trace is feasible, except for pending
1380  * release sequences.
1381  */
1382 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1383 {
1384         return !is_infeasible() && promises->size() == 0;
1385 }
1386
1387 /**
1388  * Check if the current partial trace is infeasible. Does not check any
1389  * end-of-execution flags, which might rule out the execution. Thus, this is
1390  * useful only for ruling an execution as infeasible.
1391  * @return whether the current partial trace is infeasible.
1392  */
1393 bool ModelExecution::is_infeasible() const
1394 {
1395         return mo_graph->checkForCycles() ||
1396                 priv->no_valid_reads ||
1397                 priv->failed_promise ||
1398                 priv->too_many_reads ||
1399                 priv->bad_synchronization ||
1400                 promises_expired();
1401 }
1402
1403 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1404 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1405         ModelAction *lastread = get_last_action(act->get_tid());
1406         lastread->process_rmw(act);
1407         if (act->is_rmw()) {
1408                 if (lastread->get_reads_from())
1409                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1410                 else
1411                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1412                 mo_graph->commitChanges();
1413         }
1414         return lastread;
1415 }
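
/*
 * Illustrative sketch (not part of the algorithm; the litmus test and names
 * below are hypothetical): for
 *
 *   T1: x.fetch_add(1, std::memory_order_relaxed);   // RMW, reads from W
 *   T2: x.store(2, std::memory_order_relaxed);       // unrelated write W'
 *
 * the RMW edge added above pins the fetch_add immediately after W in the
 * modification order, so W' cannot be ordered between W and the RMW. This is
 * how the atomicity requirement on read-modify-write operations (C++11
 * 29.3p12) is reflected in mo_graph.
 */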
1416
1417 /**
1418  * A helper function for ModelExecution::check_recency, to check if the current
1419  * thread is able to read from a different write/promise for 'params.maxreads'
1420  * number of steps and if that write/promise should become visible (i.e., is
1421  * ordered later in the modification order). This helps model memory liveness.
1422  *
1423  * @param curr The current action. Must be a read.
1424  * @param rf The write/promise from which we plan to read
1425  * @param other_rf The write/promise from which we may read
1426  * @return True if we were able to read from other_rf for params.maxreads steps
1427  */
1428 template <typename T, typename U>
1429 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1430 {
1431         /* Need a different write/promise */
1432         if (other_rf->equals(rf))
1433                 return false;
1434
1435         /* Only look for "newer" writes/promises */
1436         if (!mo_graph->checkReachable(rf, other_rf))
1437                 return false;
1438
1439         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1440         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1441         action_list_t::reverse_iterator rit = list->rbegin();
1442         ASSERT((*rit) == curr);
1443         /* Skip past curr */
1444         rit++;
1445
1446         /* Does this write/promise work for everyone? */
1447         for (int i = 0; i < params->maxreads; i++, rit++) {
1448                 ModelAction *act = *rit;
1449                 if (!act->may_read_from(other_rf))
1450                         return false;
1451         }
1452         return true;
1453 }
1454
1455 /**
1456  * Checks whether a thread has read from the same write or Promise too many
1457  * times without seeing the effects of a later write/Promise.
1458  *
1459  * Basic idea:
1460  * 1) there must be a different write/promise that we could read from,
1461  * 2) we must have read from the same write/promise in excess of maxreads times,
1462  * 3) that other write/promise must have been in the reads_from set maxreads times, and
1463  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1464  *
1465  * If so, we decide that the execution is no longer feasible.
1466  *
1467  * @param curr The current action. Must be a read.
1468  * @param rf The ModelAction/Promise from which we might read.
1469  * @return True if the read should succeed; false otherwise
1470  */
1471 template <typename T>
1472 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1473 {
1474         if (!params->maxreads)
1475                 return true;
1476
1477         //NOTE: Next check is just optimization, not really necessary....
1478         if (curr->get_node()->get_read_from_past_size() +
1479                         curr->get_node()->get_read_from_promise_size() <= 1)
1480                 return true;
1481
1482         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1483         int tid = id_to_int(curr->get_tid());
1484         ASSERT(tid < (int)thrd_lists->size());
1485         action_list_t *list = &(*thrd_lists)[tid];
1486         action_list_t::reverse_iterator rit = list->rbegin();
1487         ASSERT((*rit) == curr);
1488         /* Skip past curr */
1489         rit++;
1490
1491         action_list_t::reverse_iterator ritcopy = rit;
1492         /* See if we have enough reads from the same value */
1493         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1494                 if (ritcopy == list->rend())
1495                         return true;
1496                 ModelAction *act = *ritcopy;
1497                 if (!act->is_read())
1498                         return true;
1499                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1500                         return true;
1501                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1502                         return true;
1503                 if (act->get_node()->get_read_from_past_size() +
1504                                 act->get_node()->get_read_from_promise_size() <= 1)
1505                         return true;
1506         }
1507         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1508                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1509                 if (should_read_instead(curr, rf, write))
1510                         return false; /* liveness failure */
1511         }
1512         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1513                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1514                 if (should_read_instead(curr, rf, promise))
1515                         return false; /* liveness failure */
1516         }
1517         return true;
1518 }
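
/*
 * Illustrative example (a sketch; the litmus test below is hypothetical):
 * with params->maxreads == 2, a spinning reader
 *
 *   T1: while (!flag.load(std::memory_order_relaxed)) ;
 *   T2: flag.store(true, std::memory_order_relaxed);
 *
 * may keep reading the stale initial value only a bounded number of times
 * once T2's store is mo-later and readable by each of those loads; after
 * that, check_recency() reports a liveness failure and the execution is
 * treated as infeasible.
 */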
1519
1520 /**
1521  * @brief Updates the mo_graph with the constraints imposed from the current
1522  * read.
1523  *
1524  * Basic idea is the following: Go through each other thread and find
1525  * the last action that happened before our read.  Two cases:
1526  *
1527  * -# The action is a write: that write must either occur before
1528  * the write we read from or be the write we read from.
1529  * -# The action is a read: the write that that action read from
1530  * must occur before the write we read from or be the same write.
1531  *
1532  * @param curr The current action. Must be a read.
1533  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1534  * @return True if modification order edges were added; false otherwise
1535  */
1536 template <typename rf_type>
1537 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1538 {
1539         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1540         unsigned int i;
1541         bool added = false;
1542         ASSERT(curr->is_read());
1543
1544         /* Last SC fence in the current thread */
1545         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1546         ModelAction *last_sc_write = NULL;
1547         if (curr->is_seqcst())
1548                 last_sc_write = get_last_seq_cst_write(curr);
1549
1550         /* Iterate over all threads */
1551         for (i = 0; i < thrd_lists->size(); i++) {
1552                 /* Last SC fence in thread i */
1553                 ModelAction *last_sc_fence_thread_local = NULL;
1554                 if (int_to_id((int)i) != curr->get_tid())
1555                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1556
1557                 /* Last SC fence in thread i, before last SC fence in current thread */
1558                 ModelAction *last_sc_fence_thread_before = NULL;
1559                 if (last_sc_fence_local)
1560                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1561
1562                 /* Iterate over actions in thread, starting from most recent */
1563                 action_list_t *list = &(*thrd_lists)[i];
1564                 action_list_t::reverse_iterator rit;
1565                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1566                         ModelAction *act = *rit;
1567
1568                         /* Skip curr */
1569                         if (act == curr)
1570                                 continue;
1571                         /* Don't want to add reflexive edges on 'rf' */
1572                         if (act->equals(rf)) {
1573                                 if (act->happens_before(curr))
1574                                         break;
1575                                 else
1576                                         continue;
1577                         }
1578
1579                         if (act->is_write()) {
1580                                 /* C++, Section 29.3 statement 5 */
1581                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1582                                                 *act < *last_sc_fence_thread_local) {
1583                                         added = mo_graph->addEdge(act, rf) || added;
1584                                         break;
1585                                 }
1586                                 /* C++, Section 29.3 statement 4 */
1587                                 else if (act->is_seqcst() && last_sc_fence_local &&
1588                                                 *act < *last_sc_fence_local) {
1589                                         added = mo_graph->addEdge(act, rf) || added;
1590                                         break;
1591                                 }
1592                                 /* C++, Section 29.3 statement 6 */
1593                                 else if (last_sc_fence_thread_before &&
1594                                                 *act < *last_sc_fence_thread_before) {
1595                                         added = mo_graph->addEdge(act, rf) || added;
1596                                         break;
1597                                 }
1598                         }
1599
1600                         /* C++, Section 29.3 statement 3 (second subpoint) */
1601                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1602                                 added = mo_graph->addEdge(act, rf) || added;
1603                                 break;
1604                         }
1605
1606                         /*
1607                          * Include at most one act per-thread that "happens
1608                          * before" curr
1609                          */
1610                         if (act->happens_before(curr)) {
1611                                 if (act->is_write()) {
1612                                         added = mo_graph->addEdge(act, rf) || added;
1613                                 } else {
1614                                         const ModelAction *prevrf = act->get_reads_from();
1615                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1616                                         if (prevrf) {
1617                                                 if (!prevrf->equals(rf))
1618                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1619                                         } else if (!prevrf_promise->equals(rf)) {
1620                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1621                                         }
1622                                 }
1623                                 break;
1624                         }
1625                 }
1626         }
1627
1628         /*
1629          * All compatible, thread-exclusive promises must be ordered after any
1630          * concrete loads from the same thread
1631          */
1632         for (unsigned int i = 0; i < promises->size(); i++)
1633                 if ((*promises)[i]->is_compatible_exclusive(curr))
1634                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1635
1636         return added;
1637 }
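
/*
 * Illustrative sketch of case (2) above (read-read coherence; the litmus
 * test is hypothetical):
 *
 *   T1: x.store(1, std::memory_order_relaxed);    // write A
 *   T2: r1 = x.load(std::memory_order_relaxed);   // reads from A
 *       r2 = x.load(std::memory_order_relaxed);   // curr, reads from B
 *
 * When curr is processed, the earlier load in T2 happens before it and read
 * from A, so the edge A --mo--> B is added (unless A and B are the same
 * write), matching the coherence requirements of C++11 Section 29.3.
 */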
1638
1639 /**
1640  * Updates the mo_graph with the constraints imposed from the current write.
1641  *
1642  * Basic idea is the following: Go through each other thread and find
1643  * the latest action that happened before our write.  Two cases:
1644  *
1645  * (1) The action is a write => that write must occur before
1646  * the current write
1647  *
1648  * (2) The action is a read => the write that that action read from
1649  * must occur before the current write.
1650  *
1651  * This method also handles two other issues:
1652  *
1653  * (I) Sequential Consistency: Making sure that if the current write is
1654  * seq_cst, that it occurs after the previous seq_cst write.
1655  *
1656  * (II) Sending the write back to non-synchronizing reads.
1657  *
1658  * @param curr The current action. Must be a write.
1659  * @param send_fv A vector for stashing reads to which we may pass our future
1660  * value. If NULL, then don't record any future values.
1661  * @return True if modification order edges were added; false otherwise
1662  */
1663 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1664 {
1665         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1666         unsigned int i;
1667         bool added = false;
1668         ASSERT(curr->is_write());
1669
1670         if (curr->is_seqcst()) {
1671                 /* We have to at least see the last sequentially consistent write,
1672                          so we are initialized. */
1673                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1674                 if (last_seq_cst != NULL) {
1675                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1676                 }
1677         }
1678
1679         /* Last SC fence in the current thread */
1680         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1681
1682         /* Iterate over all threads */
1683         for (i = 0; i < thrd_lists->size(); i++) {
1684                 /* Last SC fence in thread i, before last SC fence in current thread */
1685                 ModelAction *last_sc_fence_thread_before = NULL;
1686                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1687                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1688
1689                 /* Iterate over actions in thread, starting from most recent */
1690                 action_list_t *list = &(*thrd_lists)[i];
1691                 action_list_t::reverse_iterator rit;
1692                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1693                         ModelAction *act = *rit;
1694                         if (act == curr) {
1695                                 /*
1696                                  * 1) If RMW and it actually read from something, then we
1697                                  * already have all relevant edges, so just skip to next
1698                                  * thread.
1699                                  *
1700                                  * 2) If RMW and it didn't read from anything, we should
1701                                  * add whatever edge we can get to speed up convergence.
1702                                  *
1703                                  * 3) If normal write, we need to look at earlier actions, so
1704                                  * continue processing list.
1705                                  */
1706                                 if (curr->is_rmw()) {
1707                                         if (curr->get_reads_from() != NULL)
1708                                                 break;
1709                                         else
1710                                                 continue;
1711                                 } else
1712                                         continue;
1713                         }
1714
1715                         /* C++, Section 29.3 statement 7 */
1716                         if (last_sc_fence_thread_before && act->is_write() &&
1717                                         *act < *last_sc_fence_thread_before) {
1718                                 added = mo_graph->addEdge(act, curr) || added;
1719                                 break;
1720                         }
1721
1722                         /*
1723                          * Include at most one act per-thread that "happens
1724                          * before" curr
1725                          */
1726                         if (act->happens_before(curr)) {
1727                                 /*
1728                                  * Note: if act is RMW, just add edge:
1729                                  *   act --mo--> curr
1730                                  * The following edge should be handled elsewhere:
1731                                  *   readfrom(act) --mo--> act
1732                                  */
1733                                 if (act->is_write())
1734                                         added = mo_graph->addEdge(act, curr) || added;
1735                                 else if (act->is_read()) {
1736                                         //if previous read accessed a null, just keep going
1737                                         if (act->get_reads_from() == NULL)
1738                                                 continue;
1739                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1740                                 }
1741                                 break;
1742                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1743                                                      !act->same_thread(curr)) {
1744                                 /* We have an action that:
1745                                    (1) did not happen before us
1746                                    (2) is a read and we are a write
1747                                    (3) cannot synchronize with us
1748                                    (4) is in a different thread
1749                                    =>
1750                                    that read could potentially read from our write.  Note that
1751                                    these checks are overly conservative at this point, we'll
1752                                    do more checks before actually removing the
1753                                    pendingfuturevalue.
1754
1755                                  */
1756                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1757                                         if (!is_infeasible())
1758                                                 send_fv->push_back(act);
1759                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1760                                                 add_future_value(curr, act);
1761                                 }
1762                         }
1763                 }
1764         }
1765
1766         /*
1767          * All compatible, thread-exclusive promises must be ordered after any
1768          * concrete stores from the same thread, or else they can be merged with
1769          * this store later
1770          */
1771         for (unsigned int i = 0; i < promises->size(); i++)
1772                 if ((*promises)[i]->is_compatible_exclusive(curr))
1773                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1774
1775         return added;
1776 }
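
/*
 * Illustrative sketch of case (1) above (write-write coherence; hypothetical
 * example):
 *
 *   T1: x.store(1, std::memory_order_relaxed);    // write A
 *       x.store(2, std::memory_order_relaxed);    // curr
 *
 * A is sequenced before (hence happens before) curr, so the edge
 * A --mo--> curr is added: writes to the same location that are ordered by
 * happens-before must appear in that order in the modification order.
 */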
1777
1778 /** Arbitrary reads from the future are not allowed.  Section 29.3
1779  * part 9 places some constraints.  This method checks one result of that
1780  * constraint.  Others require compiler support. */
1781 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1782 {
1783         if (!writer->is_rmw())
1784                 return true;
1785
1786         if (!reader->is_rmw())
1787                 return true;
1788
1789         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1790                 if (search == reader)
1791                         return false;
1792                 if (search->get_tid() == reader->get_tid() &&
1793                                 search->happens_before(reader))
1794                         break;
1795         }
1796
1797         return true;
1798 }
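
/*
 * Illustrative sketch (hypothetical): with two RMWs on x,
 *
 *   T1: a = x.exchange(1, std::memory_order_relaxed);   // reader
 *   T2: b = x.exchange(2, std::memory_order_relaxed);   // writer
 *
 * if the writer's reads-from chain already passes through the reader, then
 * letting the reader take its value from the writer would close a cycle of
 * RMWs feeding each other values "out of thin air", so that choice is
 * rejected here.
 */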
1799
1800 /**
1801  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1802  * some constraints. This method checks the following constraint (others
1803  * require compiler support):
1804  *
1805  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1806  */
1807 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1808 {
1809         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1810         unsigned int i;
1811         /* Iterate over all threads */
1812         for (i = 0; i < thrd_lists->size(); i++) {
1813                 const ModelAction *write_after_read = NULL;
1814
1815                 /* Iterate over actions in thread, starting from most recent */
1816                 action_list_t *list = &(*thrd_lists)[i];
1817                 action_list_t::reverse_iterator rit;
1818                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1819                         ModelAction *act = *rit;
1820
1821                         /* Don't disallow due to act == reader */
1822                         if (!reader->happens_before(act) || reader == act)
1823                                 break;
1824                         else if (act->is_write())
1825                                 write_after_read = act;
1826                         else if (act->is_read() && act->get_reads_from() != NULL)
1827                                 write_after_read = act->get_reads_from();
1828                 }
1829
1830                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1831                         return false;
1832         }
1833         return true;
1834 }
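
/*
 * Illustrative sketch of the constraint above (hypothetical example):
 *
 *   T1: r = x.load(std::memory_order_relaxed);    // X (reader)
 *       x.store(1, std::memory_order_relaxed);    // Y, a write after X
 *   T2: x.store(2, std::memory_order_relaxed);    // Z (writer)
 *
 * If Y --mo--> Z is already established, then X --hb--> Y --mo--> Z means
 * letting X read the future value from Z would violate coherence, so
 * mo_may_allow() rejects that reads-from candidate.
 */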
1835
1836 /**
1837  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1838  * The ModelAction under consideration is expected to be taking part in
1839  * release/acquire synchronization as an object of the "reads from" relation.
1840  * Note that this can only provide release sequence support for RMW chains
1841  * which do not read from the future, as those actions cannot be traced until
1842  * their "promise" is fulfilled. Similarly, we may not even establish the
1843  * presence of a release sequence with certainty, as some modification order
1844  * constraints may be decided further in the future. Thus, this function
1845  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1846  * and a boolean representing certainty.
1847  *
1848  * @param rf The action that might be part of a release sequence. Must be a
1849  * write.
1850  * @param release_heads A pass-by-reference style return parameter. After
1851  * execution of this function, release_heads will contain the heads of all the
1852  * relevant release sequences, if any exist with certainty
1853  * @param pending A pass-by-reference style return parameter which is only used
1854  * when returning false (i.e., uncertain). Returns most information regarding
1855  * an uncertain release sequence, including any write operations that might
1856  * break the sequence.
1857  * @return true, if the ModelExecution is certain that release_heads is complete;
1858  * false otherwise
1859  */
1860 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1861                 rel_heads_list_t *release_heads,
1862                 struct release_seq *pending) const
1863 {
1864         /* Only check for release sequences if there are no cycles */
1865         if (mo_graph->checkForCycles())
1866                 return false;
1867
1868         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1869                 ASSERT(rf->is_write());
1870
1871                 if (rf->is_release())
1872                         release_heads->push_back(rf);
1873                 else if (rf->get_last_fence_release())
1874                         release_heads->push_back(rf->get_last_fence_release());
1875                 if (!rf->is_rmw())
1876                         break; /* End of RMW chain */
1877
1878                 /** @todo Need to be smarter here...  In the linux lock
1879                  * example, this will run to the beginning of the program for
1880                  * every acquire. */
1881                 /** @todo The way to be smarter here is to keep going until 1
1882                  * thread has a release preceded by an acquire and you've seen
1883                  *       both. */
1884
1885                 /* acq_rel RMW is a sufficient stopping condition */
1886                 if (rf->is_acquire() && rf->is_release())
1887                         return true; /* complete */
1888         }
1889         if (!rf) {
1890                 /* read from future: need to settle this later */
1891                 pending->rf = NULL;
1892                 return false; /* incomplete */
1893         }
1894
1895         if (rf->is_release())
1896                 return true; /* complete */
1897
1898         /* else relaxed write
1899          * - check for fence-release in the same thread (29.8, stmt. 3)
1900          * - check modification order for contiguous subsequence
1901          *   -> rf must be same thread as release */
1902
1903         const ModelAction *fence_release = rf->get_last_fence_release();
1904         /* Synchronize with a fence-release unconditionally; we don't need to
1905          * find any more "contiguous subsequence..." for it */
1906         if (fence_release)
1907                 release_heads->push_back(fence_release);
1908
1909         int tid = id_to_int(rf->get_tid());
1910         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1911         action_list_t *list = &(*thrd_lists)[tid];
1912         action_list_t::const_reverse_iterator rit;
1913
1914         /* Find rf in the thread list */
1915         rit = std::find(list->rbegin(), list->rend(), rf);
1916         ASSERT(rit != list->rend());
1917
1918         /* Find the last {write,fence}-release */
1919         for (; rit != list->rend(); rit++) {
1920                 if (fence_release && *(*rit) < *fence_release)
1921                         break;
1922                 if ((*rit)->is_release())
1923                         break;
1924         }
1925         if (rit == list->rend()) {
1926                 /* No write-release in this thread */
1927                 return true; /* complete */
1928         } else if (fence_release && *(*rit) < *fence_release) {
1929                 /* The fence-release is more recent (and so, "stronger") than
1930                  * the most recent write-release */
1931                 return true; /* complete */
1932         } /* else, need to establish contiguous release sequence */
1933         ModelAction *release = *rit;
1934
1935         ASSERT(rf->same_thread(release));
1936
1937         pending->writes.clear();
1938
1939         bool certain = true;
1940         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1941                 if (id_to_int(rf->get_tid()) == (int)i)
1942                         continue;
1943                 list = &(*thrd_lists)[i];
1944
1945                 /* Can we ensure no future writes from this thread may break
1946                  * the release seq? */
1947                 bool future_ordered = false;
1948
1949                 ModelAction *last = get_last_action(int_to_id(i));
1950                 Thread *th = get_thread(int_to_id(i));
1951                 if ((last && rf->happens_before(last)) ||
1952                                 !is_enabled(th) ||
1953                                 th->is_complete())
1954                         future_ordered = true;
1955
1956                 ASSERT(!th->is_model_thread() || future_ordered);
1957
1958                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1959                         const ModelAction *act = *rit;
1960                         /* Reach synchronization -> this thread is complete */
1961                         if (act->happens_before(release))
1962                                 break;
1963                         if (rf->happens_before(act)) {
1964                                 future_ordered = true;
1965                                 continue;
1966                         }
1967
1968                         /* Only non-RMW writes can break release sequences */
1969                         if (!act->is_write() || act->is_rmw())
1970                                 continue;
1971
1972                         /* Check modification order */
1973                         if (mo_graph->checkReachable(rf, act)) {
1974                                 /* rf --mo--> act */
1975                                 future_ordered = true;
1976                                 continue;
1977                         }
1978                         if (mo_graph->checkReachable(act, release))
1979                                 /* act --mo--> release */
1980                                 break;
1981                         if (mo_graph->checkReachable(release, act) &&
1982                                       mo_graph->checkReachable(act, rf)) {
1983                                 /* release --mo-> act --mo--> rf */
1984                                 return true; /* complete */
1985                         }
1986                         /* act may break release sequence */
1987                         pending->writes.push_back(act);
1988                         certain = false;
1989                 }
1990                 if (!future_ordered)
1991                         certain = false; /* This thread is uncertain */
1992         }
1993
1994         if (certain) {
1995                 release_heads->push_back(release);
1996                 pending->writes.clear();
1997         } else {
1998                 pending->release = release;
1999                 pending->rf = rf;
2000         }
2001         return certain;
2002 }
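
/*
 * Illustrative example (a sketch, not taken from the test suite):
 *
 *   T1: x.store(1, std::memory_order_release);    // potential release head
 *       x.store(2, std::memory_order_relaxed);    // rf
 *   T2: r = x.load(std::memory_order_acquire);    // reads 2
 *
 * The relaxed store is not itself a release, so T2 synchronizes with the
 * release store only if the two stores form a contiguous subsequence of the
 * modification order. Writes from other threads that might still slip
 * between them are recorded in pending->writes until mo is certain.
 */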
2003
2004 /**
2005  * An interface for getting the release sequence head(s) with which a
2006  * given ModelAction must synchronize. This function only returns a non-empty
2007  * result when it can locate a release sequence head with certainty. Otherwise,
2008  * it may mark the internal state of the ModelExecution so that it will handle
2009  * the release sequence at a later time, causing @a acquire to update its
2010  * synchronization at some later point in execution.
2011  *
2012  * @param acquire The 'acquire' action that may synchronize with a release
2013  * sequence
2014  * @param read The read action that may read from a release sequence; this may
2015  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2016  * when 'acquire' is a fence-acquire)
2017  * @param release_heads A pass-by-reference return parameter. Will be filled
2018  * with the head(s) of the release sequence(s), if they exist with certainty.
2019  * @see ModelExecution::release_seq_heads
2020  */
2021 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2022                 ModelAction *read, rel_heads_list_t *release_heads)
2023 {
2024         const ModelAction *rf = read->get_reads_from();
2025         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2026         sequence->acquire = acquire;
2027         sequence->read = read;
2028
2029         if (!release_seq_heads(rf, release_heads, sequence)) {
2030                 /* add act to 'lazy checking' list */
2031                 pending_rel_seqs->push_back(sequence);
2032         } else {
2033                 snapshot_free(sequence);
2034         }
2035 }
2036
2037 /**
2038  * Attempt to resolve all stashed operations that might synchronize with a
2039  * release sequence for a given location. This implements the "lazy" portion of
2040  * determining whether or not a release sequence was contiguous, since not all
2041  * modification order information is present at the time an action occurs.
2042  *
2043  * @param location The location/object that should be checked for release
2044  * sequence resolutions. A NULL value means to check all locations.
2045  * @param work_queue The work queue to which to add work items as they are
2046  * generated
2047  * @return True if any updates occurred (new synchronization, new mo_graph
2048  * edges)
2049  */
2050 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2051 {
2052         bool updated = false;
2053         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2054         while (it != pending_rel_seqs->end()) {
2055                 struct release_seq *pending = *it;
2056                 ModelAction *acquire = pending->acquire;
2057                 const ModelAction *read = pending->read;
2058
2059                 /* Only resolve sequences on the given location, if provided */
2060                 if (location && read->get_location() != location) {
2061                         it++;
2062                         continue;
2063                 }
2064
2065                 const ModelAction *rf = read->get_reads_from();
2066                 rel_heads_list_t release_heads;
2067                 bool complete;
2068                 complete = release_seq_heads(rf, &release_heads, pending);
2069                 for (unsigned int i = 0; i < release_heads.size(); i++)
2070                         if (!acquire->has_synchronized_with(release_heads[i]))
2071                                 if (synchronize(release_heads[i], acquire))
2072                                         updated = true;
2073
2074                 if (updated) {
2075                         /* Re-check all pending release sequences */
2076                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2077                         /* Re-check read-acquire for mo_graph edges */
2078                         if (acquire->is_read())
2079                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2080
2081                         /* propagate synchronization to later actions */
2082                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2083                         for (; (*rit) != acquire; rit++) {
2084                                 ModelAction *propagate = *rit;
2085                                 if (acquire->happens_before(propagate)) {
2086                                         synchronize(acquire, propagate);
2087                                         /* Re-check 'propagate' for mo_graph edges */
2088                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2089                                 }
2090                         }
2091                 }
2092                 if (complete) {
2093                         it = pending_rel_seqs->erase(it);
2094                         snapshot_free(pending);
2095                 } else {
2096                         it++;
2097                 }
2098         }
2099
2100         // If we added new synchronization, check whether we have realized a data race.
2101         checkDataRaces();
2102
2103         return updated;
2104 }
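
/*
 * Note (informal): release_seq_heads() may be uncertain while the
 * modification order is still being built. Such sequences are parked in
 * pending_rel_seqs and re-examined here whenever new mo edges or
 * synchronization might settle them, which is why successful updates push
 * CheckRelSeqWorkEntry and MOEdgeWorkEntry items back onto the work queue.
 */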
2105
2106 /**
2107  * Performs various bookkeeping operations for the current ModelAction. For
2108  * instance, adds action to the per-object, per-thread action vector and to the
2109  * action trace list of all thread actions.
2110  *
2111  * @param act is the ModelAction to add.
2112  */
2113 void ModelExecution::add_action_to_lists(ModelAction *act)
2114 {
2115         int tid = id_to_int(act->get_tid());
2116         ModelAction *uninit = NULL;
2117         int uninit_id = -1;
2118         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2119         if (list->empty() && act->is_atomic_var()) {
2120                 uninit = get_uninitialized_action(act);
2121                 uninit_id = id_to_int(uninit->get_tid());
2122                 list->push_front(uninit);
2123         }
2124         list->push_back(act);
2125
2126         action_trace->push_back(act);
2127         if (uninit)
2128                 action_trace->push_front(uninit);
2129
2130         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2131         if (tid >= (int)vec->size())
2132                 vec->resize(priv->next_thread_id);
2133         (*vec)[tid].push_back(act);
2134         if (uninit)
2135                 (*vec)[uninit_id].push_front(uninit);
2136
2137         if ((int)thrd_last_action->size() <= tid)
2138                 thrd_last_action->resize(get_num_threads());
2139         (*thrd_last_action)[tid] = act;
2140         if (uninit)
2141                 (*thrd_last_action)[uninit_id] = uninit;
2142
2143         if (act->is_fence() && act->is_release()) {
2144                 if ((int)thrd_last_fence_release->size() <= tid)
2145                         thrd_last_fence_release->resize(get_num_threads());
2146                 (*thrd_last_fence_release)[tid] = act;
2147         }
2148
2149         if (act->is_wait()) {
2150                 void *mutex_loc = (void *) act->get_value();
2151                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2152
2153                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2154                 if (tid >= (int)vec->size())
2155                         vec->resize(priv->next_thread_id);
2156                 (*vec)[tid].push_back(act);
2157         }
2158 }
2159
2160 /**
2161  * @brief Get the last action performed by a particular Thread
2162  * @param tid The thread ID of the Thread in question
2163  * @return The last action in the thread
2164  */
2165 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2166 {
2167         int threadid = id_to_int(tid);
2168         if (threadid < (int)thrd_last_action->size())
2169                 return (*thrd_last_action)[id_to_int(tid)];
2170         else
2171                 return NULL;
2172 }
2173
2174 /**
2175  * @brief Get the last fence release performed by a particular Thread
2176  * @param tid The thread ID of the Thread in question
2177  * @return The last fence release in the thread, if one exists; NULL otherwise
2178  */
2179 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2180 {
2181         int threadid = id_to_int(tid);
2182         if (threadid < (int)thrd_last_fence_release->size())
2183                 return (*thrd_last_fence_release)[id_to_int(tid)];
2184         else
2185                 return NULL;
2186 }
2187
2188 /**
2189  * Gets the last memory_order_seq_cst write (in the total global sequence)
2190  * performed on a particular object (i.e., memory location), not including the
2191  * current action.
2192  * @param curr The current ModelAction; also denotes the object location to
2193  * check
2194  * @return The last seq_cst write
2195  */
2196 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2197 {
2198         void *location = curr->get_location();
2199         action_list_t *list = get_safe_ptr_action(obj_map, location);
2200         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2201         action_list_t::reverse_iterator rit;
2202         for (rit = list->rbegin(); (*rit) != curr; rit++)
2203                 ;
2204         rit++; /* Skip past curr */
2205         for ( ; rit != list->rend(); rit++)
2206                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2207                         return *rit;
2208         return NULL;
2209 }
2210
2211 /**
2212  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2213  * performed in a particular thread, prior to a particular fence.
2214  * @param tid The ID of the thread to check
2215  * @param before_fence The fence from which to begin the search; if NULL, then
2216  * search for the most recent fence in the thread.
2217  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2218  */
2219 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2220 {
2221         /* All fences should have NULL location */
2222         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2223         action_list_t::reverse_iterator rit = list->rbegin();
2224
2225         if (before_fence) {
2226                 for (; rit != list->rend(); rit++)
2227                         if (*rit == before_fence)
2228                                 break;
2229
2230                 ASSERT(*rit == before_fence);
2231                 rit++;
2232         }
2233
2234         for (; rit != list->rend(); rit++)
2235                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2236                         return *rit;
2237         return NULL;
2238 }
2239
2240 /**
2241  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2242  * location). This function identifies the mutex according to the current
2243  * action, which is presumed to operate on the same mutex.
2244  * @param curr The current ModelAction; also denotes the object location to
2245  * check
2246  * @return The last unlock operation
2247  */
2248 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2249 {
2250         void *location = curr->get_location();
2251         action_list_t *list = get_safe_ptr_action(obj_map, location);
2252         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2253         action_list_t::reverse_iterator rit;
2254         for (rit = list->rbegin(); rit != list->rend(); rit++)
2255                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2256                         return *rit;
2257         return NULL;
2258 }
2259
2260 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2261 {
2262         ModelAction *parent = get_last_action(tid);
2263         if (!parent)
2264                 parent = get_thread(tid)->get_creation();
2265         return parent;
2266 }
2267
2268 /**
2269  * Returns the clock vector for a given thread.
2270  * @param tid The thread whose clock vector we want
2271  * @return Desired clock vector
2272  */
2273 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2274 {
2275         return get_parent_action(tid)->get_cv();
2276 }
2277
2278 /**
2279  * @brief Find the promise (if any) to resolve for the current action and
2280  * remove it from the pending promise vector
2281  * @param curr The current ModelAction. Should be a write.
2282  * @return The Promise to resolve, if any; otherwise NULL
2283  */
2284 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2285 {
2286         for (unsigned int i = 0; i < promises->size(); i++)
2287                 if (curr->get_node()->get_promise(i)) {
2288                         Promise *ret = (*promises)[i];
2289                         promises->erase(promises->begin() + i);
2290                         return ret;
2291                 }
2292         return NULL;
2293 }
2294
2295 /**
2296  * Resolve a Promise with a current write.
2297  * @param write The ModelAction that is fulfilling Promises
2298  * @param promise The Promise to resolve
2299  * @return True if the Promise was successfully resolved; false otherwise
2300  */
2301 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2302 {
2303         ModelVector<ModelAction *> actions_to_check;
2304
2305         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2306                 ModelAction *read = promise->get_reader(i);
2307                 read_from(read, write);
2308                 actions_to_check.push_back(read);
2309         }
2310         /* Make sure the promise's value matches the write's value */
2311         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2312         if (!mo_graph->resolvePromise(promise, write))
2313                 priv->failed_promise = true;
2314
2315         /**
2316          * @todo  It is possible to end up in an inconsistent state, where a
2317          * "resolved" promise may still be referenced if
2318          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2319          *
2320          * Note that the inconsistency only matters when dumping mo_graph to
2321          * file.
2322          *
2323          * delete promise;
2324          */
2325
2326         // Check whether reading these writes has made threads unable to
2327         // resolve promises
2328         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2329                 ModelAction *read = actions_to_check[i];
2330                 mo_check_promises(read, true);
2331         }
2332
2333         return true;
2334 }
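
/*
 * Illustrative example (a sketch): promises let a load speculatively observe
 * a value "from the future":
 *
 *   T1: r = x.load(std::memory_order_relaxed);    // explored first, reads 1
 *   T2: x.store(1, std::memory_order_relaxed);    // explored later
 *
 * The load initially reads from a Promise recording the value 1; when the
 * store is eventually executed and is compatible with the promise (same
 * location and value), resolve_promise() binds the readers to that concrete
 * write.
 */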
2335
2336 /**
2337  * Compute the set of promises that could potentially be satisfied by this
2338  * action. Note that the set computation actually appears in the Node, not in
2339  * ModelExecution.
2340  * @param curr The ModelAction that may satisfy promises
2341  */
2342 void ModelExecution::compute_promises(ModelAction *curr)
2343 {
2344         for (unsigned int i = 0; i < promises->size(); i++) {
2345                 Promise *promise = (*promises)[i];
2346                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2347                         continue;
2348
2349                 bool satisfy = true;
2350                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2351                         const ModelAction *act = promise->get_reader(j);
2352                         if (act->happens_before(curr) ||
2353                                         act->could_synchronize_with(curr)) {
2354                                 satisfy = false;
2355                                 break;
2356                         }
2357                 }
2358                 if (satisfy)
2359                         curr->get_node()->set_promise(i);
2360         }
2361 }
2362
2363 /** Checks promises in response to change in ClockVector Threads. */
2364 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2365 {
2366         for (unsigned int i = 0; i < promises->size(); i++) {
2367                 Promise *promise = (*promises)[i];
2368                 if (!promise->thread_is_available(tid))
2369                         continue;
2370                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2371                         const ModelAction *act = promise->get_reader(j);
2372                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2373                                         merge_cv->synchronized_since(act)) {
2374                                 if (promise->eliminate_thread(tid)) {
2375                                         /* Promise has failed */
2376                                         priv->failed_promise = true;
2377                                         return;
2378                                 }
2379                         }
2380                 }
2381         }
2382 }
2383
2384 void ModelExecution::check_promises_thread_disabled()
2385 {
2386         for (unsigned int i = 0; i < promises->size(); i++) {
2387                 Promise *promise = (*promises)[i];
2388                 if (promise->has_failed()) {
2389                         priv->failed_promise = true;
2390                         return;
2391                 }
2392         }
2393 }
2394
2395 /**
2396  * @brief Checks promises in response to addition to modification order for
2397  * threads.
2398  *
2399  * We test whether threads are still available for satisfying promises after an
2400  * addition to our modification order constraints. Those that are unavailable
2401  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2402  * that promise has failed.
2403  *
2404  * @param act The ModelAction which updated the modification order
2405  * @param is_read_check Should be true if act is a read and we must check for
2406  * updates to the store from which it read (there is a distinction here for
2407  * RMW's, which are both a load and a store)
2408  */
2409 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2410 {
2411         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2412
2413         for (unsigned int i = 0; i < promises->size(); i++) {
2414                 Promise *promise = (*promises)[i];
2415
2416                 // Is this promise on the same location?
2417                 if (!promise->same_location(write))
2418                         continue;
2419
2420                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2421                         const ModelAction *pread = promise->get_reader(j);
2422                         if (!pread->happens_before(act))
2423                                continue;
2424                         if (mo_graph->checkPromise(write, promise)) {
2425                                 priv->failed_promise = true;
2426                                 return;
2427                         }
2428                         break;
2429                 }
2430
2431                 // Don't do any lookups twice for the same thread
2432                 if (!promise->thread_is_available(act->get_tid()))
2433                         continue;
2434
2435                 if (mo_graph->checkReachable(promise, write)) {
2436                         if (mo_graph->checkPromise(write, promise)) {
2437                                 priv->failed_promise = true;
2438                                 return;
2439                         }
2440                 }
2441         }
2442 }
2443
2444 /**
2445  * Compute the set of writes that may break the current pending release
2446  * sequence. This information is extracted from previous release sequence
2447  * calculations.
2448  *
2449  * @param curr The current ModelAction. Must be a release sequence fixup
2450  * action.
2451  */
2452 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2453 {
2454         if (pending_rel_seqs->empty())
2455                 return;
2456
2457         struct release_seq *pending = pending_rel_seqs->back();
2458         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2459                 const ModelAction *write = pending->writes[i];
2460                 curr->get_node()->add_relseq_break(write);
2461         }
2462
2463         /* NULL means don't break the sequence; just synchronize */
2464         curr->get_node()->add_relseq_break(NULL);
2465 }
2466
2467 /**
2468  * Build up an initial set of all past writes that this 'read' action may read
2469  * from, as well as any previously-observed future values that must still be valid.
2470  *
2471  * @param curr is the current ModelAction that we are exploring; it must be a
2472  * 'read' operation.
2473  */
2474 void ModelExecution::build_may_read_from(ModelAction *curr)
2475 {
2476         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2477         unsigned int i;
2478         ASSERT(curr->is_read());
2479
2480         ModelAction *last_sc_write = NULL;
2481
2482         if (curr->is_seqcst())
2483                 last_sc_write = get_last_seq_cst_write(curr);
2484
2485         /* Iterate over all threads */
2486         for (i = 0; i < thrd_lists->size(); i++) {
2487                 /* Iterate over actions in thread, starting from most recent */
2488                 action_list_t *list = &(*thrd_lists)[i];
2489                 action_list_t::reverse_iterator rit;
2490                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2491                         ModelAction *act = *rit;
2492
2493                         /* Only consider 'write' actions */
2494                         if (!act->is_write() || act == curr)
2495                                 continue;
2496
2497                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2498                         bool allow_read = true;
2499
2500                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2501                                 allow_read = false;
2502                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2503                                 allow_read = false;
2504
2505                         if (allow_read) {
2506                                 /* Only add feasible reads */
2507                                 mo_graph->startChanges();
2508                                 r_modification_order(curr, act);
2509                                 if (!is_infeasible())
2510                                         curr->get_node()->add_read_from_past(act);
2511                                 mo_graph->rollbackChanges();
2512                         }
2513
2514                         /* Include at most one act per-thread that "happens before" curr */
2515                         if (act->happens_before(curr))
2516                                 break;
2517                 }
2518         }
2519
2520         /* Inherit existing, promised future values */
2521         for (i = 0; i < promises->size(); i++) {
2522                 const Promise *promise = (*promises)[i];
2523                 const ModelAction *promise_read = promise->get_reader(0);
2524                 if (promise_read->same_var(curr)) {
2525                         /* Only add feasible future-values */
2526                         mo_graph->startChanges();
2527                         r_modification_order(curr, promise);
2528                         if (!is_infeasible())
2529                                 curr->get_node()->add_read_from_promise(promise_read);
2530                         mo_graph->rollbackChanges();
2531                 }
2532         }
2533
2534         /* We may find no valid may-read-from only if the execution is doomed */
2535         if (!curr->get_node()->read_from_size()) {
2536                 priv->no_valid_reads = true;
2537                 set_assert();
2538         }
2539
2540         if (DBG_ENABLED()) {
2541                 model_print("Reached read action:\n");
2542                 curr->print();
2543                 model_print("Printing read_from_past\n");
2544                 curr->get_node()->print_read_from_past();
2545                 model_print("End printing read_from_past\n");
2546         }
2547 }
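
/*
 * Illustrative sketch (hypothetical user program, not part of this file):
 *
 *   std::atomic<int> x;
 *   // Thread 1: x.store(1, std::memory_order_relaxed);
 *   // Thread 2: x.store(2, std::memory_order_relaxed);
 *   // Thread 3: int r = x.load(std::memory_order_relaxed);
 *
 * With no happens-before ordering between the threads, the load's
 * read_from_past set may contain both stores as well as the uninitialized
 * (ATOMIC_UNINIT) value; each candidate is kept only if the corresponding
 * rf edge does not render the execution infeasible (e.g., by creating a
 * modification-order cycle), per the is_infeasible() check above.
 */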
2548
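/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Used for sleep-set handling: walks backward along the reads-from chain of
 * RMW operations ending at @a write. The read is allowed if the chain reaches
 * an uninitialized write, or if some write in the chain is a release that was
 * executed while the reader's thread sat on the sleep set; it is rejected once
 * a plain (non-RMW) write is reached without finding such a release.
 *
 * @param curr The read action of the (sleeping) thread
 * @param write The candidate write to read from
 * @return True if @a curr may read from @a write
 */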
2549 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2550 {
2551         for ( ; write != NULL; write = write->get_reads_from()) {
2552                 /* UNINIT actions don't have a Node, and they never sleep */
2553                 if (write->is_uninitialized())
2554                         return true;
2555                 Node *prevnode = write->get_node()->get_parent();
2556
2557                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2558                 if (write->is_release() && thread_sleep)
2559                         return true;
2560                 if (!write->is_rmw())
2561                         return false;
2562         }
2563         return true;
2564 }
2565
2566 /**
2567  * @brief Get an action representing an uninitialized atomic
2568  *
2569  * This function may create a new one or try to retrieve one from the NodeStack
2570  *
2571  * @param curr The current action, which prompts the creation of an UNINIT action
2572  * @return A pointer to the UNINIT ModelAction
2573  */
2574 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2575 {
2576         Node *node = curr->get_node();
2577         ModelAction *act = node->get_uninit_action();
2578         if (!act) {
2579                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2580                 node->set_uninit_action(act);
2581         }
2582         act->create_cv(NULL);
2583         return act;
2584 }
2585
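/**
 * @brief Print a list of ModelActions, followed by a simple order-sensitive
 * hash of the listed actions (useful for spotting identical traces in the
 * output)
 */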
2586 static void print_list(action_list_t *list)
2587 {
2588         action_list_t::iterator it;
2589
2590         model_print("---------------------------------------------------------------------\n");
2591
2592         unsigned int hash = 0;
2593
2594         for (it = list->begin(); it != list->end(); it++) {
2595                 const ModelAction *act = *it;
2596                 if (act->get_seq_number() > 0)
2597                         act->print();
2598                 hash = hash^(hash<<3)^((*it)->hash());
2599         }
2600         model_print("HASH %u\n", hash);
2601         model_print("---------------------------------------------------------------------\n");
2602 }
2603
2604 #if SUPPORT_MOD_ORDER_DUMP
2605 void ModelExecution::dumpGraph(char *filename) const
2606 {
2607         char buffer[200];
2608         sprintf(buffer, "%s.dot", filename);
2609         FILE *file = fopen(buffer, "w");
2610         fprintf(file, "digraph %s {\n", filename);
2611         mo_graph->dumpNodes(file);
2612         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2613
2614         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2615                 ModelAction *act = *it;
2616                 if (act->is_read()) {
2617                         mo_graph->dot_print_node(file, act);
2618                         if (act->get_reads_from())
2619                                 mo_graph->dot_print_edge(file,
2620                                                 act->get_reads_from(),
2621                                                 act,
2622                                                 "label=\"rf\", color=red, weight=2");
2623                         else
2624                                 mo_graph->dot_print_edge(file,
2625                                                 act->get_reads_from_promise(),
2626                                                 act,
2627                                                 "label=\"rf\", color=red");
2628                 }
2629                 if (thread_array[act->get_tid()]) {
2630                         mo_graph->dot_print_edge(file,
2631                                         thread_array[id_to_int(act->get_tid())],
2632                                         act,
2633                                         "label=\"sb\", color=blue, weight=400");
2634                 }
2635
2636                 thread_array[act->get_tid()] = act;
2637         }
2638         fprintf(file, "}\n");
2639         model_free(thread_array);
2640         fclose(file);
2641 }
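
/*
 * The .dot files produced above can be rendered with Graphviz, e.g.:
 *
 *   dot -Tpng graph0001.dot -o graph0001.png
 *
 * rf edges are drawn in red and sb (sequenced-before) edges in blue, as set
 * up in dumpGraph() above.
 */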
2642 #endif
2643
2644 /** @brief Prints an execution trace summary. */
2645 void ModelExecution::print_summary() const
2646 {
2647 #if SUPPORT_MOD_ORDER_DUMP
2648         char buffername[100];
2649         sprintf(buffername, "exec%04u", execution_number);
2650         mo_graph->dumpGraphToFile(buffername);
2651         sprintf(buffername, "graph%04u", execution_number);
2652         dumpGraph(buffername);
2653 #endif
2654
2655         model_print("Execution %d:", execution_number);
2656         if (isfeasibleprefix()) {
2657                 if (scheduler->all_threads_sleeping())
2658                         model_print(" SLEEP-SET REDUNDANT");
2659                 model_print("\n");
2660         } else
2661                 print_infeasibility(" INFEASIBLE");
2662         print_list(action_trace);
2663         model_print("\n");
2664         if (!promises->empty()) {
2665                 model_print("Pending promises:\n");
2666                 for (unsigned int i = 0; i < promises->size(); i++) {
2667                         model_print(" [P%u] ", i);
2668                         (*promises)[i]->print();
2669                 }
2670                 model_print("\n");
2671         }
2672 }
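
/*
 * The resulting output looks roughly like the following (values are
 * hypothetical):
 *
 *   Execution 12:
 *   ---------------------------------------------------------------------
 *   ... one line per ModelAction, as printed by ModelAction::print() ...
 *   HASH 1530893341
 *   ---------------------------------------------------------------------
 */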
2673
2674 /**
2675  * Add a Thread to the system for the first time. Should only be called once
2676  * per thread.
2677  * @param t The Thread to add
2678  */
2679 void ModelExecution::add_thread(Thread *t)
2680 {
2681         thread_map->put(id_to_int(t->get_id()), t);
2682         if (!t->is_model_thread())
2683                 scheduler->add_thread(t);
2684 }
2685
2686 /**
2687  * @brief Get a Thread reference by its ID
2688  * @param tid The Thread's ID
2689  * @return A Thread reference
2690  */
2691 Thread * ModelExecution::get_thread(thread_id_t tid) const
2692 {
2693         return thread_map->get(id_to_int(tid));
2694 }
2695
2696 /**
2697  * @brief Get a reference to the Thread in which a ModelAction was executed
2698  * @param act The ModelAction
2699  * @return A Thread reference
2700  */
2701 Thread * ModelExecution::get_thread(const ModelAction *act) const
2702 {
2703         return get_thread(act->get_tid());
2704 }
2705
2706 /**
2707  * @brief Get a Promise's "promise number"
2708  *
2709  * A "promise number" is an index number that is unique to a promise, valid
2710  * only for a specific snapshot of an execution trace. Promises may come and go
2711  * as they are generated and resolved, so an index only retains meaning for the
2712  * current snapshot.
2713  *
2714  * @param promise The Promise to check
2715  * @return The promise index, if the promise is still valid; otherwise -1
2716  */
2717 int ModelExecution::get_promise_number(const Promise *promise) const
2718 {
2719         for (unsigned int i = 0; i < promises->size(); i++)
2720                 if ((*promises)[i] == promise)
2721                         return i;
2722         /* Not found */
2723         return -1;
2724 }
2725
2726 /**
2727  * @brief Check if a Thread is currently enabled
2728  * @param t The Thread to check
2729  * @return True if the Thread is currently enabled
2730  */
2731 bool ModelExecution::is_enabled(Thread *t) const
2732 {
2733         return scheduler->is_enabled(t);
2734 }
2735
2736 /**
2737  * @brief Check if a Thread is currently enabled
2738  * @param tid The ID of the Thread to check
2739  * @return True if the Thread is currently enabled
2740  */
2741 bool ModelExecution::is_enabled(thread_id_t tid) const
2742 {
2743         return scheduler->is_enabled(tid);
2744 }
2745
2746 /**
2747  * @brief Select the next thread to execute based on the current action
2748  *
2749  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
2750  * actions should be followed immediately by the execution of their child thread. In either
2751  * case, the current action should determine the next thread schedule.
2752  *
2753  * @param curr The current action
2754  * @return The next thread to run, if the current action will determine this
2755  * selection; otherwise NULL
2756  */
2757 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2758 {
2759         /* Do not split atomic RMW */
2760         if (curr->is_rmwr())
2761                 return get_thread(curr);
2762         /* Follow CREATE with the created thread */
2763         if (curr->get_type() == THREAD_CREATE)
2764                 return curr->get_thread_operand();
2765         return NULL;
2766 }
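
/*
 * Illustrative sketch of the RMW case (hypothetical user code): an operation
 * such as
 *
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * is modeled as a read part (is_rmwr()) followed by a separate write part
 * issued by the same thread. The two parts must execute back to back to
 * preserve the atomicity of the RMW, so the same thread is selected to run
 * next.
 */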
2767
2768 /** @return True if a step bound is set (non-zero) and this execution has exceeded it */
2769 bool ModelExecution::too_many_steps() const
2770 {
2771         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2772 }
2773
2774 /**
2775  * Takes the next step in the execution, if possible.
2776  * @param curr The current step to take
2777  * @return Returns the next Thread to run, if any; NULL if this execution
2778  * should terminate
2779  */
2780 Thread * ModelExecution::take_step(ModelAction *curr)
2781 {
2782         Thread *curr_thrd = get_thread(curr);
2783         ASSERT(curr_thrd->get_state() == THREAD_READY);
2784
2785         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2786         curr = check_current_action(curr);
2787         ASSERT(curr);
2788
2789         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2790                 scheduler->remove_thread(curr_thrd);
2791
2792         return action_select_next_thread(curr);
2793 }
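
/*
 * Hypothetical caller sketch (the surrounding driver names are assumptions,
 * not part of this file):
 *
 *   Thread *forced = execution->take_step(curr);
 *   // A non-NULL result (the continuation of an RMW, or a newly created
 *   // thread) must be scheduled next; a NULL result leaves the scheduler
 *   // free to pick any enabled thread for the following step.
 */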
2794
2795 /**
2796  * Launch end-of-execution release sequence fixups only when
2797  * the execution is otherwise feasible AND there are:
2798  *
2799  * (1) pending release sequences
2800  * (2) pending assertions that could be invalidated by a change
2801  * in clock vectors (i.e., data races)
2802  * (3) no pending promises
2803  */
2804 void ModelExecution::fixup_release_sequences()
2805 {
2806         while (!pending_rel_seqs->empty() &&
2807                         is_feasible_prefix_ignore_relseq() &&
2808                         !unrealizedraces.empty()) {
2809                 model_print("*** WARNING: release sequence fixup action "
2810                                 "(%zu pending release sequence(s)) ***\n",
2811                                 pending_rel_seqs->size());
2812                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2813                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2814                                 model_thread);
2815                 take_step(fixup);
2816         }
2817 }