scanalysis: fixup spacing
[model-checker.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "execution.h"
8 #include "model.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "snapshot-interface.h"
13 #include "common.h"
14 #include "clockvector.h"
15 #include "cyclegraph.h"
16 #include "promise.h"
17 #include "datarace.h"
18 #include "threads-model.h"
19 #include "output.h"
20 #include "bugmessage.h"
21
22 #define INITIAL_THREAD_ID       0
23
24 /**
25  * Structure for holding small ModelChecker members that should be snapshotted
26  */
27 struct model_snapshot_members {
28         model_snapshot_members() :
29                 /* First thread created will have id INITIAL_THREAD_ID */
30                 next_thread_id(INITIAL_THREAD_ID),
31                 used_sequence_numbers(0),
32                 next_backtrack(NULL),
33                 bugs(),
34                 stats(),
35                 failed_promise(false),
36                 too_many_reads(false),
37                 no_valid_reads(false),
38                 bad_synchronization(false),
39                 asserted(false)
40         { }
41
42         ~model_snapshot_members() {
43                 for (unsigned int i = 0; i < bugs.size(); i++)
44                         delete bugs[i];
45                 bugs.clear();
46         }
47
48         unsigned int next_thread_id;
49         modelclock_t used_sequence_numbers;
50         ModelAction *next_backtrack;
51         SnapVector<bug_message *> bugs;
52         struct execution_stats stats;
53         bool failed_promise;
54         bool too_many_reads;
55         bool no_valid_reads;
56         /** @brief Incorrectly-ordered synchronization was made */
57         bool bad_synchronization;
58         bool asserted;
59
60         SNAPSHOTALLOC
61 };
62
63 /** @brief Constructor */
64 ModelExecution::ModelExecution(struct model_params *params, Scheduler *scheduler, NodeStack *node_stack) :
65         params(params),
66         scheduler(scheduler),
67         action_trace(new action_list_t()),
68         thread_map(new HashTable<int, Thread *, int>()),
69         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
70         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
71         obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
72         promises(new SnapVector<Promise *>()),
73         futurevalues(new SnapVector<struct PendingFutureValue>()),
74         pending_rel_seqs(new SnapVector<struct release_seq *>()),
75         thrd_last_action(new SnapVector<ModelAction *>(1)),
76         thrd_last_fence_release(new SnapVector<ModelAction *>()),
77         node_stack(node_stack),
78         priv(new struct model_snapshot_members()),
79         mo_graph(new CycleGraph()),
80         execution_number(1)
81 {
82         /* Initialize a model-checker thread, for special ModelActions */
83         model_thread = new Thread(get_next_id());
84         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
85         scheduler->register_engine(this);
86 }
87
88 /** @brief Destructor */
89 ModelExecution::~ModelExecution()
90 {
91         for (unsigned int i = 0; i < get_num_threads(); i++)
92                 delete thread_map->get(i);
93         delete thread_map;
94
95         delete obj_thrd_map;
96         delete obj_map;
97         delete condvar_waiters_map;
98         delete action_trace;
99
100         for (unsigned int i = 0; i < promises->size(); i++)
101                 delete (*promises)[i];
102         delete promises;
103
104         delete pending_rel_seqs;
105
106         delete thrd_last_action;
107         delete thrd_last_fence_release;
108         delete mo_graph;
109         delete priv;
110 }
111
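/**
 * @brief Look up the action list for a location, creating an empty list if needed
 *
 * Descriptive comment added for clarity: lazily inserts a new, empty
 * action_list_t into @a hash the first time @a ptr is seen.
 */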
112 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
113 {
114         action_list_t *tmp = hash->get(ptr);
115         if (tmp == NULL) {
116                 tmp = new action_list_t();
117                 hash->put(ptr, tmp);
118         }
119         return tmp;
120 }
121
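/**
 * @brief Look up the per-thread action-list vector for a location, creating
 * an empty vector if needed (added descriptive comment)
 */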
122 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
123 {
124         SnapVector<action_list_t> *tmp = hash->get(ptr);
125         if (tmp == NULL) {
126                 tmp = new SnapVector<action_list_t>();
127                 hash->put(ptr, tmp);
128         }
129         return tmp;
130 }
131
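/**
 * @brief Get the list of actions performed by a given thread on a given object
 * @param obj The memory location of interest
 * @param tid The thread of interest
 * @return The thread's action list for @a obj, or NULL if there is none
 */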
132 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
133 {
134         SnapVector<action_list_t> *wrv = obj_thrd_map->get(obj);
135         if (wrv == NULL)
136                 return NULL;
137         unsigned int thread = id_to_int(tid);
138         if (thread < wrv->size())
139                 return &(*wrv)[thread];
140         else
141                 return NULL;
142 }
143
144 /** @return a thread ID for a new Thread */
145 thread_id_t ModelExecution::get_next_id()
146 {
147         return priv->next_thread_id++;
148 }
149
150 /** @return the number of user threads created during this execution */
151 unsigned int ModelExecution::get_num_threads() const
152 {
153         return priv->next_thread_id;
154 }
155
156 /** @return a sequence number for a new ModelAction */
157 modelclock_t ModelExecution::get_next_seq_num()
158 {
159         return ++priv->used_sequence_numbers;
160 }
161
162 /**
163  * @brief Should the current action wake up a given thread?
164  *
165  * @param curr The current action
166  * @param thread The thread that we might wake up
167  * @return True, if we should wake up the sleeping thread; false otherwise
168  */
169 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
170 {
171         const ModelAction *asleep = thread->get_pending();
172         /* Don't allow partial RMW to wake anyone up */
173         if (curr->is_rmwr())
174                 return false;
175         /* Synchronizing actions may have been backtracked */
176         if (asleep->could_synchronize_with(curr))
177                 return true;
178         /* All acquire/release fences and fence-acquire/store-release */
179         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
180                 return true;
181         /* Fence-release + store can awake load-acquire on the same location */
182         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
183                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
184                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
185                         return true;
186         }
187         return false;
188 }
189
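/**
 * @brief Remove any sleeping threads that the current action may allow to
 * make progress from the scheduler's sleep set
 * @param curr The current action
 */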
190 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
191 {
192         for (unsigned int i = 0; i < get_num_threads(); i++) {
193                 Thread *thr = get_thread(int_to_id(i));
194                 if (scheduler->is_sleep_set(thr)) {
195                         if (should_wake_up(curr, thr))
196                                 /* Remove this thread from sleep set */
197                                 scheduler->remove_sleep(thr);
198                 }
199         }
200 }
201
202 /** @brief Alert the model-checker that an incorrectly-ordered
203  * synchronization was made */
204 void ModelExecution::set_bad_synchronization()
205 {
206         priv->bad_synchronization = true;
207 }
208
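/**
 * @brief Record a bug report for the current execution
 * @param msg A descriptive message for the bug
 * @return True if the execution was halted immediately (the bug occurred
 * along a feasible prefix); false otherwise
 */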
209 bool ModelExecution::assert_bug(const char *msg)
210 {
211         priv->bugs.push_back(new bug_message(msg));
212
213         if (isfeasibleprefix()) {
214                 set_assert();
215                 return true;
216         }
217         return false;
218 }
219
220 /** @return True, if any bugs have been reported for this execution */
221 bool ModelExecution::have_bug_reports() const
222 {
223         return priv->bugs.size() != 0;
224 }
225
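/** @return The list of bug reports recorded for this execution */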
226 SnapVector<bug_message *> * ModelExecution::get_bugs() const
227 {
228         return &priv->bugs;
229 }
230
231 /**
232  * Check whether the current trace has triggered an assertion which should halt
233  * its execution.
234  *
235  * @return True, if the execution should be aborted; false otherwise
236  */
237 bool ModelExecution::has_asserted() const
238 {
239         return priv->asserted;
240 }
241
242 /**
243  * Trigger a trace assertion which should cause this execution to be halted.
244  * This can be due to a detected bug or due to an infeasibility that should
245  * halt ASAP.
246  */
247 void ModelExecution::set_assert()
248 {
249         priv->asserted = true;
250 }
251
252 /**
253  * Check if we are in a deadlock. Should only be called at the end of an
254  * execution, although it should not give false positives in the middle of an
255  * execution (there should be some ENABLED thread).
256  *
257  * @return True if program is in a deadlock; false otherwise
258  */
259 bool ModelExecution::is_deadlocked() const
260 {
261         bool blocking_threads = false;
262         for (unsigned int i = 0; i < get_num_threads(); i++) {
263                 thread_id_t tid = int_to_id(i);
264                 if (is_enabled(tid))
265                         return false;
266                 Thread *t = get_thread(tid);
267                 if (!t->is_model_thread() && t->get_pending())
268                         blocking_threads = true;
269         }
270         return blocking_threads;
271 }
272
273 /**
274  * Check if this is a complete execution. That is, have all threads completed
275  * execution (rather than exiting because sleep sets have forced a redundant
276  * execution).
277  *
278  * @return True if the execution is complete.
279  */
280 bool ModelExecution::is_complete_execution() const
281 {
282         for (unsigned int i = 0; i < get_num_threads(); i++)
283                 if (is_enabled(int_to_id(i)))
284                         return false;
285         return true;
286 }
287
288 /**
289  * @brief Find the last fence-related backtracking conflict for a ModelAction
290  *
291  * This function performs the search for the most recent conflicting action
292  * against which we should perform backtracking, as affected by fence
293  * operations. This includes pairs of potentially-synchronizing actions which
294  * occur due to fence-acquire or fence-release, and hence should be explored in
295  * the opposite execution order.
296  *
297  * @param act The current action
298  * @return The most recent action which conflicts with act due to fences
299  */
300 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
301 {
302         /* Only perform release/acquire fence backtracking for stores */
303         if (!act->is_write())
304                 return NULL;
305
306         /* Find a fence-release (or, act is a release) */
307         ModelAction *last_release;
308         if (act->is_release())
309                 last_release = act;
310         else
311                 last_release = get_last_fence_release(act->get_tid());
312         if (!last_release)
313                 return NULL;
314
315         /* Skip past the release */
316         action_list_t *list = action_trace;
317         action_list_t::reverse_iterator rit;
318         for (rit = list->rbegin(); rit != list->rend(); rit++)
319                 if (*rit == last_release)
320                         break;
321         ASSERT(rit != list->rend());
322
323         /* Find a prior:
324          *   load-acquire
325          * or
326          *   load --sb-> fence-acquire */
327         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
328         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
329         bool found_acquire_fences = false;
330         for ( ; rit != list->rend(); rit++) {
331                 ModelAction *prev = *rit;
332                 if (act->same_thread(prev))
333                         continue;
334
335                 int tid = id_to_int(prev->get_tid());
336
337                 if (prev->is_read() && act->same_var(prev)) {
338                         if (prev->is_acquire()) {
339                                 /* Found most recent load-acquire, don't need
340                                  * to search for more fences */
341                                 if (!found_acquire_fences)
342                                         return NULL;
343                         } else {
344                                 prior_loads[tid] = prev;
345                         }
346                 }
347                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
348                         found_acquire_fences = true;
349                         acquire_fences[tid] = prev;
350                 }
351         }
352
353         ModelAction *latest_backtrack = NULL;
354         for (unsigned int i = 0; i < acquire_fences.size(); i++)
355                 if (acquire_fences[i] && prior_loads[i])
356                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
357                                 latest_backtrack = acquire_fences[i];
358         return latest_backtrack;
359 }
360
361 /**
362  * @brief Find the last backtracking conflict for a ModelAction
363  *
364  * This function performs the search for the most recent conflicting action
365  * against which we should perform backtracking. This primary includes pairs of
366  * synchronizing actions which should be explored in the opposite execution
367  * order.
368  *
369  * @param act The current action
370  * @return The most recent action which conflicts with act
371  */
372 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
373 {
374         switch (act->get_type()) {
375         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
376         case ATOMIC_READ:
377         case ATOMIC_WRITE:
378         case ATOMIC_RMW: {
379                 ModelAction *ret = NULL;
380
381                 /* linear search: from most recent to oldest */
382                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
383                 action_list_t::reverse_iterator rit;
384                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
385                         ModelAction *prev = *rit;
386                         if (prev->could_synchronize_with(act)) {
387                                 ret = prev;
388                                 break;
389                         }
390                 }
391
392                 ModelAction *ret2 = get_last_fence_conflict(act);
393                 if (!ret2)
394                         return ret;
395                 if (!ret)
396                         return ret2;
397                 if (*ret < *ret2)
398                         return ret2;
399                 return ret;
400         }
401         case ATOMIC_LOCK:
402         case ATOMIC_TRYLOCK: {
403                 /* linear search: from most recent to oldest */
404                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
405                 action_list_t::reverse_iterator rit;
406                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
407                         ModelAction *prev = *rit;
408                         if (act->is_conflicting_lock(prev))
409                                 return prev;
410                 }
411                 break;
412         }
413         case ATOMIC_UNLOCK: {
414                 /* linear search: from most recent to oldest */
415                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
416                 action_list_t::reverse_iterator rit;
417                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
418                         ModelAction *prev = *rit;
419                         if (!act->same_thread(prev) && prev->is_failed_trylock())
420                                 return prev;
421                 }
422                 break;
423         }
424         case ATOMIC_WAIT: {
425                 /* linear search: from most recent to oldest */
426                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
427                 action_list_t::reverse_iterator rit;
428                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
429                         ModelAction *prev = *rit;
430                         if (!act->same_thread(prev) && prev->is_failed_trylock())
431                                 return prev;
432                         if (!act->same_thread(prev) && prev->is_notify())
433                                 return prev;
434                 }
435                 break;
436         }
437
438         case ATOMIC_NOTIFY_ALL:
439         case ATOMIC_NOTIFY_ONE: {
440                 /* linear search: from most recent to oldest */
441                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
442                 action_list_t::reverse_iterator rit;
443                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
444                         ModelAction *prev = *rit;
445                         if (!act->same_thread(prev) && prev->is_wait())
446                                 return prev;
447                 }
448                 break;
449         }
450         default:
451                 break;
452         }
453         return NULL;
454 }
455
456 /** This method finds backtracking points: prior conflicting actions
457  * against which we should try to reorder the parameter ModelAction.
458  *
459  * @param act The ModelAction to find backtracking points for.
460  */
461 void ModelExecution::set_backtracking(ModelAction *act)
462 {
463         Thread *t = get_thread(act);
464         ModelAction *prev = get_last_conflict(act);
465         if (prev == NULL)
466                 return;
467
468         Node *node = prev->get_node()->get_parent();
469
470         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
471         int low_tid, high_tid;
472         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
473                 low_tid = id_to_int(act->get_tid());
474                 high_tid = low_tid + 1;
475         } else {
476                 low_tid = 0;
477                 high_tid = get_num_threads();
478         }
479
480         for (int i = low_tid; i < high_tid; i++) {
481                 thread_id_t tid = int_to_id(i);
482
483                 /* Make sure this thread can be enabled here. */
484                 if (i >= node->get_num_threads())
485                         break;
486
487                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
488                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
489                 if (node->enabled_status(tid) != THREAD_ENABLED)
490                         continue;
491
492                 /* Check if this has been explored already */
493                 if (node->has_been_explored(tid))
494                         continue;
495
496                 /* See if fairness allows */
497                 if (params->fairwindow != 0 && !node->has_priority(tid)) {
498                         bool unfair = false;
499                         for (int t = 0; t < node->get_num_threads(); t++) {
500                                 thread_id_t tother = int_to_id(t);
501                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
502                                         unfair = true;
503                                         break;
504                                 }
505                         }
506                         if (unfair)
507                                 continue;
508                 }
509
510                 /* See if CHESS-like yield fairness allows */
511                 if (params->yieldon) {
512                         bool unfair = false;
513                         for (int t = 0; t < node->get_num_threads(); t++) {
514                                 thread_id_t tother = int_to_id(t);
515                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
516                                         unfair = true;
517                                         break;
518                                 }
519                         }
520                         if (unfair)
521                                 continue;
522                 }
523
524                 /* Cache the latest backtracking point */
525                 set_latest_backtrack(prev);
526
527                 /* If this is a new backtracking point, mark the tree */
528                 if (!node->set_backtrack(tid))
529                         continue;
530                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
531                                         id_to_int(prev->get_tid()),
532                                         id_to_int(t->get_id()));
533                 if (DBG_ENABLED()) {
534                         prev->print();
535                         act->print();
536                 }
537         }
538 }
539
540 /**
541  * @brief Cache a backtracking point as the "most recent", if eligible
542  *
543  * Note that this does not prepare the NodeStack for this backtracking
544  * operation; it only caches the action on a per-execution basis
545  *
546  * @param act The operation at which we should explore a different next action
547  * (i.e., backtracking point)
548  * @return True, if this action is now the most recent backtracking point;
549  * false otherwise
550  */
551 bool ModelExecution::set_latest_backtrack(ModelAction *act)
552 {
553         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
554                 priv->next_backtrack = act;
555                 return true;
556         }
557         return false;
558 }
559
560 /**
561  * Returns last backtracking point. The model checker will explore a different
562  * path for this point in the next execution.
563  * @return The ModelAction at which the next execution should diverge.
564  */
565 ModelAction * ModelExecution::get_next_backtrack()
566 {
567         ModelAction *next = priv->next_backtrack;
568         priv->next_backtrack = NULL;
569         return next;
570 }
571
572 /**
573  * Processes a read model action.
574  * @param curr is the read model action to process.
575  * @return True if processing this read updates the mo_graph.
576  */
577 bool ModelExecution::process_read(ModelAction *curr)
578 {
579         Node *node = curr->get_node();
580         while (true) {
581                 bool updated = false;
582                 switch (node->get_read_from_status()) {
583                 case READ_FROM_PAST: {
584                         const ModelAction *rf = node->get_read_from_past();
585                         ASSERT(rf);
586
587                         mo_graph->startChanges();
588
589                         ASSERT(!is_infeasible());
590                         if (!check_recency(curr, rf)) {
591                                 if (node->increment_read_from()) {
592                                         mo_graph->rollbackChanges();
593                                         continue;
594                                 } else {
595                                         priv->too_many_reads = true;
596                                 }
597                         }
598
599                         updated = r_modification_order(curr, rf);
600                         read_from(curr, rf);
601                         mo_graph->commitChanges();
602                         mo_check_promises(curr, true);
603                         break;
604                 }
605                 case READ_FROM_PROMISE: {
606                         Promise *promise = curr->get_node()->get_read_from_promise();
607                         if (promise->add_reader(curr))
608                                 priv->failed_promise = true;
609                         curr->set_read_from_promise(promise);
610                         mo_graph->startChanges();
611                         if (!check_recency(curr, promise))
612                                 priv->too_many_reads = true;
613                         updated = r_modification_order(curr, promise);
614                         mo_graph->commitChanges();
615                         break;
616                 }
617                 case READ_FROM_FUTURE: {
618                         /* Read from future value */
619                         struct future_value fv = node->get_future_value();
620                         Promise *promise = new Promise(this, curr, fv);
621                         curr->set_read_from_promise(promise);
622                         promises->push_back(promise);
623                         mo_graph->startChanges();
624                         updated = r_modification_order(curr, promise);
625                         mo_graph->commitChanges();
626                         break;
627                 }
628                 default:
629                         ASSERT(false);
630                 }
631                 get_thread(curr)->set_return_value(curr->get_return_value());
632                 return updated;
633         }
634 }
635
636 /**
637  * Processes a lock, trylock, or unlock model action.  @param curr is
638  * the mutex model action to process.
639  *
640  * The trylock operation checks whether the lock is taken.  If not, it
641  * falls through to the normal lock operation case.  If so, it returns
642  * failure.
643  *
644  * The lock operation has already been checked that it is enabled, so
645  * it just grabs the lock and synchronizes with the previous unlock.
646  *
647  * The unlock operation has to re-enable all of the threads that are
648  * waiting on the lock.
649  *
650  * @return True if synchronization was updated; false otherwise
651  */
652 bool ModelExecution::process_mutex(ModelAction *curr)
653 {
654         std::mutex *mutex = curr->get_mutex();
655         struct std::mutex_state *state = NULL;
656
657         if (mutex)
658                 state = mutex->get_state();
659
660         switch (curr->get_type()) {
661         case ATOMIC_TRYLOCK: {
662                 bool success = !state->locked;
663                 curr->set_try_lock(success);
664                 if (!success) {
665                         get_thread(curr)->set_return_value(0);
666                         break;
667                 }
668                 get_thread(curr)->set_return_value(1);
669         }
670                 //otherwise fall through to the lock case
671         case ATOMIC_LOCK: {
672                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
673                         assert_bug("Lock access before initialization");
674                 state->locked = get_thread(curr);
675                 ModelAction *unlock = get_last_unlock(curr);
676                 //synchronize with the previous unlock statement
677                 if (unlock != NULL) {
678                         synchronize(unlock, curr);
679                         return true;
680                 }
681                 break;
682         }
683         case ATOMIC_WAIT:
684         case ATOMIC_UNLOCK: {
685                 /* wake up the other threads */
686                 for (unsigned int i = 0; i < get_num_threads(); i++) {
687                         Thread *t = get_thread(int_to_id(i));
688                         Thread *curr_thrd = get_thread(curr);
689                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
690                                 scheduler->wake(t);
691                 }
692
693                 /* unlock the lock - after checking who was waiting on it */
694                 state->locked = NULL;
695
696                 if (!curr->is_wait())
697                         break; /* The rest is only for ATOMIC_WAIT */
698
699                 /* Should we go to sleep? (simulate spurious failures) */
700                 if (curr->get_node()->get_misc() == 0) {
701                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
702                         /* disable us */
703                         scheduler->sleep(get_thread(curr));
704                 }
705                 break;
706         }
707         case ATOMIC_NOTIFY_ALL: {
708                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
709                 //activate all the waiting threads
710                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
711                         scheduler->wake(get_thread(*rit));
712                 }
713                 waiters->clear();
714                 break;
715         }
716         case ATOMIC_NOTIFY_ONE: {
717                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
718                 int wakeupthread = curr->get_node()->get_misc();
719                 action_list_t::iterator it = waiters->begin();
720                 advance(it, wakeupthread);
721                 scheduler->wake(get_thread(*it));
722                 waiters->erase(it);
723                 break;
724         }
725
726         default:
727                 ASSERT(0);
728         }
729         return false;
730 }
731
732 /**
733  * @brief Check if the current pending promises allow a future value to be sent
734  *
735  * If one of the following is true:
736  *  (a) there are no pending promises
737  *  (b) the reader and writer do not cross any promises
738  * Then, it is safe to pass a future value back now.
739  *
740  * Otherwise, we must save the pending future value until (a) or (b) is true
741  *
742  * @param writer The operation which sends the future value. Must be a write.
743  * @param reader The operation which will observe the value. Must be a read.
744  * @return True if the future value can be sent now; false if it must wait.
745  */
746 bool ModelExecution::promises_may_allow(const ModelAction *writer,
747                 const ModelAction *reader) const
748 {
749         if (promises->empty())
750                 return true;
751         for (int i = promises->size() - 1; i >= 0; i--) {
752                 ModelAction *pr = (*promises)[i]->get_reader(0);
753                 //reader is after promise...doesn't cross any promise
754                 if (*reader > *pr)
755                         return true;
756                 //writer is after promise, reader before...bad...
757                 if (*writer > *pr)
758                         return false;
759         }
760         return true;
761 }
762
763 /**
764  * @brief Add a future value to a reader
765  *
766  * This function performs a few additional checks to ensure that the future
767  * value can be feasibly observed by the reader
768  *
769  * @param writer The operation whose value is sent. Must be a write.
770  * @param reader The read operation which may read the future value. Must be a read.
771  */
772 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
773 {
774         /* Do more ambitious checks now that mo is more complete */
775         if (!mo_may_allow(writer, reader))
776                 return;
777
778         Node *node = reader->get_node();
779
780         /* Find an ancestor thread which exists at the time of the reader */
781         Thread *write_thread = get_thread(writer);
782         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
783                 write_thread = write_thread->get_parent();
784
785         struct future_value fv = {
786                 writer->get_write_value(),
787                 writer->get_seq_number() + params->maxfuturedelay,
788                 write_thread->get_id(),
789         };
790         if (node->add_future_value(fv))
791                 set_latest_backtrack(reader);
792 }
793
794 /**
795  * Process a write ModelAction
796  * @param curr The ModelAction to process
797  * @return True if the mo_graph was updated or promises were resolved
798  */
799 bool ModelExecution::process_write(ModelAction *curr)
800 {
801         /* Readers to which we may send our future value */
802         ModelVector<ModelAction *> send_fv;
803
804         const ModelAction *earliest_promise_reader;
805         bool updated_promises = false;
806
807         bool updated_mod_order = w_modification_order(curr, &send_fv);
808         Promise *promise = pop_promise_to_resolve(curr);
809
810         if (promise) {
811                 earliest_promise_reader = promise->get_reader(0);
812                 updated_promises = resolve_promise(curr, promise);
813         } else
814                 earliest_promise_reader = NULL;
815
816         for (unsigned int i = 0; i < send_fv.size(); i++) {
817                 ModelAction *read = send_fv[i];
818
819                 /* Don't send future values to reads after the Promise we resolve */
820                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
821                         /* Check if future value can be sent immediately */
822                         if (promises_may_allow(curr, read)) {
823                                 add_future_value(curr, read);
824                         } else {
825                                 futurevalues->push_back(PendingFutureValue(curr, read));
826                         }
827                 }
828         }
829
830         /* Check the pending future values */
831         for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
832                 struct PendingFutureValue pfv = (*futurevalues)[i];
833                 if (promises_may_allow(pfv.writer, pfv.reader)) {
834                         add_future_value(pfv.writer, pfv.reader);
835                         futurevalues->erase(futurevalues->begin() + i);
836                 }
837         }
838
839         mo_graph->commitChanges();
840         mo_check_promises(curr, false);
841
842         get_thread(curr)->set_return_value(VALUE_NONE);
843         return updated_mod_order || updated_promises;
844 }
845
846 /**
847  * Process a fence ModelAction
848  * @param curr The ModelAction to process
849  * @return True if synchronization was updated
850  */
851 bool ModelExecution::process_fence(ModelAction *curr)
852 {
853         /*
854          * fence-relaxed: no-op
855          * fence-release: only log the occurrence (not in this function), for
856          *   use in later synchronization
857          * fence-acquire (this function): search for hypothetical release
858          *   sequences
859          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
860          */
861         bool updated = false;
862         if (curr->is_acquire()) {
863                 action_list_t *list = action_trace;
864                 action_list_t::reverse_iterator rit;
865                 /* Find X : is_read(X) && X --sb-> curr */
866                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
867                         ModelAction *act = *rit;
868                         if (act == curr)
869                                 continue;
870                         if (act->get_tid() != curr->get_tid())
871                                 continue;
872                         /* Stop at the beginning of the thread */
873                         if (act->is_thread_start())
874                                 break;
875                         /* Stop once we reach a prior fence-acquire */
876                         if (act->is_fence() && act->is_acquire())
877                                 break;
878                         if (!act->is_read())
879                                 continue;
880                         /* read-acquire will find its own release sequences */
881                         if (act->is_acquire())
882                                 continue;
883
884                         /* Establish hypothetical release sequences */
885                         rel_heads_list_t release_heads;
886                         get_release_seq_heads(curr, act, &release_heads);
887                         for (unsigned int i = 0; i < release_heads.size(); i++)
888                                 synchronize(release_heads[i], curr);
889                         if (release_heads.size() != 0)
890                                 updated = true;
891                 }
892         }
893         return updated;
894 }
895
896 /**
897  * @brief Process the current action for thread-related activity
898  *
899  * Performs current-action processing for a THREAD_* ModelAction. Processing
900  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
901  * synchronization, etc.  This function is a no-op for non-THREAD actions
902  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
903  *
904  * @param curr The current action
905  * @return True if synchronization was updated or a thread completed
906  */
907 bool ModelExecution::process_thread_action(ModelAction *curr)
908 {
909         bool updated = false;
910
911         switch (curr->get_type()) {
912         case THREAD_CREATE: {
913                 thrd_t *thrd = (thrd_t *)curr->get_location();
914                 struct thread_params *params = (struct thread_params *)curr->get_value();
915                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
916                 add_thread(th);
917                 th->set_creation(curr);
918                 /* Promises can be satisfied by children */
919                 for (unsigned int i = 0; i < promises->size(); i++) {
920                         Promise *promise = (*promises)[i];
921                         if (promise->thread_is_available(curr->get_tid()))
922                                 promise->add_thread(th->get_id());
923                 }
924                 break;
925         }
926         case THREAD_JOIN: {
927                 Thread *blocking = curr->get_thread_operand();
928                 ModelAction *act = get_last_action(blocking->get_id());
929                 synchronize(act, curr);
930                 updated = true; /* trigger rel-seq checks */
931                 break;
932         }
933         case THREAD_FINISH: {
934                 Thread *th = get_thread(curr);
935                 /* Wake up any joining threads */
936                 for (unsigned int i = 0; i < get_num_threads(); i++) {
937                         Thread *waiting = get_thread(int_to_id(i));
938                         if (waiting->waiting_on() == th &&
939                                         waiting->get_pending()->is_thread_join())
940                                 scheduler->wake(waiting);
941                 }
942                 th->complete();
943                 /* Completed thread can't satisfy promises */
944                 for (unsigned int i = 0; i < promises->size(); i++) {
945                         Promise *promise = (*promises)[i];
946                         if (promise->thread_is_available(th->get_id()))
947                                 if (promise->eliminate_thread(th->get_id()))
948                                         priv->failed_promise = true;
949                 }
950                 updated = true; /* trigger rel-seq checks */
951                 break;
952         }
953         case THREAD_START: {
954                 check_promises(curr->get_tid(), NULL, curr->get_cv());
955                 break;
956         }
957         default:
958                 break;
959         }
960
961         return updated;
962 }
963
964 /**
965  * @brief Process the current action for release sequence fixup activity
966  *
967  * Performs model-checker release sequence fixups for the current action,
968  * forcing a single pending release sequence to break (with a given, potential
969  * "loose" write) or to complete (i.e., synchronize). If a pending release
970  * sequence forms a complete release sequence, then we must perform the fixup
971  * synchronization, mo_graph additions, etc.
972  *
973  * @param curr The current action; must be a release sequence fixup action
974  * @param work_queue The work queue to which to add work items as they are
975  * generated
976  */
977 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
978 {
979         const ModelAction *write = curr->get_node()->get_relseq_break();
980         struct release_seq *sequence = pending_rel_seqs->back();
981         pending_rel_seqs->pop_back();
982         ASSERT(sequence);
983         ModelAction *acquire = sequence->acquire;
984         const ModelAction *rf = sequence->rf;
985         const ModelAction *release = sequence->release;
986         ASSERT(acquire);
987         ASSERT(release);
988         ASSERT(rf);
989         ASSERT(release->same_thread(rf));
990
991         if (write == NULL) {
992                 /**
993                  * @todo Forcing a synchronization requires that we set
994                  * modification order constraints. For instance, we can't allow
995                  * a fixup sequence in which two separate read-acquire
996                  * operations read from the same sequence, where the first one
997                  * synchronizes and the other doesn't. Essentially, we can't
998                  * allow any writes to insert themselves between 'release' and
999                  * 'rf'
1000                  */
1001
1002                 /* Must synchronize */
1003                 if (!synchronize(release, acquire))
1004                         return;
1005                 /* Re-check all pending release sequences */
1006                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1007                 /* Re-check act for mo_graph edges */
1008                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1009
1010                 /* propagate synchronization to later actions */
1011                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1012                 for (; (*rit) != acquire; rit++) {
1013                         ModelAction *propagate = *rit;
1014                         if (acquire->happens_before(propagate)) {
1015                                 synchronize(acquire, propagate);
1016                                 /* Re-check 'propagate' for mo_graph edges */
1017                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1018                         }
1019                 }
1020         } else {
1021                 /* Break release sequence with new edges:
1022                  *   release --mo--> write --mo--> rf */
1023                 mo_graph->addEdge(release, write);
1024                 mo_graph->addEdge(write, rf);
1025         }
1026
1027         /* See if we have realized a data race */
1028         checkDataRaces();
1029 }
1030
1031 /**
1032  * Initialize the current action by performing one or more of the following
1033  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1034  * in the NodeStack, manipulating backtracking sets, allocating and
1035  * initializing clock vectors, and computing the promises to fulfill.
1036  *
1037  * @param curr The current action, as passed from the user context; may be
1038  * freed/invalidated after the execution of this function, with a different
1039  * action "returned" in its place (pass-by-reference)
1040  * @return True if curr is a newly-explored action; false otherwise
1041  */
1042 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1043 {
1044         ModelAction *newcurr;
1045
1046         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1047                 newcurr = process_rmw(*curr);
1048                 delete *curr;
1049
1050                 if (newcurr->is_rmw())
1051                         compute_promises(newcurr);
1052
1053                 *curr = newcurr;
1054                 return false;
1055         }
1056
1057         (*curr)->set_seq_number(get_next_seq_num());
1058
1059         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1060         if (newcurr) {
1061                 /* First restore type and order in case of RMW operation */
1062                 if ((*curr)->is_rmwr())
1063                         newcurr->copy_typeandorder(*curr);
1064
1065                 ASSERT((*curr)->get_location() == newcurr->get_location());
1066                 newcurr->copy_from_new(*curr);
1067
1068                 /* Discard duplicate ModelAction; use action from NodeStack */
1069                 delete *curr;
1070
1071                 /* Always compute new clock vector */
1072                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1073
1074                 *curr = newcurr;
1075                 return false; /* Action was explored previously */
1076         } else {
1077                 newcurr = *curr;
1078
1079                 /* Always compute new clock vector */
1080                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1081
1082                 /* Assign most recent release fence */
1083                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1084
1085                 /*
1086                  * Perform one-time actions when pushing new ModelAction onto
1087                  * NodeStack
1088                  */
1089                 if (newcurr->is_write())
1090                         compute_promises(newcurr);
1091                 else if (newcurr->is_relseq_fixup())
1092                         compute_relseq_breakwrites(newcurr);
1093                 else if (newcurr->is_wait())
1094                         newcurr->get_node()->set_misc_max(2);
1095                 else if (newcurr->is_notify_one()) {
1096                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1097                 }
1098                 return true; /* This was a new ModelAction */
1099         }
1100 }
1101
1102 /**
1103  * @brief Establish reads-from relation between two actions
1104  *
1105  * Perform basic operations involved with establishing a concrete rf relation,
1106  * including setting the ModelAction data and checking for release sequences.
1107  *
1108  * @param act The action that is reading (must be a read)
1109  * @param rf The action from which we are reading (must be a write)
1110  *
1111  * @return True if this read established synchronization
1112  */
1113 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1114 {
1115         ASSERT(rf);
1116         ASSERT(rf->is_write());
1117
1118         act->set_read_from(rf);
1119         if (act->is_acquire()) {
1120                 rel_heads_list_t release_heads;
1121                 get_release_seq_heads(act, act, &release_heads);
1122                 int num_heads = release_heads.size();
1123                 for (unsigned int i = 0; i < release_heads.size(); i++)
1124                         if (!synchronize(release_heads[i], act))
1125                                 num_heads--;
1126                 return num_heads > 0;
1127         }
1128         return false;
1129 }
1130
1131 /**
1132  * @brief Synchronizes two actions
1133  *
1134  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1135  * This function performs the synchronization as well as providing other hooks
1136  * for other checks along with synchronization.
1137  *
1138  * @param first The left-hand side of the synchronizes-with relation
1139  * @param second The right-hand side of the synchronizes-with relation
1140  * @return True if the synchronization was successful (i.e., was consistent
1141  * with the execution order); false otherwise
1142  */
1143 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1144 {
1145         if (*second < *first) {
1146                 set_bad_synchronization();
1147                 return false;
1148         }
1149         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1150         return second->synchronize_with(first);
1151 }
1152
1153 /**
1154  * Check promises and eliminate potentially-satisfying threads when a thread is
1155  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1156  * no longer satisfy a promise generated from that thread.
1157  *
1158  * @param blocker The thread on which a thread is waiting
1159  * @param waiting The waiting thread
1160  */
1161 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1162 {
1163         for (unsigned int i = 0; i < promises->size(); i++) {
1164                 Promise *promise = (*promises)[i];
1165                 if (!promise->thread_is_available(waiting->get_id()))
1166                         continue;
1167                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1168                         ModelAction *reader = promise->get_reader(j);
1169                         if (reader->get_tid() != blocker->get_id())
1170                                 continue;
1171                         if (promise->eliminate_thread(waiting->get_id())) {
1172                                 /* Promise has failed */
1173                                 priv->failed_promise = true;
1174                         } else {
1175                                 /* Only eliminate the 'waiting' thread once */
1176                                 return;
1177                         }
1178                 }
1179         }
1180 }
1181
1182 /**
1183  * @brief Check whether a model action is enabled.
1184  *
1185  * Checks whether a lock or join operation would be successful (i.e., is the
1186  * lock already locked, or is the joined thread already complete). If not, put
1187  * the action in a waiter list.
1188  *
1189  * @param curr is the ModelAction to check whether it is enabled.
1190  * @return a bool that indicates whether the action is enabled.
1191  */
1192 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1193         if (curr->is_lock()) {
1194                 std::mutex *lock = curr->get_mutex();
1195                 struct std::mutex_state *state = lock->get_state();
1196                 if (state->locked)
1197                         return false;
1198         } else if (curr->is_thread_join()) {
1199                 Thread *blocking = curr->get_thread_operand();
1200                 if (!blocking->is_complete()) {
1201                         thread_blocking_check_promises(blocking, get_thread(curr));
1202                         return false;
1203                 }
1204         }
1205
1206         return true;
1207 }
1208
1209 /**
1210  * This is the heart of the model checker routine. It performs model-checking
1211  * actions corresponding to a given "current action." Among other processes, it
1212  * calculates reads-from relationships, updates synchronization clock vectors,
1213  * forms a memory_order constraints graph, and handles replay/backtrack
1214  * execution when running permutations of previously-observed executions.
1215  *
1216  * @param curr The current action to process
1217  * @return The ModelAction that is actually executed; may be different than
1218  * curr; may be NULL, if the current action is not enabled to run
1219  */
1220 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1221 {
1222         ASSERT(curr);
1223         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1224         bool newly_explored = initialize_curr_action(&curr);
1225
1226         DBG();
1227
1228         wake_up_sleeping_actions(curr);
1229
1230         /* Compute fairness information for CHESS yield algorithm */
1231         if (params->yieldon) {
1232                 curr->get_node()->update_yield(scheduler);
1233         }
1234
1235         /* Add the action to lists before any other model-checking tasks */
1236         if (!second_part_of_rmw)
1237                 add_action_to_lists(curr);
1238
1239         /* Build may_read_from set for newly-created actions */
1240         if (newly_explored && curr->is_read())
1241                 build_may_read_from(curr);
1242
1243         /* Initialize work_queue with the "current action" work */
1244         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1245         while (!work_queue.empty() && !has_asserted()) {
1246                 WorkQueueEntry work = work_queue.front();
1247                 work_queue.pop_front();
1248
1249                 switch (work.type) {
1250                 case WORK_CHECK_CURR_ACTION: {
1251                         ModelAction *act = work.action;
1252                         bool update = false; /* update this location's release seq's */
1253                         bool update_all = false; /* update all release seq's */
1254
1255                         if (process_thread_action(curr))
1256                                 update_all = true;
1257
1258                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1259                                 update = true;
1260
1261                         if (act->is_write() && process_write(act))
1262                                 update = true;
1263
1264                         if (act->is_fence() && process_fence(act))
1265                                 update_all = true;
1266
1267                         if (act->is_mutex_op() && process_mutex(act))
1268                                 update_all = true;
1269
1270                         if (act->is_relseq_fixup())
1271                                 process_relseq_fixup(curr, &work_queue);
1272
1273                         if (update_all)
1274                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1275                         else if (update)
1276                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1277                         break;
1278                 }
1279                 case WORK_CHECK_RELEASE_SEQ:
1280                         resolve_release_sequences(work.location, &work_queue);
1281                         break;
1282                 case WORK_CHECK_MO_EDGES: {
1283                         /** @todo Complete verification of work_queue */
1284                         ModelAction *act = work.action;
1285                         bool updated = false;
1286
1287                         if (act->is_read()) {
1288                                 const ModelAction *rf = act->get_reads_from();
1289                                 const Promise *promise = act->get_reads_from_promise();
1290                                 if (rf) {
1291                                         if (r_modification_order(act, rf))
1292                                                 updated = true;
1293                                 } else if (promise) {
1294                                         if (r_modification_order(act, promise))
1295                                                 updated = true;
1296                                 }
1297                         }
1298                         if (act->is_write()) {
1299                                 if (w_modification_order(act, NULL))
1300                                         updated = true;
1301                         }
1302                         mo_graph->commitChanges();
1303
1304                         if (updated)
1305                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1306                         break;
1307                 }
1308                 default:
1309                         ASSERT(false);
1310                         break;
1311                 }
1312         }
1313
1314         check_curr_backtracking(curr);
1315         set_backtracking(curr);
1316         return curr;
1317 }
1318
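/**
 * @brief Mark the current action as the latest backtracking point if its node
 * (or its parent) still has unexplored choices: backtracks, misc values,
 * reads-from candidates, promises, or release-sequence breaks
 * @param curr The current action
 */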
1319 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1320 {
1321         Node *currnode = curr->get_node();
1322         Node *parnode = currnode->get_parent();
1323
1324         if ((parnode && !parnode->backtrack_empty()) ||
1325                          !currnode->misc_empty() ||
1326                          !currnode->read_from_empty() ||
1327                          !currnode->promise_empty() ||
1328                          !currnode->relseq_break_empty()) {
1329                 set_latest_backtrack(curr);
1330         }
1331 }
1332
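/** @return True if any outstanding promise has passed its expiration time */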
1333 bool ModelExecution::promises_expired() const
1334 {
1335         for (unsigned int i = 0; i < promises->size(); i++) {
1336                 Promise *promise = (*promises)[i];
1337                 if (promise->get_expiration() < priv->used_sequence_numbers)
1338                         return true;
1339         }
1340         return false;
1341 }
1342
1343 /**
1344  * This is the strongest feasibility check available.
1345  * @return whether the current trace (partial or complete) must be a prefix of
1346  * a feasible trace.
1347  */
1348 bool ModelExecution::isfeasibleprefix() const
1349 {
1350         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1351 }
1352
1353 /**
1354  * Print diagnostic information about an infeasible execution
1355  * @param prefix A string to prefix the output with; if NULL, then a default
1356  * message prefix will be provided
1357  */
1358 void ModelExecution::print_infeasibility(const char *prefix) const
1359 {
1360         char buf[200];  /* large enough for every infeasibility flag at once */
1361         char *ptr = buf;
1362         if (mo_graph->checkForCycles())
1363                 ptr += sprintf(ptr, "[mo cycle]");
1364         if (priv->failed_promise)
1365                 ptr += sprintf(ptr, "[failed promise]");
1366         if (priv->too_many_reads)
1367                 ptr += sprintf(ptr, "[too many reads]");
1368         if (priv->no_valid_reads)
1369                 ptr += sprintf(ptr, "[no valid reads-from]");
1370         if (priv->bad_synchronization)
1371                 ptr += sprintf(ptr, "[bad sw ordering]");
1372         if (promises_expired())
1373                 ptr += sprintf(ptr, "[promise expired]");
1374         if (promises->size() != 0)
1375                 ptr += sprintf(ptr, "[unresolved promise]");
1376         if (ptr != buf)
1377                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1378 }
1379
1380 /**
1381  * Returns whether the current completed trace is feasible, except for pending
1382  * release sequences.
1383  */
1384 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1385 {
1386         return !is_infeasible() && promises->size() == 0;
1387 }
1388
1389 /**
1390  * Check if the current partial trace is infeasible. Does not check any
1391  * end-of-execution flags, which might still rule out the execution. Thus, this
1392  * is useful only for ruling an execution infeasible, never for declaring it feasible.
1393  * @return whether the current partial trace is infeasible.
1394  */
1395 bool ModelExecution::is_infeasible() const
1396 {
1397         return mo_graph->checkForCycles() ||
1398                 priv->no_valid_reads ||
1399                 priv->failed_promise ||
1400                 priv->too_many_reads ||
1401                 priv->bad_synchronization ||
1402                 promises_expired();
1403 }
1404
1405 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1406 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1407         ModelAction *lastread = get_last_action(act->get_tid());
1408         lastread->process_rmw(act);
1409         if (act->is_rmw()) {
1410                 if (lastread->get_reads_from())
1411                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1412                 else
1413                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1414                 mo_graph->commitChanges();
1415         }
1416         return lastread;
1417 }
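
/*
 * Illustrative sketch (not part of this checker; names are hypothetical): the
 * kind of user code whose atomic RMW exercises process_rmw(). The fetch_add
 * below reads from the initializing store, so an RMW edge from that store to
 * the fetch_add is recorded in mo_graph.
 *
 *     #include <atomic>
 *     std::atomic<int> counter(0);
 *     void bump() {
 *         counter.fetch_add(1, std::memory_order_relaxed);
 *     }
 */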
1418
1419 /**
1420  * A helper function for ModelExecution::check_recency, to check if the current
1421  * thread is able to read from a different write/promise for 'params.maxreads'
1422  * consecutive steps and whether that write/promise should become visible (i.e., is
1423  * ordered later in the modification order). This helps model memory liveness.
1424  *
1425  * @param curr The current action. Must be a read.
1426  * @param rf The write/promise from which we plan to read
1427  * @param other_rf The write/promise from which we may read instead
1428  * @return True if we were able to read from other_rf for params.maxreads steps
1429  */
1430 template <typename T, typename U>
1431 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1432 {
1433         /* Need a different write/promise */
1434         if (other_rf->equals(rf))
1435                 return false;
1436
1437         /* Only look for "newer" writes/promises */
1438         if (!mo_graph->checkReachable(rf, other_rf))
1439                 return false;
1440
1441         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1442         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1443         action_list_t::reverse_iterator rit = list->rbegin();
1444         ASSERT((*rit) == curr);
1445         /* Skip past curr */
1446         rit++;
1447
1448         /* Does this write/promise work for everyone? */
1449         for (int i = 0; i < params->maxreads; i++, rit++) {
1450                 ModelAction *act = *rit;
1451                 if (!act->may_read_from(other_rf))
1452                         return false;
1453         }
1454         return true;
1455 }
1456
1457 /**
1458  * Checks whether a thread has read from the same write or Promise too many
1459  * times without seeing the effects of a later write/Promise.
1460  *
1461  * Basic idea:
1462  * 1) there must be a different write/promise that we could read from,
1463  * 2) we must have read from the same write/promise in excess of maxreads times,
1464  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1465  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1466  *
1467  * If so, we decide that the execution is no longer feasible.
1468  *
1469  * @param curr The current action. Must be a read.
1470  * @param rf The ModelAction/Promise from which we might read.
1471  * @return True if the read should succeed; false otherwise
1472  */
1473 template <typename T>
1474 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1475 {
1476         if (!params->maxreads)
1477                 return true;
1478
1479         // NOTE: the next check is just an optimization; it is not strictly necessary
1480         if (curr->get_node()->get_read_from_past_size() +
1481                         curr->get_node()->get_read_from_promise_size() <= 1)
1482                 return true;
1483
1484         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1485         int tid = id_to_int(curr->get_tid());
1486         ASSERT(tid < (int)thrd_lists->size());
1487         action_list_t *list = &(*thrd_lists)[tid];
1488         action_list_t::reverse_iterator rit = list->rbegin();
1489         ASSERT((*rit) == curr);
1490         /* Skip past curr */
1491         rit++;
1492
1493         action_list_t::reverse_iterator ritcopy = rit;
1494         /* See if we have enough consecutive reads from the same write/promise */
1495         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1496                 if (ritcopy == list->rend())
1497                         return true;
1498                 ModelAction *act = *ritcopy;
1499                 if (!act->is_read())
1500                         return true;
1501                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1502                         return true;
1503                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1504                         return true;
1505                 if (act->get_node()->get_read_from_past_size() +
1506                                 act->get_node()->get_read_from_promise_size() <= 1)
1507                         return true;
1508         }
1509         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1510                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1511                 if (should_read_instead(curr, rf, write))
1512                         return false; /* liveness failure */
1513         }
1514         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1515                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1516                 if (should_read_instead(curr, rf, promise))
1517                         return false; /* liveness failure */
1518         }
1519         return true;
1520 }
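
/*
 * Illustrative sketch (not part of this checker; names are hypothetical): the
 * spin-loop pattern the maxreads/check_recency heuristic targets. Once the
 * store of 1 exists and is mo-ordered after the initial value, executions in
 * which the loop keeps reading the stale 0 for more than params.maxreads
 * consecutive loads are treated as liveness failures and discarded.
 *
 *     #include <atomic>
 *     std::atomic<int> ready(0);
 *     void waiter() {
 *         while (ready.load(std::memory_order_relaxed) == 0)
 *             ;  // would otherwise admit an infinite run of stale reads
 *     }
 *     void signaller() {
 *         ready.store(1, std::memory_order_relaxed);
 *     }
 */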
1521
1522 /**
1523  * @brief Updates the mo_graph with the constraints imposed from the current
1524  * read.
1525  *
1526  * Basic idea is the following: Go through each other thread and find
1527  * the last action that happened before our read.  Two cases:
1528  *
1529  * -# The action is a write: that write must either occur before
1530  * the write we read from or be the write we read from.
1531  * -# The action is a read: the write that that action read from
1532  * must occur before the write we read from or be the same write.
1533  *
1534  * @param curr The current action. Must be a read.
1535  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1536  * @return True if modification order edges were added; false otherwise
1537  */
1538 template <typename rf_type>
1539 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1540 {
1541         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1542         unsigned int i;
1543         bool added = false;
1544         ASSERT(curr->is_read());
1545
1546         /* Last SC fence in the current thread */
1547         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1548         ModelAction *last_sc_write = NULL;
1549         if (curr->is_seqcst())
1550                 last_sc_write = get_last_seq_cst_write(curr);
1551
1552         /* Iterate over all threads */
1553         for (i = 0; i < thrd_lists->size(); i++) {
1554                 /* Last SC fence in thread i */
1555                 ModelAction *last_sc_fence_thread_local = NULL;
1556                 if (int_to_id((int)i) != curr->get_tid())
1557                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1558
1559                 /* Last SC fence in thread i, before last SC fence in current thread */
1560                 ModelAction *last_sc_fence_thread_before = NULL;
1561                 if (last_sc_fence_local)
1562                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1563
1564                 /* Iterate over actions in thread, starting from most recent */
1565                 action_list_t *list = &(*thrd_lists)[i];
1566                 action_list_t::reverse_iterator rit;
1567                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1568                         ModelAction *act = *rit;
1569
1570                         /* Skip curr */
1571                         if (act == curr)
1572                                 continue;
1573                         /* Don't want to add reflexive edges on 'rf' */
1574                         if (act->equals(rf)) {
1575                                 if (act->happens_before(curr))
1576                                         break;
1577                                 else
1578                                         continue;
1579                         }
1580
1581                         if (act->is_write()) {
1582                                 /* C++, Section 29.3 statement 5 */
1583                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1584                                                 *act < *last_sc_fence_thread_local) {
1585                                         added = mo_graph->addEdge(act, rf) || added;
1586                                         break;
1587                                 }
1588                                 /* C++, Section 29.3 statement 4 */
1589                                 else if (act->is_seqcst() && last_sc_fence_local &&
1590                                                 *act < *last_sc_fence_local) {
1591                                         added = mo_graph->addEdge(act, rf) || added;
1592                                         break;
1593                                 }
1594                                 /* C++, Section 29.3 statement 6 */
1595                                 else if (last_sc_fence_thread_before &&
1596                                                 *act < *last_sc_fence_thread_before) {
1597                                         added = mo_graph->addEdge(act, rf) || added;
1598                                         break;
1599                                 }
1600                         }
1601
1602                         /* C++, Section 29.3 statement 3 (second subpoint) */
1603                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1604                                 added = mo_graph->addEdge(act, rf) || added;
1605                                 break;
1606                         }
1607
1608                         /*
1609                          * Include at most one act per-thread that "happens
1610                          * before" curr
1611                          */
1612                         if (act->happens_before(curr)) {
1613                                 if (act->is_write()) {
1614                                         added = mo_graph->addEdge(act, rf) || added;
1615                                 } else {
1616                                         const ModelAction *prevrf = act->get_reads_from();
1617                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1618                                         if (prevrf) {
1619                                                 if (!prevrf->equals(rf))
1620                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1621                                         } else if (!prevrf_promise->equals(rf)) {
1622                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1623                                         }
1624                                 }
1625                                 break;
1626                         }
1627                 }
1628         }
1629
1630         /*
1631          * All compatible, thread-exclusive promises must be ordered after any
1632          * concrete loads from the same thread
1633          */
1634         for (unsigned int i = 0; i < promises->size(); i++)
1635                 if ((*promises)[i]->is_compatible_exclusive(curr))
1636                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1637
1638         return added;
1639 }
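
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of
 * the read case above: if an earlier read in the same thread (which happens
 * before the current read) observed the store of 1, that store is ordered
 * mo-before whatever the current read reads from. Reading the initial 0
 * afterwards would therefore require the store of 1 to be mo-before the
 * initial value, contradicting the initial value being mo-first, so the
 * execution is rejected as an mo cycle.
 *
 *     #include <atomic>
 *     std::atomic<int> x(0);
 *     void writer() { x.store(1, std::memory_order_relaxed); }
 *     void reader() {
 *         int a = x.load(std::memory_order_relaxed);  // suppose a == 1
 *         int b = x.load(std::memory_order_relaxed);  // b == 0 is then infeasible
 *     }
 */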
1640
1641 /**
1642  * Updates the mo_graph with the constraints imposed from the current write.
1643  *
1644  * Basic idea is the following: Go through each other thread and find
1645  * the latest action that happened before our write.  Two cases:
1646  *
1647  * (1) The action is a write => that write must occur before
1648  * the current write
1649  *
1650  * (2) The action is a read => the write that that action read from
1651  * must occur before the current write.
1652  *
1653  * This method also handles two other issues:
1654  *
1655  * (I) Sequential Consistency: Making sure that if the current write is
1656  * seq_cst, it occurs after the previous seq_cst write.
1657  *
1658  * (II) Sending the write back to non-synchronizing reads.
1659  *
1660  * @param curr The current action. Must be a write.
1661  * @param send_fv A vector for stashing reads to which we may pass our future
1662  * value. If NULL, then don't record any future values.
1663  * @return True if modification order edges were added; false otherwise
1664  */
1665 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1666 {
1667         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1668         unsigned int i;
1669         bool added = false;
1670         ASSERT(curr->is_write());
1671
1672         if (curr->is_seqcst()) {
1673                 /* We have to at least see the last sequentially consistent write,
1674                  * so we are initialized. */
1675                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1676                 if (last_seq_cst != NULL) {
1677                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1678                 }
1679         }
1680
1681         /* Last SC fence in the current thread */
1682         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1683
1684         /* Iterate over all threads */
1685         for (i = 0; i < thrd_lists->size(); i++) {
1686                 /* Last SC fence in thread i, before last SC fence in current thread */
1687                 ModelAction *last_sc_fence_thread_before = NULL;
1688                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1689                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1690
1691                 /* Iterate over actions in thread, starting from most recent */
1692                 action_list_t *list = &(*thrd_lists)[i];
1693                 action_list_t::reverse_iterator rit;
1694                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1695                         ModelAction *act = *rit;
1696                         if (act == curr) {
1697                                 /*
1698                                  * 1) If RMW and it actually read from something, then we
1699                                  * already have all relevant edges, so just skip to next
1700                                  * thread.
1701                                  *
1702                                  * 2) If RMW and it didn't read from anything, we should add
1703                                  * whatever edge we can get to speed up convergence.
1704                                  *
1705                                  * 3) If normal write, we need to look at earlier actions, so
1706                                  * continue processing list.
1707                                  */
1708                                 if (curr->is_rmw()) {
1709                                         if (curr->get_reads_from() != NULL)
1710                                                 break;
1711                                         else
1712                                                 continue;
1713                                 } else
1714                                         continue;
1715                         }
1716
1717                         /* C++, Section 29.3 statement 7 */
1718                         if (last_sc_fence_thread_before && act->is_write() &&
1719                                         *act < *last_sc_fence_thread_before) {
1720                                 added = mo_graph->addEdge(act, curr) || added;
1721                                 break;
1722                         }
1723
1724                         /*
1725                          * Include at most one act per-thread that "happens
1726                          * before" curr
1727                          */
1728                         if (act->happens_before(curr)) {
1729                                 /*
1730                                  * Note: if act is RMW, just add edge:
1731                                  *   act --mo--> curr
1732                                  * The following edge should be handled elsewhere:
1733                                  *   readfrom(act) --mo--> act
1734                                  */
1735                                 if (act->is_write())
1736                                         added = mo_graph->addEdge(act, curr) || added;
1737                                 else if (act->is_read()) {
1738                                         // if the previous read has not resolved its reads-from yet, just keep going
1739                                         if (act->get_reads_from() == NULL)
1740                                                 continue;
1741                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1742                                 }
1743                                 break;
1744                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1745                                                      !act->same_thread(curr)) {
1746                                 /* We have an action that:
1747                                    (1) did not happen before us
1748                                    (2) is a read and we are a write
1749                                    (3) cannot synchronize with us
1750                                    (4) is in a different thread
1751                                    =>
1752                                    that read could potentially read from our write.  Note that
1753                                    these checks are overly conservative at this point, we'll
1754                                    do more checks before actually removing the
1755                                    pendingfuturevalue.
1756
1757                                  */
1758                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1759                                         if (!is_infeasible())
1760                                                 send_fv->push_back(act);
1761                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1762                                                 add_future_value(curr, act);
1763                                 }
1764                         }
1765                 }
1766         }
1767
1768         /*
1769          * All compatible, thread-exclusive promises must be ordered after any
1770          * concrete stores to the same thread, or else they can be merged with
1771          * this store later
1772          */
1773         for (unsigned int i = 0; i < promises->size(); i++)
1774                 if ((*promises)[i]->is_compatible_exclusive(curr))
1775                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1776
1777         return added;
1778 }
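
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of
 * the simplest constraint added here: a write that happens before the current
 * write to the same location must also be ordered before it in mo. The same
 * walk also orders the current write after the last seq_cst write when the
 * current write is seq_cst.
 *
 *     #include <atomic>
 *     std::atomic<int> x(0);
 *     void thread_a() {
 *         x.store(1, std::memory_order_relaxed);  // happens before the next store
 *         x.store(2, std::memory_order_relaxed);  // gets edge store(1) --mo--> store(2)
 *     }
 */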
1779
1780 /** Arbitrary reads from the future are not allowed.  Section 29.3
1781  * part 9 places some constraints.  This method checks one consequence of that
1782  * constraint.  Others require compiler support. */
1783 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1784 {
1785         if (!writer->is_rmw())
1786                 return true;
1787
1788         if (!reader->is_rmw())
1789                 return true;
1790
1791         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1792                 if (search == reader)
1793                         return false;
1794                 if (search->get_tid() == reader->get_tid() &&
1795                                 search->happens_before(reader))
1796                         break;
1797         }
1798
1799         return true;
1800 }
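
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of
 * the RMW cycle this check rejects: two RMWs on the same location cannot each
 * read from the other, since walking the writer's reads-from chain would lead
 * back to the reader and the added value would appear out of thin air.
 *
 *     #include <atomic>
 *     std::atomic<int> x(0);
 *     void thread_a() { x.fetch_add(1, std::memory_order_relaxed); }
 *     void thread_b() { x.fetch_add(1, std::memory_order_relaxed); }
 *     // Feasible: one RMW reads 0 and the other reads its result (1).
 *     // Rejected: each RMW reading from the other.
 */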
1801
1802 /**
1803  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1804  * some constraints. This method checks the following constraint (others
1805  * require compiler support):
1806  *
1807  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1808  */
1809 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1810 {
1811         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1812         unsigned int i;
1813         /* Iterate over all threads */
1814         for (i = 0; i < thrd_lists->size(); i++) {
1815                 const ModelAction *write_after_read = NULL;
1816
1817                 /* Iterate over actions in thread, starting from most recent */
1818                 action_list_t *list = &(*thrd_lists)[i];
1819                 action_list_t::reverse_iterator rit;
1820                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1821                         ModelAction *act = *rit;
1822
1823                         /* Don't disallow due to act == reader */
1824                         if (!reader->happens_before(act) || reader == act)
1825                                 break;
1826                         else if (act->is_write())
1827                                 write_after_read = act;
1828                         else if (act->is_read() && act->get_reads_from() != NULL)
1829                                 write_after_read = act->get_reads_from();
1830                 }
1831
1832                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1833                         return false;
1834         }
1835         return true;
1836 }
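
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of
 * the rule above (X --hb-> Y --mo-> Z means X must not read from Z):
 *
 *     #include <atomic>
 *     std::atomic<int> x(0);
 *     void thread_a() {
 *         int r = x.load(std::memory_order_relaxed);  // X
 *         x.store(1, std::memory_order_relaxed);      // Y (X --hb-> Y by sequencing)
 *         (void)r;
 *     }
 *     void thread_b() {
 *         x.store(2, std::memory_order_relaxed);      // Z
 *     }
 *     // If Y --mo-> Z, then feeding the future value 2 back to X is disallowed;
 *     // r == 2 is only possible in executions where Z is mo-ordered before Y.
 */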
1837
1838 /**
1839  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1840  * The ModelAction under consideration is expected to be taking part in
1841  * release/acquire synchronization as an object of the "reads from" relation.
1842  * Note that this can only provide release sequence support for RMW chains
1843  * which do not read from the future, as those actions cannot be traced until
1844  * their "promise" is fulfilled. Similarly, we may not even establish the
1845  * presence of a release sequence with certainty, as some modification order
1846  * constraints may be decided further in the future. Thus, this function
1847  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1848  * and a boolean representing certainty.
1849  *
1850  * @param rf The action that might be part of a release sequence. Must be a
1851  * write.
1852  * @param release_heads A pass-by-reference style return parameter. After
1853  * execution of this function, release_heads will contain the heads of all the
1854  * relevant release sequences, if any exist with certainty
1855  * @param pending A pass-by-reference style return parameter which is only used
1856  * when returning false (i.e., uncertain). Returns information regarding
1857  * an uncertain release sequence, including any write operations that might
1858  * break the sequence.
1859  * @return true, if the ModelExecution is certain that release_heads is complete;
1860  * false otherwise
1861  */
1862 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1863                 rel_heads_list_t *release_heads,
1864                 struct release_seq *pending) const
1865 {
1866         /* Only check for release sequences if there are no cycles */
1867         if (mo_graph->checkForCycles())
1868                 return false;
1869
1870         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1871                 ASSERT(rf->is_write());
1872
1873                 if (rf->is_release())
1874                         release_heads->push_back(rf);
1875                 else if (rf->get_last_fence_release())
1876                         release_heads->push_back(rf->get_last_fence_release());
1877                 if (!rf->is_rmw())
1878                         break; /* End of RMW chain */
1879
1880                 /** @todo Need to be smarter here...  In the linux lock
1881                  * example, this will run to the beginning of the program for
1882                  * every acquire. */
1883                 /** @todo The way to be smarter here is to keep going until 1
1884                  * thread has a release preceded by an acquire and you've seen
1885                  *       both. */
1886
1887                 /* acq_rel RMW is a sufficient stopping condition */
1888                 if (rf->is_acquire() && rf->is_release())
1889                         return true; /* complete */
1890         }
1891         if (!rf) {
1892                 /* read from future: need to settle this later */
1893                 pending->rf = NULL;
1894                 return false; /* incomplete */
1895         }
1896
1897         if (rf->is_release())
1898                 return true; /* complete */
1899
1900         /* else relaxed write
1901          * - check for fence-release in the same thread (29.8, stmt. 3)
1902          * - check modification order for contiguous subsequence
1903          *   -> rf must be same thread as release */
1904
1905         const ModelAction *fence_release = rf->get_last_fence_release();
1906         /* Synchronize with a fence-release unconditionally; we don't need to
1907          * find any more "contiguous subsequence..." for it */
1908         if (fence_release)
1909                 release_heads->push_back(fence_release);
1910
1911         int tid = id_to_int(rf->get_tid());
1912         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1913         action_list_t *list = &(*thrd_lists)[tid];
1914         action_list_t::const_reverse_iterator rit;
1915
1916         /* Find rf in the thread list */
1917         rit = std::find(list->rbegin(), list->rend(), rf);
1918         ASSERT(rit != list->rend());
1919
1920         /* Find the last {write,fence}-release */
1921         for (; rit != list->rend(); rit++) {
1922                 if (fence_release && *(*rit) < *fence_release)
1923                         break;
1924                 if ((*rit)->is_release())
1925                         break;
1926         }
1927         if (rit == list->rend()) {
1928                 /* No write-release in this thread */
1929                 return true; /* complete */
1930         } else if (fence_release && *(*rit) < *fence_release) {
1931                 /* The fence-release is more recent (and so, "stronger") than
1932                  * the most recent write-release */
1933                 return true; /* complete */
1934         } /* else, need to establish contiguous release sequence */
1935         ModelAction *release = *rit;
1936
1937         ASSERT(rf->same_thread(release));
1938
1939         pending->writes.clear();
1940
1941         bool certain = true;
1942         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1943                 if (id_to_int(rf->get_tid()) == (int)i)
1944                         continue;
1945                 list = &(*thrd_lists)[i];
1946
1947                 /* Can we ensure no future writes from this thread may break
1948                  * the release seq? */
1949                 bool future_ordered = false;
1950
1951                 ModelAction *last = get_last_action(int_to_id(i));
1952                 Thread *th = get_thread(int_to_id(i));
1953                 if ((last && rf->happens_before(last)) ||
1954                                 !is_enabled(th) ||
1955                                 th->is_complete())
1956                         future_ordered = true;
1957
1958                 ASSERT(!th->is_model_thread() || future_ordered);
1959
1960                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1961                         const ModelAction *act = *rit;
1962                         /* Reach synchronization -> this thread is complete */
1963                         if (act->happens_before(release))
1964                                 break;
1965                         if (rf->happens_before(act)) {
1966                                 future_ordered = true;
1967                                 continue;
1968                         }
1969
1970                         /* Only non-RMW writes can break release sequences */
1971                         if (!act->is_write() || act->is_rmw())
1972                                 continue;
1973
1974                         /* Check modification order */
1975                         if (mo_graph->checkReachable(rf, act)) {
1976                                 /* rf --mo--> act */
1977                                 future_ordered = true;
1978                                 continue;
1979                         }
1980                         if (mo_graph->checkReachable(act, release))
1981                                 /* act --mo--> release */
1982                                 break;
1983                         if (mo_graph->checkReachable(release, act) &&
1984                                       mo_graph->checkReachable(act, rf)) {
1985                                 /* release --mo-> act --mo--> rf */
1986                                 return true; /* complete */
1987                         }
1988                         /* act may break release sequence */
1989                         pending->writes.push_back(act);
1990                         certain = false;
1991                 }
1992                 if (!future_ordered)
1993                         certain = false; /* This thread is uncertain */
1994         }
1995
1996         if (certain) {
1997                 release_heads->push_back(release);
1998                 pending->writes.clear();
1999         } else {
2000                 pending->release = release;
2001                 pending->rf = rf;
2002         }
2003         return certain;
2004 }
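
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of a
 * release sequence this function traces: the acquire load reads from a relaxed
 * RMW, and following the RMW chain backwards reaches the release store, which
 * becomes the release head the load synchronizes with.
 *
 *     #include <atomic>
 *     std::atomic<int> x(0);
 *     void thread_a() { x.store(1, std::memory_order_release); }      // release head
 *     void thread_b() { x.fetch_add(1, std::memory_order_relaxed); }  // continues the sequence
 *     void thread_c() {
 *         int r = x.load(std::memory_order_acquire);
 *         // r == 2 means the load read from the RMW and synchronizes with
 *         // thread_a's release store.
 *     }
 */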
2005
2006 /**
2007  * An interface for getting the release sequence head(s) with which a
2008  * given ModelAction must synchronize. This function only returns a non-empty
2009  * result when it can locate a release sequence head with certainty. Otherwise,
2010  * it may mark the internal state of the ModelExecution so that it will handle
2011  * the release sequence at a later time, causing @a acquire to update its
2012  * synchronization at some later point in execution.
2013  *
2014  * @param acquire The 'acquire' action that may synchronize with a release
2015  * sequence
2016  * @param read The read action that may read from a release sequence; this may
2017  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2018  * when 'acquire' is a fence-acquire)
2019  * @param release_heads A pass-by-reference return parameter. Will be filled
2020  * with the head(s) of the release sequence(s), if they exist with certainty.
2021  * @see ModelExecution::release_seq_heads
2022  */
2023 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2024                 ModelAction *read, rel_heads_list_t *release_heads)
2025 {
2026         const ModelAction *rf = read->get_reads_from();
2027         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2028         sequence->acquire = acquire;
2029         sequence->read = read;
2030
2031         if (!release_seq_heads(rf, release_heads, sequence)) {
2032                 /* add act to 'lazy checking' list */
2033                 pending_rel_seqs->push_back(sequence);
2034         } else {
2035                 snapshot_free(sequence);
2036         }
2037 }
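
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of
 * the case where 'acquire' and 'read' differ: a relaxed load followed by an
 * acquire fence in the same thread. The fence is the action that must
 * synchronize with the release head(s) found for the earlier load.
 *
 *     #include <atomic>
 *     std::atomic<int> flag(0);
 *     int payload;
 *     void producer() {
 *         payload = 42;
 *         flag.store(1, std::memory_order_release);
 *     }
 *     void consumer() {
 *         int r = flag.load(std::memory_order_relaxed);         // 'read'
 *         std::atomic_thread_fence(std::memory_order_acquire);  // 'acquire'
 *         if (r == 1) { int v = payload; (void)v; }             // ordered only via the fence
 *     }
 */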
2038
2039 /**
2040  * Attempt to resolve all stashed operations that might synchronize with a
2041  * release sequence for a given location. This implements the "lazy" portion of
2042  * determining whether or not a release sequence was contiguous, since not all
2043  * modification order information is present at the time an action occurs.
2044  *
2045  * @param location The location/object that should be checked for release
2046  * sequence resolutions. A NULL value means to check all locations.
2047  * @param work_queue The work queue to which to add work items as they are
2048  * generated
2049  * @return True if any updates occurred (new synchronization, new mo_graph
2050  * edges)
2051  */
2052 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2053 {
2054         bool updated = false;
2055         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2056         while (it != pending_rel_seqs->end()) {
2057                 struct release_seq *pending = *it;
2058                 ModelAction *acquire = pending->acquire;
2059                 const ModelAction *read = pending->read;
2060
2061                 /* Only resolve sequences on the given location, if provided */
2062                 if (location && read->get_location() != location) {
2063                         it++;
2064                         continue;
2065                 }
2066
2067                 const ModelAction *rf = read->get_reads_from();
2068                 rel_heads_list_t release_heads;
2069                 bool complete;
2070                 complete = release_seq_heads(rf, &release_heads, pending);
2071                 for (unsigned int i = 0; i < release_heads.size(); i++)
2072                         if (!acquire->has_synchronized_with(release_heads[i]))
2073                                 if (synchronize(release_heads[i], acquire))
2074                                         updated = true;
2075
2076                 if (updated) {
2077                         /* Re-check all pending release sequences */
2078                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2079                         /* Re-check read-acquire for mo_graph edges */
2080                         if (acquire->is_read())
2081                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2082
2083                         /* propagate synchronization to later actions */
2084                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2085                         for (; (*rit) != acquire; rit++) {
2086                                 ModelAction *propagate = *rit;
2087                                 if (acquire->happens_before(propagate)) {
2088                                         synchronize(acquire, propagate);
2089                                         /* Re-check 'propagate' for mo_graph edges */
2090                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2091                                 }
2092                         }
2093                 }
2094                 if (complete) {
2095                         it = pending_rel_seqs->erase(it);
2096                         snapshot_free(pending);
2097                 } else {
2098                         it++;
2099                 }
2100         }
2101
2102         // Any new synchronization may have realized a data race, so check for one now.
2103         checkDataRaces();
2104
2105         return updated;
2106 }
2107
2108 /**
2109  * Performs various bookkeeping operations for the current ModelAction. For
2110  * instance, adds action to the per-object, per-thread action vector and to the
2111  * action trace list of all thread actions.
2112  *
2113  * @param act is the ModelAction to add.
2114  */
2115 void ModelExecution::add_action_to_lists(ModelAction *act)
2116 {
2117         int tid = id_to_int(act->get_tid());
2118         ModelAction *uninit = NULL;
2119         int uninit_id = -1;
2120         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2121         if (list->empty() && act->is_atomic_var()) {
2122                 uninit = get_uninitialized_action(act);
2123                 uninit_id = id_to_int(uninit->get_tid());
2124                 list->push_front(uninit);
2125         }
2126         list->push_back(act);
2127
2128         action_trace->push_back(act);
2129         if (uninit)
2130                 action_trace->push_front(uninit);
2131
2132         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2133         if (tid >= (int)vec->size())
2134                 vec->resize(priv->next_thread_id);
2135         (*vec)[tid].push_back(act);
2136         if (uninit)
2137                 (*vec)[uninit_id].push_front(uninit);
2138
2139         if ((int)thrd_last_action->size() <= tid)
2140                 thrd_last_action->resize(get_num_threads());
2141         (*thrd_last_action)[tid] = act;
2142         if (uninit)
2143                 (*thrd_last_action)[uninit_id] = uninit;
2144
2145         if (act->is_fence() && act->is_release()) {
2146                 if ((int)thrd_last_fence_release->size() <= tid)
2147                         thrd_last_fence_release->resize(get_num_threads());
2148                 (*thrd_last_fence_release)[tid] = act;
2149         }
2150
2151         if (act->is_wait()) {
2152                 void *mutex_loc = (void *) act->get_value();
2153                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2154
2155                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2156                 if (tid >= (int)vec->size())
2157                         vec->resize(priv->next_thread_id);
2158                 (*vec)[tid].push_back(act);
2159         }
2160 }
2161
2162 /**
2163  * @brief Get the last action performed by a particular Thread
2164  * @param tid The thread ID of the Thread in question
2165  * @return The last action in the thread
2166  */
2167 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2168 {
2169         int threadid = id_to_int(tid);
2170         if (threadid < (int)thrd_last_action->size())
2171                 return (*thrd_last_action)[threadid];
2172         else
2173                 return NULL;
2174 }
2175
2176 /**
2177  * @brief Get the last fence release performed by a particular Thread
2178  * @param tid The thread ID of the Thread in question
2179  * @return The last fence release in the thread, if one exists; NULL otherwise
2180  */
2181 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2182 {
2183         int threadid = id_to_int(tid);
2184         if (threadid < (int)thrd_last_fence_release->size())
2185                 return (*thrd_last_fence_release)[threadid];
2186         else
2187                 return NULL;
2188 }
2189
2190 /**
2191  * Gets the last memory_order_seq_cst write (in the total global sequence)
2192  * performed on a particular object (i.e., memory location), not including the
2193  * current action.
2194  * @param curr The current ModelAction; also denotes the object location to
2195  * check
2196  * @return The last seq_cst write
2197  */
2198 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2199 {
2200         void *location = curr->get_location();
2201         action_list_t *list = get_safe_ptr_action(obj_map, location);
2202         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2203         action_list_t::reverse_iterator rit;
2204         for (rit = list->rbegin(); (*rit) != curr; rit++)
2205                 ;
2206         rit++; /* Skip past curr */
2207         for ( ; rit != list->rend(); rit++)
2208                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2209                         return *rit;
2210         return NULL;
2211 }
2212
2213 /**
2214  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2215  * performed in a particular thread, prior to a particular fence.
2216  * @param tid The ID of the thread to check
2217  * @param before_fence The fence from which to begin the search; if NULL, then
2218  * search for the most recent fence in the thread.
2219  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2220  */
2221 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2222 {
2223         /* All fences should have NULL location */
2224         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2225         action_list_t::reverse_iterator rit = list->rbegin();
2226
2227         if (before_fence) {
2228                 for (; rit != list->rend(); rit++)
2229                         if (*rit == before_fence)
2230                                 break;
2231
2232                 ASSERT(*rit == before_fence);
2233                 rit++;
2234         }
2235
2236         for (; rit != list->rend(); rit++)
2237                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2238                         return *rit;
2239         return NULL;
2240 }
2241
2242 /**
2243  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2244  * location). This function identifies the mutex according to the current
2245  * action, which is presumed to operate on the same mutex.
2246  * @param curr The current ModelAction; also denotes the object location to
2247  * check
2248  * @return The last unlock operation
2249  */
2250 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2251 {
2252         void *location = curr->get_location();
2253         action_list_t *list = get_safe_ptr_action(obj_map, location);
2254         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2255         action_list_t::reverse_iterator rit;
2256         for (rit = list->rbegin(); rit != list->rend(); rit++)
2257                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2258                         return *rit;
2259         return NULL;
2260 }
2261
2262 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2263 {
2264         ModelAction *parent = get_last_action(tid);
2265         if (!parent)
2266                 parent = get_thread(tid)->get_creation();
2267         return parent;
2268 }
2269
2270 /**
2271  * Returns the clock vector for a given thread.
2272  * @param tid The thread whose clock vector we want
2273  * @return Desired clock vector
2274  */
2275 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2276 {
2277         return get_parent_action(tid)->get_cv();
2278 }
2279
2280 /**
2281  * @brief Find the promise (if any) to resolve for the current action and
2282  * remove it from the pending promise vector
2283  * @param curr The current ModelAction. Should be a write.
2284  * @return The Promise to resolve, if any; otherwise NULL
2285  */
2286 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2287 {
2288         for (unsigned int i = 0; i < promises->size(); i++)
2289                 if (curr->get_node()->get_promise(i)) {
2290                         Promise *ret = (*promises)[i];
2291                         promises->erase(promises->begin() + i);
2292                         return ret;
2293                 }
2294         return NULL;
2295 }
2296
2297 /**
2298  * Resolve a Promise with a current write.
2299  * @param write The ModelAction that is fulfilling Promises
2300  * @param promise The Promise to resolve
2301  * @return True if the Promise was successfully resolved; false otherwise
2302  */
2303 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2304 {
2305         ModelVector<ModelAction *> actions_to_check;
2306
2307         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2308                 ModelAction *read = promise->get_reader(i);
2309                 read_from(read, write);
2310                 actions_to_check.push_back(read);
2311         }
2312         /* Make sure the promise's value matches the write's value */
2313         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2314         if (!mo_graph->resolvePromise(promise, write))
2315                 priv->failed_promise = true;
2316
2317         /**
2318          * @todo  It is possible to end up in an inconsistent state, where a
2319          * "resolved" promise may still be referenced if
2320          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2321          *
2322          * Note that the inconsistency only matters when dumping mo_graph to
2323          * file.
2324          *
2325          * delete promise;
2326          */
2327
2328         // Check whether reading these writes has made threads unable to
2329         // resolve promises
2330         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2331                 ModelAction *read = actions_to_check[i];
2332                 mo_check_promises(read, true);
2333         }
2334
2335         return true;
2336 }
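
/*
 * Illustrative sketch (not part of this checker; names are hypothetical) of
 * where Promises arise: relaxed "load buffering", in which each load may
 * speculatively read a value from a store that has not executed yet. The
 * speculative read is recorded as a Promise, and resolve_promise() later binds
 * it to the concrete store once that store occurs.
 *
 *     #include <atomic>
 *     std::atomic<int> x(0), y(0);
 *     void thread_a() {
 *         int r1 = x.load(std::memory_order_relaxed);  // may read 1 via a Promise
 *         y.store(1, std::memory_order_relaxed);
 *     }
 *     void thread_b() {
 *         int r2 = y.load(std::memory_order_relaxed);  // may read 1 via a Promise
 *         x.store(1, std::memory_order_relaxed);
 *     }
 *     // r1 == 1 && r2 == 1 is permitted for relaxed atomics and is reached by
 *     // promising the future stores of 1.
 */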
2337
2338 /**
2339  * Compute the set of promises that could potentially be satisfied by this
2340  * action. Note that the set computation actually appears in the Node, not in
2341  * ModelExecution.
2342  * @param curr The ModelAction that may satisfy promises
2343  */
2344 void ModelExecution::compute_promises(ModelAction *curr)
2345 {
2346         for (unsigned int i = 0; i < promises->size(); i++) {
2347                 Promise *promise = (*promises)[i];
2348                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2349                         continue;
2350
2351                 bool satisfy = true;
2352                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2353                         const ModelAction *act = promise->get_reader(j);
2354                         if (act->happens_before(curr) ||
2355                                         act->could_synchronize_with(curr)) {
2356                                 satisfy = false;
2357                                 break;
2358                         }
2359                 }
2360                 if (satisfy)
2361                         curr->get_node()->set_promise(i);
2362         }
2363 }
2364
2365 /** Checks promises in response to a change in a thread's ClockVector. */
2366 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2367 {
2368         for (unsigned int i = 0; i < promises->size(); i++) {
2369                 Promise *promise = (*promises)[i];
2370                 if (!promise->thread_is_available(tid))
2371                         continue;
2372                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2373                         const ModelAction *act = promise->get_reader(j);
2374                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2375                                         merge_cv->synchronized_since(act)) {
2376                                 if (promise->eliminate_thread(tid)) {
2377                                         /* Promise has failed */
2378                                         priv->failed_promise = true;
2379                                         return;
2380                                 }
2381                         }
2382                 }
2383         }
2384 }
2385
2386 void ModelExecution::check_promises_thread_disabled()
2387 {
2388         for (unsigned int i = 0; i < promises->size(); i++) {
2389                 Promise *promise = (*promises)[i];
2390                 if (promise->has_failed()) {
2391                         priv->failed_promise = true;
2392                         return;
2393                 }
2394         }
2395 }
2396
2397 /**
2398  * @brief Checks promises in response to addition to modification order for
2399  * threads.
2400  *
2401  * We test whether threads are still available for satisfying promises after an
2402  * addition to our modification order constraints. Those that are unavailable
2403  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2404  * that promise has failed.
2405  *
2406  * @param act The ModelAction which updated the modification order
2407  * @param is_read_check Should be true if act is a read and we must check for
2408  * updates to the store from which it read (there is a distinction here for
2409  * RMW's, which are both a load and a store)
2410  */
2411 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2412 {
2413         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2414
2415         for (unsigned int i = 0; i < promises->size(); i++) {
2416                 Promise *promise = (*promises)[i];
2417
2418                 // Is this promise on the same location?
2419                 if (!promise->same_location(write))
2420                         continue;
2421
2422                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2423                         const ModelAction *pread = promise->get_reader(j);
2424                         if (!pread->happens_before(act))
2425                                 continue;
2426                         if (mo_graph->checkPromise(write, promise)) {
2427                                 priv->failed_promise = true;
2428                                 return;
2429                         }
2430                         break;
2431                 }
2432
2433                 // Don't do any lookups twice for the same thread
2434                 if (!promise->thread_is_available(act->get_tid()))
2435                         continue;
2436
2437                 if (mo_graph->checkReachable(promise, write)) {
2438                         if (mo_graph->checkPromise(write, promise)) {
2439                                 priv->failed_promise = true;
2440                                 return;
2441                         }
2442                 }
2443         }
2444 }
2445
2446 /**
2447  * Compute the set of writes that may break the current pending release
2448  * sequence. This information is extracted from previous release sequence
2449  * calculations.
2450  *
2451  * @param curr The current ModelAction. Must be a release sequence fixup
2452  * action.
2453  */
2454 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2455 {
2456         if (pending_rel_seqs->empty())
2457                 return;
2458
2459         struct release_seq *pending = pending_rel_seqs->back();
2460         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2461                 const ModelAction *write = pending->writes[i];
2462                 curr->get_node()->add_relseq_break(write);
2463         }
2464
2465         /* NULL means don't break the sequence; just synchronize */
2466         curr->get_node()->add_relseq_break(NULL);
2467 }
2468
2469 /**
2470  * Build up an initial set of all past writes that this 'read' action may read
2471  * from, as well as any previously-observed future values that must still be valid.
2472  *
2473  * @param curr is the current ModelAction that we are exploring; it must be a
2474  * 'read' operation.
2475  */
2476 void ModelExecution::build_may_read_from(ModelAction *curr)
2477 {
2478         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2479         unsigned int i;
2480         ASSERT(curr->is_read());
2481
2482         ModelAction *last_sc_write = NULL;
2483
2484         if (curr->is_seqcst())
2485                 last_sc_write = get_last_seq_cst_write(curr);
2486
2487         /* Iterate over all threads */
2488         for (i = 0; i < thrd_lists->size(); i++) {
2489                 /* Iterate over actions in thread, starting from most recent */
2490                 action_list_t *list = &(*thrd_lists)[i];
2491                 action_list_t::reverse_iterator rit;
2492                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2493                         ModelAction *act = *rit;
2494
2495                         /* Only consider 'write' actions */
2496                         if (!act->is_write() || act == curr)
2497                                 continue;
2498
2499                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2500                         bool allow_read = true;
2501
2502                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2503                                 allow_read = false;
2504                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2505                                 allow_read = false;
2506
2507                         if (allow_read) {
2508                                 /* Only add feasible reads */
2509                                 mo_graph->startChanges();
2510                                 r_modification_order(curr, act);
2511                                 if (!is_infeasible())
2512                                         curr->get_node()->add_read_from_past(act);
2513                                 mo_graph->rollbackChanges();
2514                         }
2515
2516                         /* Include at most one act per-thread that "happens before" curr */
2517                         if (act->happens_before(curr))
2518                                 break;
2519                 }
2520         }
2521
2522         /* Inherit existing, promised future values */
2523         for (i = 0; i < promises->size(); i++) {
2524                 const Promise *promise = (*promises)[i];
2525                 const ModelAction *promise_read = promise->get_reader(0);
2526                 if (promise_read->same_var(curr)) {
2527                         /* Only add feasible future-values */
2528                         mo_graph->startChanges();
2529                         r_modification_order(curr, promise);
2530                         if (!is_infeasible())
2531                                 curr->get_node()->add_read_from_promise(promise_read);
2532                         mo_graph->rollbackChanges();
2533                 }
2534         }
2535
2536         /* Finding no valid may-read-from candidates is acceptable only if the execution is already doomed */
2537         if (!curr->get_node()->read_from_size()) {
2538                 priv->no_valid_reads = true;
2539                 set_assert();
2540         }
2541
2542         if (DBG_ENABLED()) {
2543                 model_print("Reached read action:\n");
2544                 curr->print();
2545                 model_print("Printing read_from_past\n");
2546                 curr->get_node()->print_read_from_past();
2547                 model_print("End printing read_from_past\n");
2548         }
2549 }
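/*
 * Worked example (illustrative only) of the set built above. For the program
 *
 *      // Thread 1                      // Thread 2
 *      x.store(1, relaxed);             r = x.load(relaxed);
 *      x.store(2, relaxed);
 *
 * the load's initial may-read-from set contains the UNINIT value of x and both
 * stores; candidates that make the modification-order graph infeasible are
 * pruned by the startChanges()/r_modification_order()/rollbackChanges() probe
 * in the loop above, and previously promised future values for x are added at
 * the end.
 */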
2550
2551 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2552 {
2553         for ( ; write != NULL; write = write->get_reads_from()) {
2554                 /* UNINIT actions don't have a Node, and they never sleep */
2555                 if (write->is_uninitialized())
2556                         return true;
2557                 Node *prevnode = write->get_node()->get_parent();
2558
2559                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2560                 if (write->is_release() && thread_sleep)
2561                         return true;
2562                 if (!write->is_rmw())
2563                         return false;
2564         }
2565         return true;
2566 }
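/*
 * Illustrative note on the walk above: starting from the candidate store, the
 * loop follows get_reads_from() backwards through a chain of RMWs, e.g.
 *
 *      W_release(x,1)  <-rf-  RMW(x,1->2)  <-rf-  RMW(x,2->3)   (candidate)
 *
 * A sleeping reader may take the candidate only if some store along the chain
 * is a release performed while the reader was on the sleep set (or the chain
 * bottoms out at the UNINIT action); the walk stops, and rejects, at the first
 * non-RMW store that does not qualify.
 */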
2567
2568 /**
2569  * @brief Get an action representing an uninitialized atomic
2570  *
2571  * This function retrieves the UNINIT action cached on the current action's Node, creating one if it does not yet exist
2572  *
2573  * @param curr The current action, which prompts the creation of an UNINIT action
2574  * @return A pointer to the UNINIT ModelAction
2575  */
2576 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2577 {
2578         Node *node = curr->get_node();
2579         ModelAction *act = node->get_uninit_action();
2580         if (!act) {
2581                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2582                 node->set_uninit_action(act);
2583         }
2584         act->create_cv(NULL);
2585         return act;
2586 }
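/*
 * Practical consequence for user programs (illustrative): a load from an
 * atomic location that no thread has stored to yet may read from this UNINIT
 * action and therefore observes the configured params->uninitvalue, e.g.
 *
 *      std::atomic<int> x;                             // never stored to
 *      int v = x.load(std::memory_order_relaxed);      // may observe params->uninitvalue
 */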
2587
2588 static void print_list(action_list_t *list)
2589 {
2590         action_list_t::iterator it;
2591
2592         model_print("---------------------------------------------------------------------\n");
2593
2594         unsigned int hash = 0;
2595
2596         for (it = list->begin(); it != list->end(); it++) {
2597                 const ModelAction *act = *it;
2598                 if (act->get_seq_number() > 0)
2599                         act->print();
2600                 hash = hash^(hash<<3)^((*it)->hash());
2601         }
2602         model_print("HASH %u\n", hash);
2603         model_print("---------------------------------------------------------------------\n");
2604 }
2605
2606 #if SUPPORT_MOD_ORDER_DUMP
2607 void ModelExecution::dumpGraph(char *filename) const
2608 {
2609         char buffer[200];
2610         sprintf(buffer, "%s.dot", filename);
2611         FILE *file = fopen(buffer, "w");
2612         fprintf(file, "digraph %s {\n", filename);
2613         mo_graph->dumpNodes(file);
2614         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2615
2616         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2617                 ModelAction *act = *it;
2618                 if (act->is_read()) {
2619                         mo_graph->dot_print_node(file, act);
2620                         if (act->get_reads_from())
2621                                 mo_graph->dot_print_edge(file,
2622                                                 act->get_reads_from(),
2623                                                 act,
2624                                                 "label=\"rf\", color=red, weight=2");
2625                         else
2626                                 mo_graph->dot_print_edge(file,
2627                                                 act->get_reads_from_promise(),
2628                                                 act,
2629                                                 "label=\"rf\", color=red");
2630                 }
2631                 if (thread_array[id_to_int(act->get_tid())]) {
2632                         mo_graph->dot_print_edge(file,
2633                                         thread_array[id_to_int(act->get_tid())],
2634                                         act,
2635                                         "label=\"sb\", color=blue, weight=400");
2636                 }
2637
2638                 thread_array[id_to_int(act->get_tid())] = act;
2639         }
2640         fprintf(file, "}\n");
2641         model_free(thread_array);
2642         fclose(file);
2643 }
2644 #endif
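/*
 * The dump above writes a Graphviz file named "<filename>.dot" with rf edges
 * in red and sb edges in blue. One way to render it, assuming the standard
 * Graphviz 'dot' tool is installed:
 *
 *      dot -Tpdf graph0001.dot -o graph0001.pdf
 */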
2645
2646 /** @brief Prints an execution trace summary. */
2647 void ModelExecution::print_summary() const
2648 {
2649 #if SUPPORT_MOD_ORDER_DUMP
2650         char buffername[100];
2651         sprintf(buffername, "exec%04u", execution_number);
2652         mo_graph->dumpGraphToFile(buffername);
2653         sprintf(buffername, "graph%04u", execution_number);
2654         dumpGraph(buffername);
2655 #endif
2656
2657         model_print("Execution %d:", execution_number);
2658         if (isfeasibleprefix()) {
2659                 if (scheduler->all_threads_sleeping())
2660                         model_print(" SLEEP-SET REDUNDANT");
2661                 model_print("\n");
2662         } else
2663                 print_infeasibility(" INFEASIBLE");
2664         print_list(action_trace);
2665         model_print("\n");
2666         if (!promises->empty()) {
2667                 model_print("Pending promises:\n");
2668                 for (unsigned int i = 0; i < promises->size(); i++) {
2669                         model_print(" [P%u] ", i);
2670                         (*promises)[i]->print();
2671                 }
2672                 model_print("\n");
2673         }
2674 }
2675
2676 /**
2677  * Add a Thread to the system for the first time. Should only be called once
2678  * per thread.
2679  * @param t The Thread to add
2680  */
2681 void ModelExecution::add_thread(Thread *t)
2682 {
2683         thread_map->put(id_to_int(t->get_id()), t);
2684         if (!t->is_model_thread())
2685                 scheduler->add_thread(t);
2686 }
2687
2688 /**
2689  * @brief Get a Thread reference by its ID
2690  * @param tid The Thread's ID
2691  * @return A Thread reference
2692  */
2693 Thread * ModelExecution::get_thread(thread_id_t tid) const
2694 {
2695         return thread_map->get(id_to_int(tid));
2696 }
2697
2698 /**
2699  * @brief Get a reference to the Thread in which a ModelAction was executed
2700  * @param act The ModelAction
2701  * @return A Thread reference
2702  */
2703 Thread * ModelExecution::get_thread(const ModelAction *act) const
2704 {
2705         return get_thread(act->get_tid());
2706 }
2707
2708 /**
2709  * @brief Get a Promise's "promise number"
2710  *
2711  * A "promise number" is an index number that is unique to a promise, valid
2712  * only for a specific snapshot of an execution trace. Promises may come and go
2713  * as they are generated and resolved, so an index only retains meaning for the
2714  * current snapshot.
2715  *
2716  * @param promise The Promise to check
2717  * @return The promise index, if the promise is still valid; otherwise -1
2718  */
2719 int ModelExecution::get_promise_number(const Promise *promise) const
2720 {
2721         for (unsigned int i = 0; i < promises->size(); i++)
2722                 if ((*promises)[i] == promise)
2723                         return i;
2724         /* Not found */
2725         return -1;
2726 }
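/*
 * Usage sketch (hypothetical caller): because promise numbers are only stable
 * within one snapshot, callers should re-query rather than cache the index. A
 * negative result means the promise was already resolved or discarded.
 *
 *      int idx = get_promise_number(promise);
 *      if (idx >= 0)
 *              model_print("Resolving promise P%d\n", idx);
 */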
2727
2728 /**
2729  * @brief Check if a Thread is currently enabled
2730  * @param t The Thread to check
2731  * @return True if the Thread is currently enabled
2732  */
2733 bool ModelExecution::is_enabled(Thread *t) const
2734 {
2735         return scheduler->is_enabled(t);
2736 }
2737
2738 /**
2739  * @brief Check if a Thread is currently enabled
2740  * @param tid The ID of the Thread to check
2741  * @return True if the Thread is currently enabled
2742  */
2743 bool ModelExecution::is_enabled(thread_id_t tid) const
2744 {
2745         return scheduler->is_enabled(tid);
2746 }
2747
2748 /**
2749  * @brief Select the next thread to execute based on the current action
2750  *
2751  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2752  * actions should be followed by the execution of their child thread. In either
2753  * case, the current action should determine the next thread schedule.
2754  *
2755  * @param curr The current action
2756  * @return The next thread to run, if the current action will determine this
2757  * selection; otherwise NULL
2758  */
2759 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2760 {
2761         /* Do not split atomic RMW */
2762         if (curr->is_rmwr())
2763                 return get_thread(curr);
2764         /* Follow CREATE with the created thread */
2765         if (curr->get_type() == THREAD_CREATE)
2766                 return curr->get_thread_operand();
2767         return NULL;
2768 }
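/*
 * Sketch of the intended contract (the real driver lives in the scheduler /
 * ModelChecker front end; the fallback below is hypothetical): a non-NULL
 * return value forces the next scheduling decision, otherwise the scheduler is
 * free to choose any enabled thread.
 *
 *      Thread *forced = action_select_next_thread(curr);
 *      Thread *next = (forced != NULL) ? forced : pick_any_enabled_thread();   // hypothetical fallback
 */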
2769
2770 /** @return True if the execution has taken too many steps */
2771 bool ModelExecution::too_many_steps() const
2772 {
2773         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2774 }
2775
2776 /**
2777  * Takes the next step in the execution, if possible.
2778  * @param curr The current step to take
2779  * @return The next Thread to run, if any; NULL if this execution
2780  * should terminate
2781  */
2782 Thread * ModelExecution::take_step(ModelAction *curr)
2783 {
2784         Thread *curr_thrd = get_thread(curr);
2785         ASSERT(curr_thrd->get_state() == THREAD_READY);
2786
2787         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2788         curr = check_current_action(curr);
2789         ASSERT(curr);
2790
2791         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2792                 scheduler->remove_thread(curr_thrd);
2793
2794         return action_select_next_thread(curr);
2795 }
2796
2797 /**
2798  * Launch end-of-execution release sequence fixups only when
2799  * the execution is otherwise feasible AND:
2800  *
2801  * (1) there are pending release sequences,
2802  * (2) there are pending assertions that could be invalidated by a change
2803  * in clock vectors (i.e., data races), and
2804  * (3) there are no pending promises
2805  */
2806 void ModelExecution::fixup_release_sequences()
2807 {
2808         while (!pending_rel_seqs->empty() &&
2809                         is_feasible_prefix_ignore_relseq() &&
2810                         !unrealizedraces.empty()) {
2811                 model_print("*** WARNING: release sequence fixup action "
2812                                 "(%zu pending release sequence(s)) ***\n",
2813                                 pending_rel_seqs->size());
2814                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2815                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2816                                 model_thread);
2817                 take_step(fixup);
2818         }
2819 }