execution: correct comment on check_current_action()
[model-checker.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 too_many_reads(false),
34                 no_valid_reads(false),
35                 bad_synchronization(false),
36                 asserted(false)
37         { }
38
39         ~model_snapshot_members() {
40                 for (unsigned int i = 0; i < bugs.size(); i++)
41                         delete bugs[i];
42                 bugs.clear();
43         }
44
45         unsigned int next_thread_id;
46         modelclock_t used_sequence_numbers;
47         ModelAction *next_backtrack;
48         SnapVector<bug_message *> bugs;
49         bool failed_promise;
50         bool too_many_reads;
51         bool no_valid_reads;
52         /** @brief Incorrectly-ordered synchronization was made */
53         bool bad_synchronization;
54         bool asserted;
55
56         SNAPSHOTALLOC
57 };
58
59 /** @brief Constructor */
60 ModelExecution::ModelExecution(ModelChecker *m,
61                 struct model_params *params,
62                 Scheduler *scheduler,
63                 NodeStack *node_stack) :
64         model(m),
65         params(params),
66         scheduler(scheduler),
67         action_trace(),
68         thread_map(2), /* We'll always need at least 2 threads */
69         obj_map(),
70         condvar_waiters_map(),
71         obj_thrd_map(),
72         promises(),
73         futurevalues(),
74         pending_rel_seqs(),
75         thrd_last_action(1),
76         thrd_last_fence_release(),
77         node_stack(node_stack),
78         priv(new struct model_snapshot_members()),
79         mo_graph(new CycleGraph())
80 {
81         /* Initialize a model-checker thread, for special ModelActions */
82         model_thread = new Thread(get_next_id());
83         add_thread(model_thread);
84         scheduler->register_engine(this);
85         node_stack->register_engine(this);
86 }
87
88 /** @brief Destructor */
89 ModelExecution::~ModelExecution()
90 {
91         for (unsigned int i = 0; i < get_num_threads(); i++)
92                 delete get_thread(int_to_id(i));
93
94         for (unsigned int i = 0; i < promises.size(); i++)
95                 delete promises[i];
96
97         delete mo_graph;
98         delete priv;
99 }
100
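/** @return The number of the current execution (provided by the ModelChecker) */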
101 int ModelExecution::get_execution_number() const
102 {
103         return model->get_execution_number();
104 }
105
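/**
 * @brief Get (or lazily create) the list of actions for a given location
 * @param hash The per-location HashTable to search
 * @param ptr The memory location used as the key
 * @return The existing action list for ptr, or a newly-allocated empty list
 */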
106 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
107 {
108         action_list_t *tmp = hash->get(ptr);
109         if (tmp == NULL) {
110                 tmp = new action_list_t();
111                 hash->put(ptr, tmp);
112         }
113         return tmp;
114 }
115
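/** @brief Same as get_safe_ptr_action(), but for per-thread vectors of action lists */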
116 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
117 {
118         SnapVector<action_list_t> *tmp = hash->get(ptr);
119         if (tmp == NULL) {
120                 tmp = new SnapVector<action_list_t>();
121                 hash->put(ptr, tmp);
122         }
123         return tmp;
124 }
125
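/**
 * @brief Get the list of actions a particular thread has performed on an object
 * @param obj The memory location of interest
 * @param tid The thread of interest
 * @return The thread's action list for obj, or NULL if it has none
 */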
126 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
127 {
128         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
129         if (wrv==NULL)
130                 return NULL;
131         unsigned int thread=id_to_int(tid);
132         if (thread < wrv->size())
133                 return &(*wrv)[thread];
134         else
135                 return NULL;
136 }
137
138 /** @return a thread ID for a new Thread */
139 thread_id_t ModelExecution::get_next_id()
140 {
141         return priv->next_thread_id++;
142 }
143
144 /** @return the number of threads created during this execution (including the model-checker thread) */
145 unsigned int ModelExecution::get_num_threads() const
146 {
147         return priv->next_thread_id;
148 }
149
150 /** @return a sequence number for a new ModelAction */
151 modelclock_t ModelExecution::get_next_seq_num()
152 {
153         return ++priv->used_sequence_numbers;
154 }
155
156 /**
157  * @brief Should the current action wake up a given thread?
158  *
159  * @param curr The current action
160  * @param thread The thread that we might wake up
161  * @return True, if we should wake up the sleeping thread; false otherwise
162  */
163 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
164 {
165         const ModelAction *asleep = thread->get_pending();
166         /* Don't allow partial RMW to wake anyone up */
167         if (curr->is_rmwr())
168                 return false;
169         /* Synchronizing actions may have been backtracked */
170         if (asleep->could_synchronize_with(curr))
171                 return true;
172         /* A sleeping acquire fence may be woken by any release operation (fence or store) */
173         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
174                 return true;
175         /* Fence-release + store can wake a load-acquire on the same location */
176         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
177                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
178                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
179                         return true;
180         }
181         return false;
182 }
183
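/**
 * @brief Remove from the sleep set any threads that the current action may wake up
 * @param curr The current action being executed
 */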
184 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
185 {
186         for (unsigned int i = 0; i < get_num_threads(); i++) {
187                 Thread *thr = get_thread(int_to_id(i));
188                 if (scheduler->is_sleep_set(thr)) {
189                         if (should_wake_up(curr, thr))
190                                 /* Remove this thread from sleep set */
191                                 scheduler->remove_sleep(thr);
192                 }
193         }
194 }
195
196 /** @brief Alert the model-checker that an incorrectly-ordered
197  * synchronization was made */
198 void ModelExecution::set_bad_synchronization()
199 {
200         priv->bad_synchronization = true;
201 }
202
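/**
 * @brief Record a bug report for the current execution
 * @param msg A description of the bug
 * @return True if the execution was immediately asserted (i.e., the bug was
 * hit on a feasible prefix); false otherwise
 */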
203 bool ModelExecution::assert_bug(const char *msg)
204 {
205         priv->bugs.push_back(new bug_message(msg));
206
207         if (isfeasibleprefix()) {
208                 set_assert();
209                 return true;
210         }
211         return false;
212 }
213
214 /** @return True, if any bugs have been reported for this execution */
215 bool ModelExecution::have_bug_reports() const
216 {
217         return priv->bugs.size() != 0;
218 }
219
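/** @return The list of bug reports recorded for this execution */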
220 SnapVector<bug_message *> * ModelExecution::get_bugs() const
221 {
222         return &priv->bugs;
223 }
224
225 /**
226  * Check whether the current trace has triggered an assertion which should halt
227  * its execution.
228  *
229  * @return True, if the execution should be aborted; false otherwise
230  */
231 bool ModelExecution::has_asserted() const
232 {
233         return priv->asserted;
234 }
235
236 /**
237  * Trigger a trace assertion which should cause this execution to be halted.
238  * This can be due to a detected bug or due to an infeasibility that should
239  * halt ASAP.
240  */
241 void ModelExecution::set_assert()
242 {
243         priv->asserted = true;
244 }
245
246 /**
247  * Check if we are in a deadlock. Should only be called at the end of an
248  * execution, although it should not give false positives in the middle of an
249  * execution (there should be some ENABLED thread).
250  *
251  * @return True if program is in a deadlock; false otherwise
252  */
253 bool ModelExecution::is_deadlocked() const
254 {
255         bool blocking_threads = false;
256         for (unsigned int i = 0; i < get_num_threads(); i++) {
257                 thread_id_t tid = int_to_id(i);
258                 if (is_enabled(tid))
259                         return false;
260                 Thread *t = get_thread(tid);
261                 if (!t->is_model_thread() && t->get_pending())
262                         blocking_threads = true;
263         }
264         return blocking_threads;
265 }
266
267 /**
268  * Check if this is a complete execution. That is, have all threads completed
269  * execution (rather than exiting because sleep sets have forced a redundant
270  * execution).
271  *
272  * @return True if the execution is complete.
273  */
274 bool ModelExecution::is_complete_execution() const
275 {
276         for (unsigned int i = 0; i < get_num_threads(); i++)
277                 if (is_enabled(int_to_id(i)))
278                         return false;
279         return true;
280 }
281
282 /**
283  * @brief Find the last fence-related backtracking conflict for a ModelAction
284  *
285  * This function performs the search for the most recent conflicting action
286  * against which we should perform backtracking, as affected by fence
287  * operations. This includes pairs of potentially-synchronizing actions which
288  * occur due to fence-acquire or fence-release, and hence should be explored in
289  * the opposite execution order.
290  *
291  * @param act The current action
292  * @return The most recent action which conflicts with act due to fences
293  */
294 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
295 {
296         /* Only perform release/acquire fence backtracking for stores */
297         if (!act->is_write())
298                 return NULL;
299
300         /* Find a fence-release (or, act is a release) */
301         ModelAction *last_release;
302         if (act->is_release())
303                 last_release = act;
304         else
305                 last_release = get_last_fence_release(act->get_tid());
306         if (!last_release)
307                 return NULL;
308
309         /* Skip past the release */
310         const action_list_t *list = &action_trace;
311         action_list_t::const_reverse_iterator rit;
312         for (rit = list->rbegin(); rit != list->rend(); rit++)
313                 if (*rit == last_release)
314                         break;
315         ASSERT(rit != list->rend());
316
317         /* Find a prior:
318          *   load-acquire
319          * or
320          *   load --sb-> fence-acquire */
321         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
322         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
323         bool found_acquire_fences = false;
324         for ( ; rit != list->rend(); rit++) {
325                 ModelAction *prev = *rit;
326                 if (act->same_thread(prev))
327                         continue;
328
329                 int tid = id_to_int(prev->get_tid());
330
331                 if (prev->is_read() && act->same_var(prev)) {
332                         if (prev->is_acquire()) {
333                                 /* Found most recent load-acquire, don't need
334                                  * to search for more fences */
335                                 if (!found_acquire_fences)
336                                         return NULL;
337                         } else {
338                                 prior_loads[tid] = prev;
339                         }
340                 }
341                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
342                         found_acquire_fences = true;
343                         acquire_fences[tid] = prev;
344                 }
345         }
346
347         ModelAction *latest_backtrack = NULL;
348         for (unsigned int i = 0; i < acquire_fences.size(); i++)
349                 if (acquire_fences[i] && prior_loads[i])
350                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
351                                 latest_backtrack = acquire_fences[i];
352         return latest_backtrack;
353 }
354
355 /**
356  * @brief Find the last backtracking conflict for a ModelAction
357  *
358  * This function performs the search for the most recent conflicting action
359  * against which we should perform backtracking. This primarily includes pairs of
360  * synchronizing actions which should be explored in the opposite execution
361  * order.
362  *
363  * @param act The current action
364  * @return The most recent action which conflicts with act
365  */
366 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
367 {
368         switch (act->get_type()) {
369         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
370         case ATOMIC_READ:
371         case ATOMIC_WRITE:
372         case ATOMIC_RMW: {
373                 ModelAction *ret = NULL;
374
375                 /* linear search: from most recent to oldest */
376                 action_list_t *list = obj_map.get(act->get_location());
377                 action_list_t::reverse_iterator rit;
378                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
379                         ModelAction *prev = *rit;
380                         if (prev->could_synchronize_with(act)) {
381                                 ret = prev;
382                                 break;
383                         }
384                 }
385
386                 ModelAction *ret2 = get_last_fence_conflict(act);
387                 if (!ret2)
388                         return ret;
389                 if (!ret)
390                         return ret2;
391                 if (*ret < *ret2)
392                         return ret2;
393                 return ret;
394         }
395         case ATOMIC_LOCK:
396         case ATOMIC_TRYLOCK: {
397                 /* linear search: from most recent to oldest */
398                 action_list_t *list = obj_map.get(act->get_location());
399                 action_list_t::reverse_iterator rit;
400                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
401                         ModelAction *prev = *rit;
402                         if (act->is_conflicting_lock(prev))
403                                 return prev;
404                 }
405                 break;
406         }
407         case ATOMIC_UNLOCK: {
408                 /* linear search: from most recent to oldest */
409                 action_list_t *list = obj_map.get(act->get_location());
410                 action_list_t::reverse_iterator rit;
411                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
412                         ModelAction *prev = *rit;
413                         if (!act->same_thread(prev) && prev->is_failed_trylock())
414                                 return prev;
415                 }
416                 break;
417         }
418         case ATOMIC_WAIT: {
419                 /* linear search: from most recent to oldest */
420                 action_list_t *list = obj_map.get(act->get_location());
421                 action_list_t::reverse_iterator rit;
422                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
423                         ModelAction *prev = *rit;
424                         if (!act->same_thread(prev) && prev->is_failed_trylock())
425                                 return prev;
426                         if (!act->same_thread(prev) && prev->is_notify())
427                                 return prev;
428                 }
429                 break;
430         }
431
432         case ATOMIC_NOTIFY_ALL:
433         case ATOMIC_NOTIFY_ONE: {
434                 /* linear search: from most recent to oldest */
435                 action_list_t *list = obj_map.get(act->get_location());
436                 action_list_t::reverse_iterator rit;
437                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
438                         ModelAction *prev = *rit;
439                         if (!act->same_thread(prev) && prev->is_wait())
440                                 return prev;
441                 }
442                 break;
443         }
444         default:
445                 break;
446         }
447         return NULL;
448 }
449
450 /** This method finds backtracking points against which we should try to
451  * reorder the parameter ModelAction.
452  *
453  * @param act The ModelAction to find backtracking points for.
454  */
455 void ModelExecution::set_backtracking(ModelAction *act)
456 {
457         Thread *t = get_thread(act);
458         ModelAction *prev = get_last_conflict(act);
459         if (prev == NULL)
460                 return;
461
462         Node *node = prev->get_node()->get_parent();
463
464         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
465         int low_tid, high_tid;
466         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
467                 low_tid = id_to_int(act->get_tid());
468                 high_tid = low_tid + 1;
469         } else {
470                 low_tid = 0;
471                 high_tid = get_num_threads();
472         }
473
474         for (int i = low_tid; i < high_tid; i++) {
475                 thread_id_t tid = int_to_id(i);
476
477                 /* Make sure this thread can be enabled here. */
478                 if (i >= node->get_num_threads())
479                         break;
480
481                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
482                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
483                 if (node->enabled_status(tid) != THREAD_ENABLED)
484                         continue;
485
486                 /* Check if this has been explored already */
487                 if (node->has_been_explored(tid))
488                         continue;
489
490                 /* See if fairness allows */
491                 if (params->fairwindow != 0 && !node->has_priority(tid)) {
492                         bool unfair = false;
493                         for (int t = 0; t < node->get_num_threads(); t++) {
494                                 thread_id_t tother = int_to_id(t);
495                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
496                                         unfair = true;
497                                         break;
498                                 }
499                         }
500                         if (unfair)
501                                 continue;
502                 }
503
504                 /* See if CHESS-like yield fairness allows */
505                 if (params->yieldon) {
506                         bool unfair = false;
507                         for (int t = 0; t < node->get_num_threads(); t++) {
508                                 thread_id_t tother = int_to_id(t);
509                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
510                                         unfair = true;
511                                         break;
512                                 }
513                         }
514                         if (unfair)
515                                 continue;
516                 }
517
518                 /* Cache the latest backtracking point */
519                 set_latest_backtrack(prev);
520
521                 /* If this is a new backtracking point, mark the tree */
522                 if (!node->set_backtrack(tid))
523                         continue;
524                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
525                                         id_to_int(prev->get_tid()),
526                                         id_to_int(t->get_id()));
527                 if (DBG_ENABLED()) {
528                         prev->print();
529                         act->print();
530                 }
531         }
532 }
533
534 /**
535  * @brief Cache a backtracking point as the "most recent", if eligible
536  *
537  * Note that this does not prepare the NodeStack for this backtracking
538  * operation; it only caches the action on a per-execution basis
539  *
540  * @param act The operation at which we should explore a different next action
541  * (i.e., backtracking point)
542  * @return True, if this action is now the most recent backtracking point;
543  * false otherwise
544  */
545 bool ModelExecution::set_latest_backtrack(ModelAction *act)
546 {
547         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
548                 priv->next_backtrack = act;
549                 return true;
550         }
551         return false;
552 }
553
554 /**
555  * Returns the latest backtracking point. The model checker will explore a different
556  * path for this point in the next execution.
557  * @return The ModelAction at which the next execution should diverge.
558  */
559 ModelAction * ModelExecution::get_next_backtrack()
560 {
561         ModelAction *next = priv->next_backtrack;
562         priv->next_backtrack = NULL;
563         return next;
564 }
565
566 /**
567  * Processes a read model action.
568  * @param curr is the read model action to process.
569  * @return True if processing this read updates the mo_graph.
570  */
571 bool ModelExecution::process_read(ModelAction *curr)
572 {
573         Node *node = curr->get_node();
574         while (true) {
575                 bool updated = false;
576                 switch (node->get_read_from_status()) {
577                 case READ_FROM_PAST: {
578                         const ModelAction *rf = node->get_read_from_past();
579                         ASSERT(rf);
580
581                         mo_graph->startChanges();
582
583                         ASSERT(!is_infeasible());
584                         if (!check_recency(curr, rf)) {
585                                 if (node->increment_read_from()) {
586                                         mo_graph->rollbackChanges();
587                                         continue;
588                                 } else {
589                                         priv->too_many_reads = true;
590                                 }
591                         }
592
593                         updated = r_modification_order(curr, rf);
594                         read_from(curr, rf);
595                         mo_graph->commitChanges();
596                         mo_check_promises(curr, true);
597                         break;
598                 }
599                 case READ_FROM_PROMISE: {
600                         Promise *promise = curr->get_node()->get_read_from_promise();
601                         if (promise->add_reader(curr))
602                                 priv->failed_promise = true;
603                         curr->set_read_from_promise(promise);
604                         mo_graph->startChanges();
605                         if (!check_recency(curr, promise))
606                                 priv->too_many_reads = true;
607                         updated = r_modification_order(curr, promise);
608                         mo_graph->commitChanges();
609                         break;
610                 }
611                 case READ_FROM_FUTURE: {
612                         /* Read from future value */
613                         struct future_value fv = node->get_future_value();
614                         Promise *promise = new Promise(this, curr, fv);
615                         curr->set_read_from_promise(promise);
616                         promises.push_back(promise);
617                         mo_graph->startChanges();
618                         updated = r_modification_order(curr, promise);
619                         mo_graph->commitChanges();
620                         break;
621                 }
622                 default:
623                         ASSERT(false);
624                 }
625                 get_thread(curr)->set_return_value(curr->get_return_value());
626                 return updated;
627         }
628 }
629
630 /**
631  * Processes a lock, trylock, or unlock model action.  @param curr is
632  * the mutex model action to process.
633  *
634  * The trylock operation checks whether the lock is already taken.  If not,
635  * it falls through to the normal lock operation case.  If so, it fails and
636  * returns 0.
637  *
638  * The lock operation has already been checked to be enabled, so
639  * it just grabs the lock and synchronizes with the previous unlock.
640  *
641  * The unlock operation has to re-enable all of the threads that are
642  * waiting on the lock.
643  *
644  * @return True if synchronization was updated; false otherwise
645  */
646 bool ModelExecution::process_mutex(ModelAction *curr)
647 {
648         std::mutex *mutex = curr->get_mutex();
649         struct std::mutex_state *state = NULL;
650
651         if (mutex)
652                 state = mutex->get_state();
653
654         switch (curr->get_type()) {
655         case ATOMIC_TRYLOCK: {
656                 bool success = !state->locked;
657                 curr->set_try_lock(success);
658                 if (!success) {
659                         get_thread(curr)->set_return_value(0);
660                         break;
661                 }
662                 get_thread(curr)->set_return_value(1);
663         }
664                 //otherwise fall through to the lock case
665         case ATOMIC_LOCK: {
666                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
667                         assert_bug("Lock access before initialization");
668                 state->locked = get_thread(curr);
669                 ModelAction *unlock = get_last_unlock(curr);
670                 //synchronize with the previous unlock statement
671                 if (unlock != NULL) {
672                         synchronize(unlock, curr);
673                         return true;
674                 }
675                 break;
676         }
677         case ATOMIC_WAIT:
678         case ATOMIC_UNLOCK: {
679                 /* wake up the other threads */
680                 for (unsigned int i = 0; i < get_num_threads(); i++) {
681                         Thread *t = get_thread(int_to_id(i));
682                         Thread *curr_thrd = get_thread(curr);
683                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
684                                 scheduler->wake(t);
685                 }
686
687                 /* unlock the lock - after checking who was waiting on it */
688                 state->locked = NULL;
689
690                 if (!curr->is_wait())
691                         break; /* The rest is only for ATOMIC_WAIT */
692
693                 /* Should we go to sleep? (simulate spurious failures) */
694                 if (curr->get_node()->get_misc() == 0) {
695                         get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
696                         /* disable us */
697                         scheduler->sleep(get_thread(curr));
698                 }
699                 break;
700         }
701         case ATOMIC_NOTIFY_ALL: {
702                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
703                 //activate all the waiting threads
704                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
705                         scheduler->wake(get_thread(*rit));
706                 }
707                 waiters->clear();
708                 break;
709         }
710         case ATOMIC_NOTIFY_ONE: {
711                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
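                    /* The Node's 'misc' counter selects which waiting thread this
                     * notify_one wakes, so different waiters are explored across
                     * executions (see set_misc_max() in initialize_curr_action()) */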
712                 int wakeupthread = curr->get_node()->get_misc();
713                 action_list_t::iterator it = waiters->begin();
714                 advance(it, wakeupthread);
715                 scheduler->wake(get_thread(*it));
716                 waiters->erase(it);
717                 break;
718         }
719
720         default:
721                 ASSERT(0);
722         }
723         return false;
724 }
725
726 /**
727  * @brief Check if the current pending promises allow a future value to be sent
728  *
729  * If one of the following is true:
730  *  (a) there are no pending promises
731  *  (b) the reader and writer do not cross any promises
732  * Then, it is safe to pass a future value back now.
733  *
734  * Otherwise, we must save the pending future value until (a) or (b) is true
735  *
736  * @param writer The operation which sends the future value. Must be a write.
737  * @param reader The operation which will observe the value. Must be a read.
738  * @return True if the future value can be sent now; false if it must wait.
739  */
740 bool ModelExecution::promises_may_allow(const ModelAction *writer,
741                 const ModelAction *reader) const
742 {
743         if (promises.empty())
744                 return true;
745         for (int i = promises.size() - 1; i >= 0; i--) {
746                 ModelAction *pr = promises[i]->get_reader(0);
747                 //reader is after promise...doesn't cross any promise
748                 if (*reader > *pr)
749                         return true;
750                 //writer is after promise, reader before...bad...
751                 if (*writer > *pr)
752                         return false;
753         }
754         return true;
755 }
756
757 /**
758  * @brief Add a future value to a reader
759  *
760  * This function performs a few additional checks to ensure that the future
761  * value can be feasibly observed by the reader
762  *
763  * @param writer The operation whose value is sent. Must be a write.
764  * @param reader The read operation which may read the future value. Must be a read.
765  */
766 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
767 {
768         /* Do more ambitious checks now that mo is more complete */
769         if (!mo_may_allow(writer, reader))
770                 return;
771
772         Node *node = reader->get_node();
773
774         /* Find an ancestor thread which exists at the time of the reader */
775         Thread *write_thread = get_thread(writer);
776         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
777                 write_thread = write_thread->get_parent();
778
779         struct future_value fv = {
780                 writer->get_write_value(),
781                 writer->get_seq_number() + params->maxfuturedelay,
782                 write_thread->get_id(),
783         };
784         if (node->add_future_value(fv))
785                 set_latest_backtrack(reader);
786 }
787
788 /**
789  * Process a write ModelAction
790  * @param curr The ModelAction to process
791  * @return True if the mo_graph was updated or promises were resolved
792  */
793 bool ModelExecution::process_write(ModelAction *curr)
794 {
795         /* Readers to which we may send our future value */
796         ModelVector<ModelAction *> send_fv;
797
798         const ModelAction *earliest_promise_reader;
799         bool updated_promises = false;
800
801         bool updated_mod_order = w_modification_order(curr, &send_fv);
802         Promise *promise = pop_promise_to_resolve(curr);
803
804         if (promise) {
805                 earliest_promise_reader = promise->get_reader(0);
806                 updated_promises = resolve_promise(curr, promise);
807         } else
808                 earliest_promise_reader = NULL;
809
810         for (unsigned int i = 0; i < send_fv.size(); i++) {
811                 ModelAction *read = send_fv[i];
812
813                 /* Don't send future values to reads after the Promise we resolve */
814                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
815                         /* Check if future value can be sent immediately */
816                         if (promises_may_allow(curr, read)) {
817                                 add_future_value(curr, read);
818                         } else {
819                                 futurevalues.push_back(PendingFutureValue(curr, read));
820                         }
821                 }
822         }
823
824         /* Check the pending future values */
825         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
826                 struct PendingFutureValue pfv = futurevalues[i];
827                 if (promises_may_allow(pfv.writer, pfv.reader)) {
828                         add_future_value(pfv.writer, pfv.reader);
829                         futurevalues.erase(futurevalues.begin() + i);
830                 }
831         }
832
833         mo_graph->commitChanges();
834         mo_check_promises(curr, false);
835
836         get_thread(curr)->set_return_value(VALUE_NONE);
837         return updated_mod_order || updated_promises;
838 }
839
840 /**
841  * Process a fence ModelAction
842  * @param curr The ModelAction to process
843  * @return True if synchronization was updated
844  */
845 bool ModelExecution::process_fence(ModelAction *curr)
846 {
847         /*
848          * fence-relaxed: no-op
849          * fence-release: only log the occurrence (not in this function), for
850          *   use in later synchronization
851          * fence-acquire (this function): search for hypothetical release
852          *   sequences
853          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
854          */
855         bool updated = false;
856         if (curr->is_acquire()) {
857                 action_list_t *list = &action_trace;
858                 action_list_t::reverse_iterator rit;
859                 /* Find X : is_read(X) && X --sb-> curr */
860                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
861                         ModelAction *act = *rit;
862                         if (act == curr)
863                                 continue;
864                         if (act->get_tid() != curr->get_tid())
865                                 continue;
866                         /* Stop at the beginning of the thread */
867                         if (act->is_thread_start())
868                                 break;
869                         /* Stop once we reach a prior fence-acquire */
870                         if (act->is_fence() && act->is_acquire())
871                                 break;
872                         if (!act->is_read())
873                                 continue;
874                         /* read-acquire will find its own release sequences */
875                         if (act->is_acquire())
876                                 continue;
877
878                         /* Establish hypothetical release sequences */
879                         rel_heads_list_t release_heads;
880                         get_release_seq_heads(curr, act, &release_heads);
881                         for (unsigned int i = 0; i < release_heads.size(); i++)
882                                 synchronize(release_heads[i], curr);
883                         if (release_heads.size() != 0)
884                                 updated = true;
885                 }
886         }
887         return updated;
888 }
889
890 /**
891  * @brief Process the current action for thread-related activity
892  *
893  * Performs current-action processing for a THREAD_* ModelAction. Processing
894  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
895  * synchronization, etc.  This function is a no-op for non-THREAD actions
896  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
897  *
898  * @param curr The current action
899  * @return True if synchronization was updated or a thread completed
900  */
901 bool ModelExecution::process_thread_action(ModelAction *curr)
902 {
903         bool updated = false;
904
905         switch (curr->get_type()) {
906         case THREAD_CREATE: {
907                 thrd_t *thrd = (thrd_t *)curr->get_location();
908                 struct thread_params *params = (struct thread_params *)curr->get_value();
909                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
910                 add_thread(th);
911                 th->set_creation(curr);
912                 /* Promises can be satisfied by children */
913                 for (unsigned int i = 0; i < promises.size(); i++) {
914                         Promise *promise = promises[i];
915                         if (promise->thread_is_available(curr->get_tid()))
916                                 promise->add_thread(th->get_id());
917                 }
918                 break;
919         }
920         case THREAD_JOIN: {
921                 Thread *blocking = curr->get_thread_operand();
922                 ModelAction *act = get_last_action(blocking->get_id());
923                 synchronize(act, curr);
924                 updated = true; /* trigger rel-seq checks */
925                 break;
926         }
927         case THREAD_FINISH: {
928                 Thread *th = get_thread(curr);
929                 /* Wake up any joining threads */
930                 for (unsigned int i = 0; i < get_num_threads(); i++) {
931                         Thread *waiting = get_thread(int_to_id(i));
932                         if (waiting->waiting_on() == th &&
933                                         waiting->get_pending()->is_thread_join())
934                                 scheduler->wake(waiting);
935                 }
936                 th->complete();
937                 /* Completed thread can't satisfy promises */
938                 for (unsigned int i = 0; i < promises.size(); i++) {
939                         Promise *promise = promises[i];
940                         if (promise->thread_is_available(th->get_id()))
941                                 if (promise->eliminate_thread(th->get_id()))
942                                         priv->failed_promise = true;
943                 }
944                 updated = true; /* trigger rel-seq checks */
945                 break;
946         }
947         case THREAD_START: {
948                 check_promises(curr->get_tid(), NULL, curr->get_cv());
949                 break;
950         }
951         default:
952                 break;
953         }
954
955         return updated;
956 }
957
958 /**
959  * @brief Process the current action for release sequence fixup activity
960  *
961  * Performs model-checker release sequence fixups for the current action,
962  * forcing a single pending release sequence to break (with a given, potential
963  * "loose" write) or to complete (i.e., synchronize). If a pending release
964  * sequence forms a complete release sequence, then we must perform the fixup
965  * synchronization, mo_graph additions, etc.
966  *
967  * @param curr The current action; must be a release sequence fixup action
968  * @param work_queue The work queue to which to add work items as they are
969  * generated
970  */
971 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
972 {
973         const ModelAction *write = curr->get_node()->get_relseq_break();
974         struct release_seq *sequence = pending_rel_seqs.back();
975         pending_rel_seqs.pop_back();
976         ASSERT(sequence);
977         ModelAction *acquire = sequence->acquire;
978         const ModelAction *rf = sequence->rf;
979         const ModelAction *release = sequence->release;
980         ASSERT(acquire);
981         ASSERT(release);
982         ASSERT(rf);
983         ASSERT(release->same_thread(rf));
984
985         if (write == NULL) {
986                 /**
987                  * @todo Forcing a synchronization requires that we set
988                  * modification order constraints. For instance, we can't allow
989                  * a fixup sequence in which two separate read-acquire
990                  * operations read from the same sequence, where the first one
991                  * synchronizes and the other doesn't. Essentially, we can't
992                  * allow any writes to insert themselves between 'release' and
993                  * 'rf'
994                  */
995
996                 /* Must synchronize */
997                 if (!synchronize(release, acquire))
998                         return;
999                 /* Re-check all pending release sequences */
1000                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1001                 /* Re-check act for mo_graph edges */
1002                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1003
1004                 /* propagate synchronization to later actions */
1005                 action_list_t::reverse_iterator rit = action_trace.rbegin();
1006                 for (; (*rit) != acquire; rit++) {
1007                         ModelAction *propagate = *rit;
1008                         if (acquire->happens_before(propagate)) {
1009                                 synchronize(acquire, propagate);
1010                                 /* Re-check 'propagate' for mo_graph edges */
1011                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1012                         }
1013                 }
1014         } else {
1015                 /* Break release sequence with new edges:
1016                  *   release --mo--> write --mo--> rf */
1017                 mo_graph->addEdge(release, write);
1018                 mo_graph->addEdge(write, rf);
1019         }
1020
1021         /* See if we have realized a data race */
1022         checkDataRaces();
1023 }
1024
1025 /**
1026  * Initialize the current action by performing one or more of the following
1027  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1028  * in the NodeStack, manipulating backtracking sets, allocating and
1029  * initializing clock vectors, and computing the promises to fulfill.
1030  *
1031  * @param curr The current action, as passed from the user context; may be
1032  * freed/invalidated after the execution of this function, with a different
1033  * action "returned" in its place (pass-by-reference)
1034  * @return True if curr is a newly-explored action; false otherwise
1035  */
1036 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1037 {
1038         ModelAction *newcurr;
1039
1040         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1041                 newcurr = process_rmw(*curr);
1042                 delete *curr;
1043
1044                 if (newcurr->is_rmw())
1045                         compute_promises(newcurr);
1046
1047                 *curr = newcurr;
1048                 return false;
1049         }
1050
1051         (*curr)->set_seq_number(get_next_seq_num());
1052
1053         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1054         if (newcurr) {
1055                 /* First restore type and order in case of RMW operation */
1056                 if ((*curr)->is_rmwr())
1057                         newcurr->copy_typeandorder(*curr);
1058
1059                 ASSERT((*curr)->get_location() == newcurr->get_location());
1060                 newcurr->copy_from_new(*curr);
1061
1062                 /* Discard duplicate ModelAction; use action from NodeStack */
1063                 delete *curr;
1064
1065                 /* Always compute new clock vector */
1066                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1067
1068                 *curr = newcurr;
1069                 return false; /* Action was explored previously */
1070         } else {
1071                 newcurr = *curr;
1072
1073                 /* Always compute new clock vector */
1074                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1075
1076                 /* Assign most recent release fence */
1077                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1078
1079                 /*
1080                  * Perform one-time actions when pushing new ModelAction onto
1081                  * NodeStack
1082                  */
1083                 if (newcurr->is_write())
1084                         compute_promises(newcurr);
1085                 else if (newcurr->is_relseq_fixup())
1086                         compute_relseq_breakwrites(newcurr);
1087                 else if (newcurr->is_wait())
1088                         newcurr->get_node()->set_misc_max(2);
1089                 else if (newcurr->is_notify_one()) {
1090                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1091                 }
1092                 return true; /* This was a new ModelAction */
1093         }
1094 }
1095
1096 /**
1097  * @brief Establish reads-from relation between two actions
1098  *
1099  * Perform basic operations involved with establishing a concrete rf relation,
1100  * including setting the ModelAction data and checking for release sequences.
1101  *
1102  * @param act The action that is reading (must be a read)
1103  * @param rf The action from which we are reading (must be a write)
1104  *
1105  * @return True if this read established synchronization
1106  */
1107 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1108 {
1109         ASSERT(rf);
1110         ASSERT(rf->is_write());
1111
1112         act->set_read_from(rf);
1113         if (act->is_acquire()) {
1114                 rel_heads_list_t release_heads;
1115                 get_release_seq_heads(act, act, &release_heads);
1116                 int num_heads = release_heads.size();
1117                 for (unsigned int i = 0; i < release_heads.size(); i++)
1118                         if (!synchronize(release_heads[i], act))
1119                                 num_heads--;
1120                 return num_heads > 0;
1121         }
1122         return false;
1123 }
1124
1125 /**
1126  * @brief Synchronizes two actions
1127  *
1128  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1129  * This function performs the synchronization and provides hooks for the
1130  * other checks that accompany synchronization.
1131  *
1132  * @param first The left-hand side of the synchronizes-with relation
1133  * @param second The right-hand side of the synchronizes-with relation
1134  * @return True if the synchronization was successful (i.e., was consistent
1135  * with the execution order); false otherwise
1136  */
1137 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1138 {
1139         if (*second < *first) {
1140                 set_bad_synchronization();
1141                 return false;
1142         }
1143         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1144         return second->synchronize_with(first);
1145 }
1146
1147 /**
1148  * Check promises and eliminate potentially-satisfying threads when a thread is
1149  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1150  * no longer satisfy a promise generated from that thread.
1151  *
1152  * @param blocker The thread on which a thread is waiting
1153  * @param waiting The waiting thread
1154  */
1155 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1156 {
1157         for (unsigned int i = 0; i < promises.size(); i++) {
1158                 Promise *promise = promises[i];
1159                 if (!promise->thread_is_available(waiting->get_id()))
1160                         continue;
1161                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1162                         ModelAction *reader = promise->get_reader(j);
1163                         if (reader->get_tid() != blocker->get_id())
1164                                 continue;
1165                         if (promise->eliminate_thread(waiting->get_id())) {
1166                                 /* Promise has failed */
1167                                 priv->failed_promise = true;
1168                         } else {
1169                                 /* Only eliminate the 'waiting' thread once */
1170                                 return;
1171                         }
1172                 }
1173         }
1174 }
1175
1176 /**
1177  * @brief Check whether a model action is enabled.
1178  *
1179  * Checks whether a lock or join operation would be successful (i.e., the
1180  * lock is not already held, or the joined thread has already completed). If not, put
1181  * the action in a waiter list.
1182  *
1183  * @param curr is the ModelAction to check whether it is enabled.
1184  * @return a bool that indicates whether the action is enabled.
1185  */
1186 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1187         if (curr->is_lock()) {
1188                 std::mutex *lock = curr->get_mutex();
1189                 struct std::mutex_state *state = lock->get_state();
1190                 if (state->locked)
1191                         return false;
1192         } else if (curr->is_thread_join()) {
1193                 Thread *blocking = curr->get_thread_operand();
1194                 if (!blocking->is_complete()) {
1195                         thread_blocking_check_promises(blocking, get_thread(curr));
1196                         return false;
1197                 }
1198         }
1199
1200         return true;
1201 }
1202
1203 /**
1204  * This is the heart of the model checker routine. It performs model-checking
1205  * actions corresponding to a given "current action." Among other processes, it
1206  * calculates reads-from relationships, updates synchronization clock vectors,
1207  * forms a memory_order constraints graph, and handles replay/backtrack
1208  * execution when running permutations of previously-observed executions.
1209  *
1210  * @param curr The current action to process
1211  * @return The ModelAction that is actually executed; may be different than
1212  * curr
1213  */
1214 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1215 {
1216         ASSERT(curr);
1217         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1218         bool newly_explored = initialize_curr_action(&curr);
1219
1220         DBG();
1221
1222         wake_up_sleeping_actions(curr);
1223
1224         /* Compute fairness information for CHESS yield algorithm */
1225         if (params->yieldon) {
1226                 curr->get_node()->update_yield(scheduler);
1227         }
1228
1229         /* Add the action to lists before any other model-checking tasks */
1230         if (!second_part_of_rmw)
1231                 add_action_to_lists(curr);
1232
1233         /* Build may_read_from set for newly-created actions */
1234         if (newly_explored && curr->is_read())
1235                 build_may_read_from(curr);
1236
1237         /* Initialize work_queue with the "current action" work */
1238         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1239         while (!work_queue.empty() && !has_asserted()) {
1240                 WorkQueueEntry work = work_queue.front();
1241                 work_queue.pop_front();
1242
1243                 switch (work.type) {
1244                 case WORK_CHECK_CURR_ACTION: {
1245                         ModelAction *act = work.action;
1246                         bool update = false; /* update this location's release seq's */
1247                         bool update_all = false; /* update all release seq's */
1248
1249                         if (process_thread_action(curr))
1250                                 update_all = true;
1251
1252                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1253                                 update = true;
1254
1255                         if (act->is_write() && process_write(act))
1256                                 update = true;
1257
1258                         if (act->is_fence() && process_fence(act))
1259                                 update_all = true;
1260
1261                         if (act->is_mutex_op() && process_mutex(act))
1262                                 update_all = true;
1263
1264                         if (act->is_relseq_fixup())
1265                                 process_relseq_fixup(curr, &work_queue);
1266
1267                         if (update_all)
1268                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1269                         else if (update)
1270                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1271                         break;
1272                 }
1273                 case WORK_CHECK_RELEASE_SEQ:
1274                         resolve_release_sequences(work.location, &work_queue);
1275                         break;
1276                 case WORK_CHECK_MO_EDGES: {
1277                         /** @todo Complete verification of work_queue */
1278                         ModelAction *act = work.action;
1279                         bool updated = false;
1280
1281                         if (act->is_read()) {
1282                                 const ModelAction *rf = act->get_reads_from();
1283                                 const Promise *promise = act->get_reads_from_promise();
1284                                 if (rf) {
1285                                         if (r_modification_order(act, rf))
1286                                                 updated = true;
1287                                 } else if (promise) {
1288                                         if (r_modification_order(act, promise))
1289                                                 updated = true;
1290                                 }
1291                         }
1292                         if (act->is_write()) {
1293                                 if (w_modification_order(act, NULL))
1294                                         updated = true;
1295                         }
1296                         mo_graph->commitChanges();
1297
1298                         if (updated)
1299                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1300                         break;
1301                 }
1302                 default:
1303                         ASSERT(false);
1304                         break;
1305                 }
1306         }
1307
1308         check_curr_backtracking(curr);
1309         set_backtracking(curr);
1310         return curr;
1311 }
1312
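/**
 * @brief Cache the current action as the latest backtracking point if its Node
 * (or its parent) still has unexplored choices (backtracks, misc values,
 * reads-from candidates, promises, or release-sequence breaks)
 */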
1313 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1314 {
1315         Node *currnode = curr->get_node();
1316         Node *parnode = currnode->get_parent();
1317
1318         if ((parnode && !parnode->backtrack_empty()) ||
1319                          !currnode->misc_empty() ||
1320                          !currnode->read_from_empty() ||
1321                          !currnode->promise_empty() ||
1322                          !currnode->relseq_break_empty()) {
1323                 set_latest_backtrack(curr);
1324         }
1325 }
1326
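/** @return True if any outstanding promise has passed its expiration sequence number */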
1327 bool ModelExecution::promises_expired() const
1328 {
1329         for (unsigned int i = 0; i < promises.size(); i++) {
1330                 Promise *promise = promises[i];
1331                 if (promise->get_expiration() < priv->used_sequence_numbers)
1332                         return true;
1333         }
1334         return false;
1335 }
1336
1337 /**
1338  * This is the strongest feasibility check available.
1339  * @return whether the current trace (partial or complete) must be a prefix of
1340  * a feasible trace.
1341  */
1342 bool ModelExecution::isfeasibleprefix() const
1343 {
1344         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1345 }
1346
1347 /**
1348  * Print diagnostic information about an infeasible execution
1349  * @param prefix A string to prefix the output with; if NULL, then a default
1350  * message prefix will be provided
1351  */
1352 void ModelExecution::print_infeasibility(const char *prefix) const
1353 {
1354         char buf[200];  /* sized to hold every infeasibility tag at once */
1355         char *ptr = buf;
1356         if (mo_graph->checkForCycles())
1357                 ptr += sprintf(ptr, "[mo cycle]");
1358         if (priv->failed_promise)
1359                 ptr += sprintf(ptr, "[failed promise]");
1360         if (priv->too_many_reads)
1361                 ptr += sprintf(ptr, "[too many reads]");
1362         if (priv->no_valid_reads)
1363                 ptr += sprintf(ptr, "[no valid reads-from]");
1364         if (priv->bad_synchronization)
1365                 ptr += sprintf(ptr, "[bad sw ordering]");
1366         if (promises_expired())
1367                 ptr += sprintf(ptr, "[promise expired]");
1368         if (promises.size() != 0)
1369                 ptr += sprintf(ptr, "[unresolved promise]");
1370         if (ptr != buf)
1371                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1372 }
1373
1374 /**
1375  * Returns whether the current completed trace is feasible, except for pending
1376  * release sequences.
1377  */
1378 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1379 {
1380         return !is_infeasible() && promises.size() == 0;
1381 }
1382
1383 /**
1384  * Check if the current partial trace is infeasible. Does not check any
1385  * end-of-execution flags, which might rule out the execution. Thus, this is
1386  * useful only for ruling an execution as infeasible.
1387  * @return whether the current partial trace is infeasible.
1388  */
1389 bool ModelExecution::is_infeasible() const
1390 {
1391         return mo_graph->checkForCycles() ||
1392                 priv->no_valid_reads ||
1393                 priv->failed_promise ||
1394                 priv->too_many_reads ||
1395                 priv->bad_synchronization ||
1396                 promises_expired();
1397 }
1398
1399 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1400 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1401         ModelAction *lastread = get_last_action(act->get_tid());
1402         lastread->process_rmw(act);
1403         if (act->is_rmw()) {
1404                 if (lastread->get_reads_from())
1405                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1406                 else
1407                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1408                 mo_graph->commitChanges();
1409         }
1410         return lastread;
1411 }
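
/*
 * Illustrative sketch (hypothetical client code, not part of this file):
 * C++11 requires an atomic RMW to read the last value written before its own
 * store in the modification order, which is the constraint addRMWEdge()
 * records above.  Assuming the usual <atomic> interface:
 *
 *   std::atomic<int> x(0);
 *   // T1:  x.store(1, std::memory_order_relaxed);                // A
 *   // T2:  int old = x.fetch_add(10, std::memory_order_relaxed); // RMW
 *
 * If the fetch_add reads the value 1, then A must be immediately before the
 * RMW's store of 11 in x's modification order; no other store to x may be
 * ordered between them.
 */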
1412
1413 /**
1414  * A helper function for ModelExecution::check_recency, to check if the current
1415  * thread is able to read from a different write/promise for 'params.maxreads'
1416  * number of steps and if that write/promise should become visible (i.e., is
1417  * ordered later in the modification order). This helps model memory liveness.
1418  *
1419  * @param curr The current action. Must be a read.
1420  * @param rf The write/promise from which we plan to read
1421  * @param other_rf The write/promise from which we may read
1422  * @return True if we were able to read from other_rf for params.maxreads steps
1423  */
1424 template <typename T, typename U>
1425 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1426 {
1427         /* Need a different write/promise */
1428         if (other_rf->equals(rf))
1429                 return false;
1430
1431         /* Only look for "newer" writes/promises */
1432         if (!mo_graph->checkReachable(rf, other_rf))
1433                 return false;
1434
1435         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1436         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1437         action_list_t::reverse_iterator rit = list->rbegin();
1438         ASSERT((*rit) == curr);
1439         /* Skip past curr */
1440         rit++;
1441
1442         /* Does this write/promise work for everyone? */
1443         for (int i = 0; i < params->maxreads; i++, rit++) {
1444                 ModelAction *act = *rit;
1445                 if (!act->may_read_from(other_rf))
1446                         return false;
1447         }
1448         return true;
1449 }
1450
1451 /**
1452  * Checks whether a thread has read from the same write or Promise for too many
1453  * times without seeing the effects of a later write/Promise.
1454  *
1455  * Basic idea:
1456  * 1) there must be a different write/promise that we could read from,
1457  * 2) we must have read from the same write/promise in excess of maxreads times,
1458  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1459  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1460  *
1461  * If so, we decide that the execution is no longer feasible.
1462  *
1463  * @param curr The current action. Must be a read.
1464  * @param rf The ModelAction/Promise from which we might read.
1465  * @return True if the read should succeed; false otherwise
1466  */
1467 template <typename T>
1468 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1469 {
1470         if (!params->maxreads)
1471                 return true;
1472
1473         // NOTE: the next check is just an optimization, not strictly necessary.
1474         if (curr->get_node()->get_read_from_past_size() +
1475                         curr->get_node()->get_read_from_promise_size() <= 1)
1476                 return true;
1477
1478         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1479         int tid = id_to_int(curr->get_tid());
1480         ASSERT(tid < (int)thrd_lists->size());
1481         action_list_t *list = &(*thrd_lists)[tid];
1482         action_list_t::reverse_iterator rit = list->rbegin();
1483         ASSERT((*rit) == curr);
1484         /* Skip past curr */
1485         rit++;
1486
1487         action_list_t::reverse_iterator ritcopy = rit;
1488         /* See if we have enough reads from the same value */
1489         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1490                 if (ritcopy == list->rend())
1491                         return true;
1492                 ModelAction *act = *ritcopy;
1493                 if (!act->is_read())
1494                         return true;
1495                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1496                         return true;
1497                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1498                         return true;
1499                 if (act->get_node()->get_read_from_past_size() +
1500                                 act->get_node()->get_read_from_promise_size() <= 1)
1501                         return true;
1502         }
1503         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1504                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1505                 if (should_read_instead(curr, rf, write))
1506                         return false; /* liveness failure */
1507         }
1508         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1509                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1510                 if (should_read_instead(curr, rf, promise))
1511                         return false; /* liveness failure */
1512         }
1513         return true;
1514 }
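
/*
 * Illustrative sketch (hypothetical client code, not part of this file):
 * with a non-zero params->maxreads, a spin loop such as
 *
 *   std::atomic<bool> flag(false);
 *   // T1:  while (!flag.load(std::memory_order_acquire)) ;   // spins
 *   // T2:  flag.store(true, std::memory_order_release);
 *
 * is not explored forever: once T1 has read the stale 'false' value maxreads
 * times while the store of 'true' is ordered later in the modification
 * order, check_recency() reports a liveness failure for that branch instead
 * of letting the loop keep reading the old value.
 */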
1515
1516 /**
1517  * @brief Updates the mo_graph with the constraints imposed from the current
1518  * read.
1519  *
1520  * Basic idea is the following: Go through each other thread and find
1521  * the last action that happened before our read.  Two cases:
1522  *
1523  * -# The action is a write: that write must either occur before
1524  * the write we read from or be the write we read from.
1525  * -# The action is a read: the write that that action read from
1526  * must occur before the write we read from or be the same write.
1527  *
1528  * @param curr The current action. Must be a read.
1529  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1530  * @return True if modification order edges were added; false otherwise
1531  */
1532 template <typename rf_type>
1533 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1534 {
1535         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1536         unsigned int i;
1537         bool added = false;
1538         ASSERT(curr->is_read());
1539
1540         /* Last SC fence in the current thread */
1541         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1542         ModelAction *last_sc_write = NULL;
1543         if (curr->is_seqcst())
1544                 last_sc_write = get_last_seq_cst_write(curr);
1545
1546         /* Iterate over all threads */
1547         for (i = 0; i < thrd_lists->size(); i++) {
1548                 /* Last SC fence in thread i */
1549                 ModelAction *last_sc_fence_thread_local = NULL;
1550                 if (int_to_id((int)i) != curr->get_tid())
1551                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1552
1553                 /* Last SC fence in thread i, before last SC fence in current thread */
1554                 ModelAction *last_sc_fence_thread_before = NULL;
1555                 if (last_sc_fence_local)
1556                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1557
1558                 /* Iterate over actions in thread, starting from most recent */
1559                 action_list_t *list = &(*thrd_lists)[i];
1560                 action_list_t::reverse_iterator rit;
1561                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1562                         ModelAction *act = *rit;
1563
1564                         /* Skip curr */
1565                         if (act == curr)
1566                                 continue;
1567                         /* Don't want to add reflexive edges on 'rf' */
1568                         if (act->equals(rf)) {
1569                                 if (act->happens_before(curr))
1570                                         break;
1571                                 else
1572                                         continue;
1573                         }
1574
1575                         if (act->is_write()) {
1576                                 /* C++, Section 29.3 statement 5 */
1577                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1578                                                 *act < *last_sc_fence_thread_local) {
1579                                         added = mo_graph->addEdge(act, rf) || added;
1580                                         break;
1581                                 }
1582                                 /* C++, Section 29.3 statement 4 */
1583                                 else if (act->is_seqcst() && last_sc_fence_local &&
1584                                                 *act < *last_sc_fence_local) {
1585                                         added = mo_graph->addEdge(act, rf) || added;
1586                                         break;
1587                                 }
1588                                 /* C++, Section 29.3 statement 6 */
1589                                 else if (last_sc_fence_thread_before &&
1590                                                 *act < *last_sc_fence_thread_before) {
1591                                         added = mo_graph->addEdge(act, rf) || added;
1592                                         break;
1593                                 }
1594                         }
1595
1596                         /* C++, Section 29.3 statement 3 (second subpoint) */
1597                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1598                                 added = mo_graph->addEdge(act, rf) || added;
1599                                 break;
1600                         }
1601
1602                         /*
1603                          * Include at most one act per-thread that "happens
1604                          * before" curr
1605                          */
1606                         if (act->happens_before(curr)) {
1607                                 if (act->is_write()) {
1608                                         added = mo_graph->addEdge(act, rf) || added;
1609                                 } else {
1610                                         const ModelAction *prevrf = act->get_reads_from();
1611                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1612                                         if (prevrf) {
1613                                                 if (!prevrf->equals(rf))
1614                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1615                                         } else if (!prevrf_promise->equals(rf)) {
1616                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1617                                         }
1618                                 }
1619                                 break;
1620                         }
1621                 }
1622         }
1623
1624         /*
1625          * All compatible, thread-exclusive promises must be ordered after any
1626          * concrete loads from the same thread
1627          */
1628         for (unsigned int i = 0; i < promises.size(); i++)
1629                 if (promises[i]->is_compatible_exclusive(curr))
1630                         added = mo_graph->addEdge(rf, promises[i]) || added;
1631
1632         return added;
1633 }
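
/*
 * Illustrative sketch (hypothetical, not part of this file) of the basic
 * happens-before case handled above:
 *
 *   std::atomic<int> x(0);
 *   // T1:  x.store(1, std::memory_order_relaxed);   // A
 *   //      r = x.load(std::memory_order_relaxed);   // curr, reads from B
 *   // T2:  x.store(2, std::memory_order_relaxed);   // B
 *
 * A is sequenced before curr (so it happens before it), and curr reads from
 * the distinct write B, so write-read coherence forces A --mo--> B.  That is
 * the mo_graph->addEdge(act, rf) call in the happens-before branch.
 */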
1634
1635 /**
1636  * Updates the mo_graph with the constraints imposed from the current write.
1637  *
1638  * Basic idea is the following: Go through each other thread and find
1639  * the latest action that happened before our write.  Two cases:
1640  *
1641  * (1) The action is a write => that write must occur before
1642  * the current write
1643  *
1644  * (2) The action is a read => the write that that action read from
1645  * must occur before the current write.
1646  *
1647  * This method also handles two other issues:
1648  *
1649  * (I) Sequential Consistency: Making sure that if the current write is
1650  * seq_cst, that it occurs after the previous seq_cst write.
1651  *
1652  * (II) Sending the write back to non-synchronizing reads.
1653  *
1654  * @param curr The current action. Must be a write.
1655  * @param send_fv A vector for stashing reads to which we may pass our future
1656  * value. If NULL, then don't record any future values.
1657  * @return True if modification order edges were added; false otherwise
1658  */
1659 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1660 {
1661         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1662         unsigned int i;
1663         bool added = false;
1664         ASSERT(curr->is_write());
1665
1666         if (curr->is_seqcst()) {
1667                 /* We have to at least see the last sequentially consistent write,
1668                          so we are initialized. */
1669                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1670                 if (last_seq_cst != NULL) {
1671                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1672                 }
1673         }
1674
1675         /* Last SC fence in the current thread */
1676         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1677
1678         /* Iterate over all threads */
1679         for (i = 0; i < thrd_lists->size(); i++) {
1680                 /* Last SC fence in thread i, before last SC fence in current thread */
1681                 ModelAction *last_sc_fence_thread_before = NULL;
1682                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1683                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1684
1685                 /* Iterate over actions in thread, starting from most recent */
1686                 action_list_t *list = &(*thrd_lists)[i];
1687                 action_list_t::reverse_iterator rit;
1688                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1689                         ModelAction *act = *rit;
1690                         if (act == curr) {
1691                                 /*
1692                                  * 1) If RMW and it actually read from something, then we
1693                                  * already have all relevant edges, so just skip to next
1694                                  * thread.
1695                                  *
1696                                  * 2) If RMW and it didn't read from anything, we should take
1697                                  * whatever edge we can get to speed up convergence.
1698                                  *
1699                                  * 3) If normal write, we need to look at earlier actions, so
1700                                  * continue processing list.
1701                                  */
1702                                 if (curr->is_rmw()) {
1703                                         if (curr->get_reads_from() != NULL)
1704                                                 break;
1705                                         else
1706                                                 continue;
1707                                 } else
1708                                         continue;
1709                         }
1710
1711                         /* C++, Section 29.3 statement 7 */
1712                         if (last_sc_fence_thread_before && act->is_write() &&
1713                                         *act < *last_sc_fence_thread_before) {
1714                                 added = mo_graph->addEdge(act, curr) || added;
1715                                 break;
1716                         }
1717
1718                         /*
1719                          * Include at most one act per-thread that "happens
1720                          * before" curr
1721                          */
1722                         if (act->happens_before(curr)) {
1723                                 /*
1724                                  * Note: if act is RMW, just add edge:
1725                                  *   act --mo--> curr
1726                                  * The following edge should be handled elsewhere:
1727                                  *   readfrom(act) --mo--> act
1728                                  */
1729                                 if (act->is_write())
1730                                         added = mo_graph->addEdge(act, curr) || added;
1731                                 else if (act->is_read()) {
1732                                         // if the previous read's reads-from is not yet resolved (e.g., it reads from a promise), just keep going
1733                                         if (act->get_reads_from() == NULL)
1734                                                 continue;
1735                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1736                                 }
1737                                 break;
1738                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1739                                                      !act->same_thread(curr)) {
1740                                 /* We have an action that:
1741                                    (1) did not happen before us
1742                                    (2) is a read and we are a write
1743                                    (3) cannot synchronize with us
1744                                    (4) is in a different thread
1745                                    =>
1746                                    that read could potentially read from our write.  Note that
1747                                    these checks are overly conservative at this point; we'll
1748                                    do more checks before actually removing the
1749                                    pendingfuturevalue.
1750
1751                                  */
1752                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1753                                         if (!is_infeasible())
1754                                                 send_fv->push_back(act);
1755                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1756                                                 add_future_value(curr, act);
1757                                 }
1758                         }
1759                 }
1760         }
1761
1762         /*
1763          * All compatible, thread-exclusive promises must be ordered after any
1764          * concrete stores from the same thread, or else they can be merged with
1765          * this store later
1766          */
1767         for (unsigned int i = 0; i < promises.size(); i++)
1768                 if (promises[i]->is_compatible_exclusive(curr))
1769                         added = mo_graph->addEdge(curr, promises[i]) || added;
1770
1771         return added;
1772 }
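
/*
 * Illustrative sketch (hypothetical, not part of this file) of the
 * read-then-write case handled above:
 *
 *   std::atomic<int> x(0);
 *   // T2:  x.store(1, std::memory_order_relaxed);   // A
 *   // T1:  r = x.load(std::memory_order_relaxed);   // R, reads from A
 *   //      x.store(2, std::memory_order_relaxed);   // curr
 *
 * R happens before curr and reads from A, so read-write coherence requires
 * A --mo--> curr; that is the addEdge(act->get_reads_from(), curr) call in
 * the happens-before branch.
 */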
1773
1774 /** Arbitrary reads from the future are not allowed.  Section 29.3
1775  * part 9 places some constraints.  This method checks one consequence of that
1776  * constraint.  Others require compiler support. */
1777 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1778 {
1779         if (!writer->is_rmw())
1780                 return true;
1781
1782         if (!reader->is_rmw())
1783                 return true;
1784
1785         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1786                 if (search == reader)
1787                         return false;
1788                 if (search->get_tid() == reader->get_tid() &&
1789                                 search->happens_before(reader))
1790                         break;
1791         }
1792
1793         return true;
1794 }
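
/*
 * Illustrative sketch (hypothetical, not part of this file):
 *
 *   std::atomic<int> x(0);
 *   // T1:  a = x.fetch_add(1, std::memory_order_relaxed);   // RMW R1
 *   // T2:  b = x.fetch_add(1, std::memory_order_relaxed);   // RMW R2
 *
 * If R2 already reads (possibly through a chain of other RMWs) from R1, then
 * letting R1 read from R2 would make the two RMWs feed each other's results
 * -- a value out of thin air.  This method returns false for exactly that
 * writer/reader combination.
 */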
1795
1796 /**
1797  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1798  * some constraints. This method checks the following constraint (others
1799  * require compiler support):
1800  *
1801  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1802  */
1803 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1804 {
1805         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1806         unsigned int i;
1807         /* Iterate over all threads */
1808         for (i = 0; i < thrd_lists->size(); i++) {
1809                 const ModelAction *write_after_read = NULL;
1810
1811                 /* Iterate over actions in thread, starting from most recent */
1812                 action_list_t *list = &(*thrd_lists)[i];
1813                 action_list_t::reverse_iterator rit;
1814                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1815                         ModelAction *act = *rit;
1816
1817                         /* Don't disallow due to act == reader */
1818                         if (!reader->happens_before(act) || reader == act)
1819                                 break;
1820                         else if (act->is_write())
1821                                 write_after_read = act;
1822                         else if (act->is_read() && act->get_reads_from() != NULL)
1823                                 write_after_read = act->get_reads_from();
1824                 }
1825
1826                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1827                         return false;
1828         }
1829         return true;
1830 }
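
/*
 * Illustrative sketch (hypothetical, not part of this file) of the
 * X --hb-> Y --mo-> Z pattern ruled out above:
 *
 *   std::atomic<int> x(0);
 *   // T1:  r = x.load(std::memory_order_relaxed);   // X, the prospective reader
 *   //      x.store(1, std::memory_order_relaxed);   // Y, sequenced after X
 *   // T2:  x.store(2, std::memory_order_relaxed);   // Z, with Y --mo--> Z
 *
 * X may not read from Z: read-write coherence would then require
 * Z --mo--> Y, contradicting Y --mo--> Z, so this method returns false when
 * asked whether Z's value may be sent back to X as a future value.
 */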
1831
1832 /**
1833  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1834  * The ModelAction under consideration is expected to be taking part in
1835  * release/acquire synchronization as an object of the "reads from" relation.
1836  * Note that this can only provide release sequence support for RMW chains
1837  * which do not read from the future, as those actions cannot be traced until
1838  * their "promise" is fulfilled. Similarly, we may not even establish the
1839  * presence of a release sequence with certainty, as some modification order
1840  * constraints may be decided further in the future. Thus, this function
1841  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1842  * and a boolean representing certainty.
1843  *
1844  * @param rf The action that might be part of a release sequence. Must be a
1845  * write.
1846  * @param release_heads A pass-by-reference style return parameter. After
1847  * execution of this function, release_heads will contain the heads of all the
1848  * relevant release sequences, if any exists with certainty
1849  * @param pending A pass-by-reference style return parameter which is only used
1850  * when returning false (i.e., uncertain). Returns most information regarding
1851  * an uncertain release sequence, including any write operations that might
1852  * break the sequence.
1853  * @return true, if the ModelExecution is certain that release_heads is complete;
1854  * false otherwise
1855  */
1856 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1857                 rel_heads_list_t *release_heads,
1858                 struct release_seq *pending) const
1859 {
1860         /* Only check for release sequences if there are no cycles */
1861         if (mo_graph->checkForCycles())
1862                 return false;
1863
1864         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1865                 ASSERT(rf->is_write());
1866
1867                 if (rf->is_release())
1868                         release_heads->push_back(rf);
1869                 else if (rf->get_last_fence_release())
1870                         release_heads->push_back(rf->get_last_fence_release());
1871                 if (!rf->is_rmw())
1872                         break; /* End of RMW chain */
1873
1874                 /** @todo Need to be smarter here...  In the linux lock
1875                  * example, this will run to the beginning of the program for
1876                  * every acquire. */
1877                 /** @todo The way to be smarter here is to keep going until 1
1878                  * thread has a release preceded by an acquire and you've seen
1879                  *       both. */
1880
1881                 /* acq_rel RMW is a sufficient stopping condition */
1882                 if (rf->is_acquire() && rf->is_release())
1883                         return true; /* complete */
1884         }
1885         if (!rf) {
1886                 /* read from future: need to settle this later */
1887                 pending->rf = NULL;
1888                 return false; /* incomplete */
1889         }
1890
1891         if (rf->is_release())
1892                 return true; /* complete */
1893
1894         /* else relaxed write
1895          * - check for fence-release in the same thread (29.8, stmt. 3)
1896          * - check modification order for contiguous subsequence
1897          *   -> rf must be same thread as release */
1898
1899         const ModelAction *fence_release = rf->get_last_fence_release();
1900         /* Synchronize with a fence-release unconditionally; we don't need to
1901          * find any more "contiguous subsequence..." for it */
1902         if (fence_release)
1903                 release_heads->push_back(fence_release);
1904
1905         int tid = id_to_int(rf->get_tid());
1906         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1907         action_list_t *list = &(*thrd_lists)[tid];
1908         action_list_t::const_reverse_iterator rit;
1909
1910         /* Find rf in the thread list */
1911         rit = std::find(list->rbegin(), list->rend(), rf);
1912         ASSERT(rit != list->rend());
1913
1914         /* Find the last {write,fence}-release */
1915         for (; rit != list->rend(); rit++) {
1916                 if (fence_release && *(*rit) < *fence_release)
1917                         break;
1918                 if ((*rit)->is_release())
1919                         break;
1920         }
1921         if (rit == list->rend()) {
1922                 /* No write-release in this thread */
1923                 return true; /* complete */
1924         } else if (fence_release && *(*rit) < *fence_release) {
1925                 /* The fence-release is more recent (and so, "stronger") than
1926                  * the most recent write-release */
1927                 return true; /* complete */
1928         } /* else, need to establish contiguous release sequence */
1929         ModelAction *release = *rit;
1930
1931         ASSERT(rf->same_thread(release));
1932
1933         pending->writes.clear();
1934
1935         bool certain = true;
1936         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1937                 if (id_to_int(rf->get_tid()) == (int)i)
1938                         continue;
1939                 list = &(*thrd_lists)[i];
1940
1941                 /* Can we ensure no future writes from this thread may break
1942                  * the release seq? */
1943                 bool future_ordered = false;
1944
1945                 ModelAction *last = get_last_action(int_to_id(i));
1946                 Thread *th = get_thread(int_to_id(i));
1947                 if ((last && rf->happens_before(last)) ||
1948                                 !is_enabled(th) ||
1949                                 th->is_complete())
1950                         future_ordered = true;
1951
1952                 ASSERT(!th->is_model_thread() || future_ordered);
1953
1954                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1955                         const ModelAction *act = *rit;
1956                         /* Reach synchronization -> this thread is complete */
1957                         if (act->happens_before(release))
1958                                 break;
1959                         if (rf->happens_before(act)) {
1960                                 future_ordered = true;
1961                                 continue;
1962                         }
1963
1964                         /* Only non-RMW writes can break release sequences */
1965                         if (!act->is_write() || act->is_rmw())
1966                                 continue;
1967
1968                         /* Check modification order */
1969                         if (mo_graph->checkReachable(rf, act)) {
1970                                 /* rf --mo--> act */
1971                                 future_ordered = true;
1972                                 continue;
1973                         }
1974                         if (mo_graph->checkReachable(act, release))
1975                                 /* act --mo--> release */
1976                                 break;
1977                         if (mo_graph->checkReachable(release, act) &&
1978                                       mo_graph->checkReachable(act, rf)) {
1979                                 /* release --mo-> act --mo--> rf */
1980                                 return true; /* complete */
1981                         }
1982                         /* act may break release sequence */
1983                         pending->writes.push_back(act);
1984                         certain = false;
1985                 }
1986                 if (!future_ordered)
1987                         certain = false; /* This thread is uncertain */
1988         }
1989
1990         if (certain) {
1991                 release_heads->push_back(release);
1992                 pending->writes.clear();
1993         } else {
1994                 pending->release = release;
1995                 pending->rf = rf;
1996         }
1997         return certain;
1998 }
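
/*
 * Illustrative sketch (hypothetical, not part of this file) of a release
 * sequence traced by the RMW-chain walk above:
 *
 *   std::atomic<int> x(0);
 *   // T1:  x.store(1, std::memory_order_release);              // head
 *   // T2:  x.fetch_add(1, std::memory_order_relaxed);          // RMW extends the sequence
 *   // T3:  if (x.load(std::memory_order_acquire) == 2) { ... } // rf = T2's RMW
 *
 * Walking reads-from backwards from T2's RMW reaches T1's store-release, so
 * that store is placed in release_heads and T3's acquire load synchronizes
 * with it.
 */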
1999
2000 /**
2001  * An interface for getting the release sequence head(s) with which a
2002  * given ModelAction must synchronize. This function only returns a non-empty
2003  * result when it can locate a release sequence head with certainty. Otherwise,
2004  * it may mark the internal state of the ModelExecution so that it will handle
2005  * the release sequence at a later time, causing @a acquire to update its
2006  * synchronization at some later point in execution.
2007  *
2008  * @param acquire The 'acquire' action that may synchronize with a release
2009  * sequence
2010  * @param read The read action that may read from a release sequence; this may
2011  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2012  * when 'acquire' is a fence-acquire)
2013  * @param release_heads A pass-by-reference return parameter. Will be filled
2014  * with the head(s) of the release sequence(s), if they exist with certainty.
2015  * @see ModelExecution::release_seq_heads
2016  */
2017 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2018                 ModelAction *read, rel_heads_list_t *release_heads)
2019 {
2020         const ModelAction *rf = read->get_reads_from();
2021         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2022         sequence->acquire = acquire;
2023         sequence->read = read;
2024
2025         if (!release_seq_heads(rf, release_heads, sequence)) {
2026                 /* add act to 'lazy checking' list */
2027                 pending_rel_seqs.push_back(sequence);
2028         } else {
2029                 snapshot_free(sequence);
2030         }
2031 }
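
/*
 * Illustrative sketch (hypothetical, not part of this file) of the case
 * where 'acquire' and 'read' differ, i.e., a fence-acquire:
 *
 *   std::atomic<int> x(0);
 *   // T1:  x.store(1, std::memory_order_release);
 *   // T2:  int r = x.load(std::memory_order_relaxed);           // 'read'
 *   //      std::atomic_thread_fence(std::memory_order_acquire); // 'acquire'
 *
 * The relaxed load supplies the reads-from edge, but it is the fence that
 * synchronizes with the release sequence headed by T1's store once that
 * sequence is established with certainty.
 */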
2032
2033 /**
2034  * Attempt to resolve all stashed operations that might synchronize with a
2035  * release sequence for a given location. This implements the "lazy" portion of
2036  * determining whether or not a release sequence was contiguous, since not all
2037  * modification order information is present at the time an action occurs.
2038  *
2039  * @param location The location/object that should be checked for release
2040  * sequence resolutions. A NULL value means to check all locations.
2041  * @param work_queue The work queue to which to add work items as they are
2042  * generated
2043  * @return True if any updates occurred (new synchronization, new mo_graph
2044  * edges)
2045  */
2046 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2047 {
2048         bool updated = false;
2049         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2050         while (it != pending_rel_seqs.end()) {
2051                 struct release_seq *pending = *it;
2052                 ModelAction *acquire = pending->acquire;
2053                 const ModelAction *read = pending->read;
2054
2055                 /* Only resolve sequences on the given location, if provided */
2056                 if (location && read->get_location() != location) {
2057                         it++;
2058                         continue;
2059                 }
2060
2061                 const ModelAction *rf = read->get_reads_from();
2062                 rel_heads_list_t release_heads;
2063                 bool complete;
2064                 complete = release_seq_heads(rf, &release_heads, pending);
2065                 for (unsigned int i = 0; i < release_heads.size(); i++)
2066                         if (!acquire->has_synchronized_with(release_heads[i]))
2067                                 if (synchronize(release_heads[i], acquire))
2068                                         updated = true;
2069
2070                 if (updated) {
2071                         /* Re-check all pending release sequences */
2072                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2073                         /* Re-check read-acquire for mo_graph edges */
2074                         if (acquire->is_read())
2075                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2076
2077                         /* propagate synchronization to later actions */
2078                         action_list_t::reverse_iterator rit = action_trace.rbegin();
2079                         for (; (*rit) != acquire; rit++) {
2080                                 ModelAction *propagate = *rit;
2081                                 if (acquire->happens_before(propagate)) {
2082                                         synchronize(acquire, propagate);
2083                                         /* Re-check 'propagate' for mo_graph edges */
2084                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2085                                 }
2086                         }
2087                 }
2088                 if (complete) {
2089                         it = pending_rel_seqs.erase(it);
2090                         snapshot_free(pending);
2091                 } else {
2092                         it++;
2093                 }
2094         }
2095
2096         // If we added any new synchronization above, check whether it has realized a data race.
2097         checkDataRaces();
2098
2099         return updated;
2100 }
2101
2102 /**
2103  * Performs various bookkeeping operations for the current ModelAction. For
2104  * instance, adds the action to the per-object, per-thread action vector and to the
2105  * action trace list of all thread actions.
2106  *
2107  * @param act is the ModelAction to add.
2108  */
2109 void ModelExecution::add_action_to_lists(ModelAction *act)
2110 {
2111         int tid = id_to_int(act->get_tid());
2112         ModelAction *uninit = NULL;
2113         int uninit_id = -1;
2114         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2115         if (list->empty() && act->is_atomic_var()) {
2116                 uninit = get_uninitialized_action(act);
2117                 uninit_id = id_to_int(uninit->get_tid());
2118                 list->push_front(uninit);
2119         }
2120         list->push_back(act);
2121
2122         action_trace.push_back(act);
2123         if (uninit)
2124                 action_trace.push_front(uninit);
2125
2126         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2127         if (tid >= (int)vec->size())
2128                 vec->resize(priv->next_thread_id);
2129         (*vec)[tid].push_back(act);
2130         if (uninit)
2131                 (*vec)[uninit_id].push_front(uninit);
2132
2133         if ((int)thrd_last_action.size() <= tid)
2134                 thrd_last_action.resize(get_num_threads());
2135         thrd_last_action[tid] = act;
2136         if (uninit)
2137                 thrd_last_action[uninit_id] = uninit;
2138
2139         if (act->is_fence() && act->is_release()) {
2140                 if ((int)thrd_last_fence_release.size() <= tid)
2141                         thrd_last_fence_release.resize(get_num_threads());
2142                 thrd_last_fence_release[tid] = act;
2143         }
2144
2145         if (act->is_wait()) {
2146                 void *mutex_loc = (void *) act->get_value();
2147                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2148
2149                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2150                 if (tid >= (int)vec->size())
2151                         vec->resize(priv->next_thread_id);
2152                 (*vec)[tid].push_back(act);
2153         }
2154 }
2155
2156 /**
2157  * @brief Get the last action performed by a particular Thread
2158  * @param tid The thread ID of the Thread in question
2159  * @return The last action in the thread
2160  */
2161 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2162 {
2163         int threadid = id_to_int(tid);
2164         if (threadid < (int)thrd_last_action.size())
2165                 return thrd_last_action[id_to_int(tid)];
2166         else
2167                 return NULL;
2168 }
2169
2170 /**
2171  * @brief Get the last fence release performed by a particular Thread
2172  * @param tid The thread ID of the Thread in question
2173  * @return The last fence release in the thread, if one exists; NULL otherwise
2174  */
2175 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2176 {
2177         int threadid = id_to_int(tid);
2178         if (threadid < (int)thrd_last_fence_release.size())
2179                 return thrd_last_fence_release[id_to_int(tid)];
2180         else
2181                 return NULL;
2182 }
2183
2184 /**
2185  * Gets the last memory_order_seq_cst write (in the total global sequence)
2186  * performed on a particular object (i.e., memory location), not including the
2187  * current action.
2188  * @param curr The current ModelAction; also denotes the object location to
2189  * check
2190  * @return The last seq_cst write
2191  */
2192 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2193 {
2194         void *location = curr->get_location();
2195         action_list_t *list = obj_map.get(location);
2196         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2197         action_list_t::reverse_iterator rit;
2198         for (rit = list->rbegin(); (*rit) != curr; rit++)
2199                 ;
2200         rit++; /* Skip past curr */
2201         for ( ; rit != list->rend(); rit++)
2202                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2203                         return *rit;
2204         return NULL;
2205 }
2206
2207 /**
2208  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2209  * performed in a particular thread, prior to a particular fence.
2210  * @param tid The ID of the thread to check
2211  * @param before_fence The fence from which to begin the search; if NULL, then
2212  * search for the most recent fence in the thread.
2213  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2214  */
2215 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2216 {
2217         /* All fences should have location FENCE_LOCATION */
2218         action_list_t *list = obj_map.get(FENCE_LOCATION);
2219
2220         if (!list)
2221                 return NULL;
2222
2223         action_list_t::reverse_iterator rit = list->rbegin();
2224
2225         if (before_fence) {
2226                 for (; rit != list->rend(); rit++)
2227                         if (*rit == before_fence)
2228                                 break;
2229
2230                 ASSERT(*rit == before_fence);
2231                 rit++;
2232         }
2233
2234         for (; rit != list->rend(); rit++)
2235                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2236                         return *rit;
2237         return NULL;
2238 }
2239
2240 /**
2241  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2242  * location). This function identifies the mutex according to the current
2243  * action, which is presumed to operate on the same mutex.
2244  * @param curr The current ModelAction; also denotes the object location to
2245  * check
2246  * @return The last unlock operation
2247  */
2248 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2249 {
2250         void *location = curr->get_location();
2251         action_list_t *list = obj_map.get(location);
2252         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2253         action_list_t::reverse_iterator rit;
2254         for (rit = list->rbegin(); rit != list->rend(); rit++)
2255                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2256                         return *rit;
2257         return NULL;
2258 }
2259
2260 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2261 {
2262         ModelAction *parent = get_last_action(tid);
2263         if (!parent)
2264                 parent = get_thread(tid)->get_creation();
2265         return parent;
2266 }
2267
2268 /**
2269  * Returns the clock vector for a given thread.
2270  * @param tid The thread whose clock vector we want
2271  * @return Desired clock vector
2272  */
2273 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2274 {
2275         return get_parent_action(tid)->get_cv();
2276 }
2277
2278 /**
2279  * @brief Find the promise (if any) to resolve for the current action and
2280  * remove it from the pending promise vector
2281  * @param curr The current ModelAction. Should be a write.
2282  * @return The Promise to resolve, if any; otherwise NULL
2283  */
2284 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2285 {
2286         for (unsigned int i = 0; i < promises.size(); i++)
2287                 if (curr->get_node()->get_promise(i)) {
2288                         Promise *ret = promises[i];
2289                         promises.erase(promises.begin() + i);
2290                         return ret;
2291                 }
2292         return NULL;
2293 }
2294
2295 /**
2296  * Resolve a Promise with a current write.
2297  * @param write The ModelAction that is fulfilling Promises
2298  * @param promise The Promise to resolve
2299  * @return True if the Promise was successfully resolved; false otherwise
2300  */
2301 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2302 {
2303         ModelVector<ModelAction *> actions_to_check;
2304
2305         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2306                 ModelAction *read = promise->get_reader(i);
2307                 read_from(read, write);
2308                 actions_to_check.push_back(read);
2309         }
2310         /* Make sure the promise's value matches the write's value */
2311         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2312         if (!mo_graph->resolvePromise(promise, write))
2313                 priv->failed_promise = true;
2314
2315         /**
2316          * @todo  It is possible to end up in an inconsistent state, where a
2317          * "resolved" promise may still be referenced if
2318          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2319          *
2320          * Note that the inconsistency only matters when dumping mo_graph to
2321          * file.
2322          *
2323          * delete promise;
2324          */
2325
2326         //Check whether reading these writes has made threads unable to
2327         //resolve promises
2328         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2329                 ModelAction *read = actions_to_check[i];
2330                 mo_check_promises(read, true);
2331         }
2332
2333         return true;
2334 }
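
/*
 * Illustrative sketch (hypothetical, not part of this file) of why promises
 * exist at all -- the classic load-buffering outcome:
 *
 *   std::atomic<int> x(0), y(0);
 *   // T1:  r1 = x.load(std::memory_order_relaxed);
 *   //      y.store(r1, std::memory_order_relaxed);
 *   // T2:  r2 = y.load(std::memory_order_relaxed);
 *   //      x.store(1, std::memory_order_relaxed);
 *
 * To explore r1 == 1, T1's load must read from a store that has not yet
 * executed; the checker records that obligation as a Promise.  When T2's
 * store of 1 finally runs, pop_promise_to_resolve() selects the Promise and
 * resolve_promise() binds the speculative reader(s) to the concrete write.
 */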
2335
2336 /**
2337  * Compute the set of promises that could potentially be satisfied by this
2338  * action. Note that the set computation actually appears in the Node, not in
2339  * ModelExecution.
2340  * @param curr The ModelAction that may satisfy promises
2341  */
2342 void ModelExecution::compute_promises(ModelAction *curr)
2343 {
2344         for (unsigned int i = 0; i < promises.size(); i++) {
2345                 Promise *promise = promises[i];
2346                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2347                         continue;
2348
2349                 bool satisfy = true;
2350                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2351                         const ModelAction *act = promise->get_reader(j);
2352                         if (act->happens_before(curr) ||
2353                                         act->could_synchronize_with(curr)) {
2354                                 satisfy = false;
2355                                 break;
2356                         }
2357                 }
2358                 if (satisfy)
2359                         curr->get_node()->set_promise(i);
2360         }
2361 }
2362
2363 /** Checks promises in response to change in ClockVector Threads. */
2364 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2365 {
2366         for (unsigned int i = 0; i < promises.size(); i++) {
2367                 Promise *promise = promises[i];
2368                 if (!promise->thread_is_available(tid))
2369                         continue;
2370                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2371                         const ModelAction *act = promise->get_reader(j);
2372                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2373                                         merge_cv->synchronized_since(act)) {
2374                                 if (promise->eliminate_thread(tid)) {
2375                                         /* Promise has failed */
2376                                         priv->failed_promise = true;
2377                                         return;
2378                                 }
2379                         }
2380                 }
2381         }
2382 }
2383
2384 void ModelExecution::check_promises_thread_disabled()
2385 {
2386         for (unsigned int i = 0; i < promises.size(); i++) {
2387                 Promise *promise = promises[i];
2388                 if (promise->has_failed()) {
2389                         priv->failed_promise = true;
2390                         return;
2391                 }
2392         }
2393 }
2394
2395 /**
2396  * @brief Checks promises in response to addition to modification order for
2397  * threads.
2398  *
2399  * We test whether threads are still available for satisfying promises after an
2400  * addition to our modification order constraints. Those that are unavailable
2401  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2402  * that promise has failed.
2403  *
2404  * @param act The ModelAction which updated the modification order
2405  * @param is_read_check Should be true if act is a read and we must check for
2406  * updates to the store from which it read (there is a distinction here for
2407  * RMW's, which are both a load and a store)
2408  */
2409 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2410 {
2411         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2412
2413         for (unsigned int i = 0; i < promises.size(); i++) {
2414                 Promise *promise = promises[i];
2415
2416                 // Is this promise on the same location?
2417                 if (!promise->same_location(write))
2418                         continue;
2419
2420                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2421                         const ModelAction *pread = promise->get_reader(j);
2422                         if (!pread->happens_before(act))
2423                                continue;
2424                         if (mo_graph->checkPromise(write, promise)) {
2425                                 priv->failed_promise = true;
2426                                 return;
2427                         }
2428                         break;
2429                 }
2430
2431                 // Don't do any lookups twice for the same thread
2432                 if (!promise->thread_is_available(act->get_tid()))
2433                         continue;
2434
2435                 if (mo_graph->checkReachable(promise, write)) {
2436                         if (mo_graph->checkPromise(write, promise)) {
2437                                 priv->failed_promise = true;
2438                                 return;
2439                         }
2440                 }
2441         }
2442 }
2443
2444 /**
2445  * Compute the set of writes that may break the current pending release
2446  * sequence. This information is extracted from previous release sequence
2447  * calculations.
2448  *
2449  * @param curr The current ModelAction. Must be a release sequence fixup
2450  * action.
2451  */
2452 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2453 {
2454         if (pending_rel_seqs.empty())
2455                 return;
2456
2457         struct release_seq *pending = pending_rel_seqs.back();
2458         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2459                 const ModelAction *write = pending->writes[i];
2460                 curr->get_node()->add_relseq_break(write);
2461         }
2462
2463         /* NULL means don't break the sequence; just synchronize */
2464         curr->get_node()->add_relseq_break(NULL);
2465 }
2466
2467 /**
2468  * Build up an initial set of all past writes that this 'read' action may read
2469  * from, as well as any previously-observed future values that must still be valid.
2470  *
2471  * @param curr is the current ModelAction that we are exploring; it must be a
2472  * 'read' operation.
2473  */
2474 void ModelExecution::build_may_read_from(ModelAction *curr)
2475 {
2476         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2477         unsigned int i;
2478         ASSERT(curr->is_read());
2479
2480         ModelAction *last_sc_write = NULL;
2481
2482         if (curr->is_seqcst())
2483                 last_sc_write = get_last_seq_cst_write(curr);
2484
2485         /* Iterate over all threads */
2486         for (i = 0; i < thrd_lists->size(); i++) {
2487                 /* Iterate over actions in thread, starting from most recent */
2488                 action_list_t *list = &(*thrd_lists)[i];
2489                 action_list_t::reverse_iterator rit;
2490                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2491                         ModelAction *act = *rit;
2492
2493                         /* Only consider 'write' actions */
2494                         if (!act->is_write() || act == curr)
2495                                 continue;
2496
2497                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2498                         bool allow_read = true;
2499
2500                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2501                                 allow_read = false;
2502                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2503                                 allow_read = false;
2504
2505                         if (allow_read) {
2506                                 /* Only add feasible reads */
2507                                 mo_graph->startChanges();
2508                                 r_modification_order(curr, act);
2509                                 if (!is_infeasible())
2510                                         curr->get_node()->add_read_from_past(act);
2511                                 mo_graph->rollbackChanges();
2512                         }
2513
2514                         /* Include at most one act per-thread that "happens before" curr */
2515                         if (act->happens_before(curr))
2516                                 break;
2517                 }
2518         }
2519
2520         /* Inherit existing, promised future values */
2521         for (i = 0; i < promises.size(); i++) {
2522                 const Promise *promise = promises[i];
2523                 const ModelAction *promise_read = promise->get_reader(0);
2524                 if (promise_read->same_var(curr)) {
2525                         /* Only add feasible future-values */
2526                         mo_graph->startChanges();
2527                         r_modification_order(curr, promise);
2528                         if (!is_infeasible())
2529                                 curr->get_node()->add_read_from_promise(promise_read);
2530                         mo_graph->rollbackChanges();
2531                 }
2532         }
2533
2534         /* An empty may-read-from set can occur only if the execution is doomed */
2535         if (!curr->get_node()->read_from_size()) {
2536                 priv->no_valid_reads = true;
2537                 set_assert();
2538         }
2539
2540         if (DBG_ENABLED()) {
2541                 model_print("Reached read action:\n");
2542                 curr->print();
2543                 model_print("Printing read_from_past\n");
2544                 curr->get_node()->print_read_from_past();
2545                 model_print("End printing read_from_past\n");
2546         }
2547 }
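
/*
 * A minimal illustrative sketch (not part of the checker) of the kind of
 * program this matters for: under the C++11 memory model, the relaxed load in
 * reader() below may observe the initial value or either store, and
 * build_may_read_from() is what collects that set of candidate writes.
 * std::thread is used here purely for illustration.
 *
 *     #include <atomic>
 *     #include <thread>
 *
 *     std::atomic<int> x(0);
 *
 *     static void writer()
 *     {
 *             x.store(1, std::memory_order_relaxed);
 *             x.store(2, std::memory_order_relaxed);
 *     }
 *
 *     static void reader(int *out)
 *     {
 *             *out = x.load(std::memory_order_relaxed); // may read 0, 1, or 2
 *     }
 *
 *     int main()
 *     {
 *             int r = -1;
 *             std::thread a(writer), b(reader, &r);
 *             a.join();
 *             b.join();
 *             return 0;
 *     }
 */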
2548
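/**
 * @brief Check whether a read by a sleep-set thread may read from a given write
 *
 * Walks backwards through the write's reads-from chain (i.e., through any RMW
 * predecessors): the read is permitted if the chain reaches the uninitialized
 * action (or runs out entirely), or if some write in the chain is a release
 * performed while the reading thread was on the sleep set; it is rejected as
 * soon as a non-RMW write is reached without either condition holding.
 *
 * @param curr The read action being checked
 * @param write The candidate write for curr to read from
 * @return True if the read may read from the write
 */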
2549 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2550 {
2551         for ( ; write != NULL; write = write->get_reads_from()) {
2552                 /* UNINIT actions don't have a Node, and they never sleep */
2553                 if (write->is_uninitialized())
2554                         return true;
2555                 Node *prevnode = write->get_node()->get_parent();
2556
2557                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2558                 if (write->is_release() && thread_sleep)
2559                         return true;
2560                 if (!write->is_rmw())
2561                         return false;
2562         }
2563         return true;
2564 }
2565
2566 /**
2567  * @brief Get an action representing an uninitialized atomic
2568  *
2569  * This function creates a new UNINIT action or retrieves the one already cached on the current action's Node
2570  *
2571  * @param curr The current action, which prompts the creation of an UNINIT action
2572  * @return A pointer to the UNINIT ModelAction
2573  */
2574 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2575 {
2576         Node *node = curr->get_node();
2577         ModelAction *act = node->get_uninit_action();
2578         if (!act) {
2579                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2580                 node->set_uninit_action(act);
2581         }
2582         act->create_cv(NULL);
2583         return act;
2584 }
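
/*
 * A minimal sketch (illustration only) of when this applies: a load from an
 * atomic object that no thread has stored to has no real write to read from,
 * so the placeholder ATOMIC_UNINIT action created here (carrying
 * params->uninitvalue) stands in as the write it reads from.
 *
 *     std::atomic<int> flag;                        // never stored to
 *     int r = flag.load(std::memory_order_relaxed); // reads the UNINIT placeholder
 */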
2585
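/**
 * @brief Print a list of ModelActions followed by a simple hash of the list
 * @param list The action list to print
 */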
2586 static void print_list(const action_list_t *list)
2587 {
2588         action_list_t::const_iterator it;
2589
2590         model_print("---------------------------------------------------------------------\n");
2591
2592         unsigned int hash = 0;
2593
2594         for (it = list->begin(); it != list->end(); it++) {
2595                 const ModelAction *act = *it;
2596                 if (act->get_seq_number() > 0)
2597                         act->print();
2598                 hash = hash^(hash<<3)^((*it)->hash());
2599         }
2600         model_print("HASH %u\n", hash);
2601         model_print("---------------------------------------------------------------------\n");
2602 }
2603
2604 #if SUPPORT_MOD_ORDER_DUMP
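/**
 * @brief Dump the current execution to a Graphviz (.dot) file
 *
 * Writes the modification-order graph plus reads-from ("rf", red) and
 * sequenced-before ("sb", blue) edges for the current action trace.
 *
 * @param filename The output name; ".dot" is appended to form the file name
 */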
2605 void ModelExecution::dumpGraph(char *filename) const
2606 {
2607         char buffer[200];
2608         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
2609         FILE *file = fopen(buffer, "w");
2610         fprintf(file, "digraph %s {\n", filename);
2611         mo_graph->dumpNodes(file);
2612         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2613
2614         for (action_list_t::iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2615                 ModelAction *act = *it;
2616                 if (act->is_read()) {
2617                         mo_graph->dot_print_node(file, act);
2618                         if (act->get_reads_from())
2619                                 mo_graph->dot_print_edge(file,
2620                                                 act->get_reads_from(),
2621                                                 act,
2622                                                 "label=\"rf\", color=red, weight=2");
2623                         else
2624                                 mo_graph->dot_print_edge(file,
2625                                                 act->get_reads_from_promise(),
2626                                                 act,
2627                                                 "label=\"rf\", color=red");
2628                 }
2629                 if (thread_array[id_to_int(act->get_tid())]) {
2630                         mo_graph->dot_print_edge(file,
2631                                         thread_array[id_to_int(act->get_tid())],
2632                                         act,
2633                                         "label=\"sb\", color=blue, weight=400");
2634                 }
2635
2636                 thread_array[id_to_int(act->get_tid())] = act;
2637         }
2638         fprintf(file, "}\n");
2639         model_free(thread_array);
2640         fclose(file);
2641 }
2642 #endif
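
/*
 * Usage note: the dumped .dot files can be rendered with Graphviz (assuming
 * the standard `dot` tool is installed), e.g.:
 *
 *     dot -Tpng graph0001.dot -o graph0001.png
 *
 * where "graph0001" is the per-execution name chosen in print_summary() below.
 */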
2643
2644 /** @brief Prints an execution trace summary. */
2645 void ModelExecution::print_summary() const
2646 {
2647 #if SUPPORT_MOD_ORDER_DUMP
2648         char buffername[100];
2649         snprintf(buffername, sizeof(buffername), "exec%04d", get_execution_number());
2650         mo_graph->dumpGraphToFile(buffername);
2651         snprintf(buffername, sizeof(buffername), "graph%04d", get_execution_number());
2652         dumpGraph(buffername);
2653 #endif
2654
2655         model_print("Execution %d:", get_execution_number());
2656         if (isfeasibleprefix()) {
2657                 if (scheduler->all_threads_sleeping())
2658                         model_print(" SLEEP-SET REDUNDANT");
2659                 model_print("\n");
2660         } else
2661                 print_infeasibility(" INFEASIBLE");
2662         print_list(&action_trace);
2663         model_print("\n");
2664         if (!promises.empty()) {
2665                 model_print("Pending promises:\n");
2666                 for (unsigned int i = 0; i < promises.size(); i++) {
2667                         model_print(" [P%u] ", i);
2668                         promises[i]->print();
2669                 }
2670                 model_print("\n");
2671         }
2672 }
2673
2674 /**
2675  * Add a Thread to the system for the first time. Should only be called once
2676  * per thread.
2677  * @param t The Thread to add
2678  */
2679 void ModelExecution::add_thread(Thread *t)
2680 {
2681         unsigned int i = id_to_int(t->get_id());
2682         if (i >= thread_map.size())
2683                 thread_map.resize(i + 1);
2684         thread_map[i] = t;
2685         if (!t->is_model_thread())
2686                 scheduler->add_thread(t);
2687 }
2688
2689 /**
2690  * @brief Get a Thread reference by its ID
2691  * @param tid The Thread's ID
2692  * @return A Thread reference
2693  */
2694 Thread * ModelExecution::get_thread(thread_id_t tid) const
2695 {
2696         unsigned int i = id_to_int(tid);
2697         if (i < thread_map.size())
2698                 return thread_map[i];
2699         return NULL;
2700 }
2701
2702 /**
2703  * @brief Get a reference to the Thread in which a ModelAction was executed
2704  * @param act The ModelAction
2705  * @return A Thread reference
2706  */
2707 Thread * ModelExecution::get_thread(const ModelAction *act) const
2708 {
2709         return get_thread(act->get_tid());
2710 }
2711
2712 /**
2713  * @brief Get a Promise's "promise number"
2714  *
2715  * A "promise number" is an index number that is unique to a promise, valid
2716  * only for a specific snapshot of an execution trace. Promises may come and go
2717  * as they are generated and resolved, so an index only retains meaning for the
2718  * current snapshot.
2719  *
2720  * @param promise The Promise to check
2721  * @return The promise index, if the promise still is valid; otherwise -1
2722  */
2723 int ModelExecution::get_promise_number(const Promise *promise) const
2724 {
2725         for (unsigned int i = 0; i < promises.size(); i++)
2726                 if (promises[i] == promise)
2727                         return i;
2728         /* Not found */
2729         return -1;
2730 }
2731
2732 /**
2733  * @brief Check if a Thread is currently enabled
2734  * @param t The Thread to check
2735  * @return True if the Thread is currently enabled
2736  */
2737 bool ModelExecution::is_enabled(Thread *t) const
2738 {
2739         return scheduler->is_enabled(t);
2740 }
2741
2742 /**
2743  * @brief Check if a Thread is currently enabled
2744  * @param tid The ID of the Thread to check
2745  * @return True if the Thread is currently enabled
2746  */
2747 bool ModelExecution::is_enabled(thread_id_t tid) const
2748 {
2749         return scheduler->is_enabled(tid);
2750 }
2751
2752 /**
2753  * @brief Select the next thread to execute based on the current action
2754  *
2755  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2756  * actions should be followed by the execution of their child thread. In either
2757  * case, the current action should determine the next thread schedule.
2758  *
2759  * @param curr The current action
2760  * @return The next thread to run, if the current action will determine this
2761  * selection; otherwise NULL
2762  */
2763 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2764 {
2765         /* Do not split atomic RMW */
2766         if (curr->is_rmwr())
2767                 return get_thread(curr);
2768         /* Follow CREATE with the created thread */
2769         if (curr->get_type() == THREAD_CREATE)
2770                 return curr->get_thread_operand();
2771         return NULL;
2772 }
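
/*
 * A brief sketch (illustration only) of the RMW case handled above: an atomic
 * read-modify-write such as
 *
 *     std::atomic<int> counter(0);
 *     int old = counter.fetch_add(1, std::memory_order_acq_rel);
 *
 * is modeled as a read part followed by a write part in the same thread;
 * returning that thread here keeps it scheduled so no other thread's action
 * can be interleaved between the two parts.
 */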
2773
2774 /** @return True if the execution has taken too many steps */
2775 bool ModelExecution::too_many_steps() const
2776 {
2777         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2778 }
2779
2780 /**
2781  * Takes the next step in the execution, if possible.
2782  * @param curr The current step to take
2783  * @return Returns the next Thread to run, if any; NULL if this execution
2784  * should terminate
2785  */
2786 Thread * ModelExecution::take_step(ModelAction *curr)
2787 {
2788         Thread *curr_thrd = get_thread(curr);
2789         ASSERT(curr_thrd->get_state() == THREAD_READY);
2790
2791         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2792         curr = check_current_action(curr);
2793         ASSERT(curr);
2794
2795         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2796                 scheduler->remove_thread(curr_thrd);
2797
2798         return action_select_next_thread(curr);
2799 }
2800
2801 /**
2802  * Launch end-of-execution release sequence fixups only when
2803  * the execution is otherwise feasible AND:
2804  *
2805  * (1) there are pending release sequences,
2806  * (2) there are pending assertions that could be invalidated by a change
2807  * in clock vectors (i.e., data races), and
2808  * (3) there are no pending promises.
2809  */
2810 void ModelExecution::fixup_release_sequences()
2811 {
2812         while (!pending_rel_seqs.empty() &&
2813                         is_feasible_prefix_ignore_relseq() &&
2814                         !unrealizedraces.empty()) {
2815                 model_print("*** WARNING: release sequence fixup action "
2816                                 "(%zu pending release sequence(s)) ***\n",
2817                                 pending_rel_seqs.size());
2818                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2819                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2820                                 model_thread);
2821                 take_step(fixup);
2822         }
2823 }