execution: bugfix - backtrack seq-cst fences properly
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 too_many_reads(false),
34                 no_valid_reads(false),
35                 bad_synchronization(false),
36                 asserted(false)
37         { }
38
39         ~model_snapshot_members() {
40                 for (unsigned int i = 0; i < bugs.size(); i++)
41                         delete bugs[i];
42                 bugs.clear();
43         }
44
45         unsigned int next_thread_id;
46         modelclock_t used_sequence_numbers;
47         ModelAction *next_backtrack;
48         SnapVector<bug_message *> bugs;
49         bool failed_promise;
50         bool too_many_reads;
51         bool no_valid_reads;
52         /** @brief Incorrectly-ordered synchronization was made */
53         bool bad_synchronization;
54         bool asserted;
55
56         SNAPSHOTALLOC
57 };
58
59 /** @brief Constructor */
60 ModelExecution::ModelExecution(ModelChecker *m,
61                 struct model_params *params,
62                 Scheduler *scheduler,
63                 NodeStack *node_stack) :
64         model(m),
65         params(params),
66         scheduler(scheduler),
67         action_trace(),
68         thread_map(2), /* We'll always need at least 2 threads */
69         obj_map(),
70         condvar_waiters_map(),
71         obj_thrd_map(),
72         promises(),
73         futurevalues(),
74         pending_rel_seqs(),
75         thrd_last_action(1),
76         thrd_last_fence_release(),
77         node_stack(node_stack),
78         priv(new struct model_snapshot_members()),
79         mo_graph(new CycleGraph())
80 {
81         /* Initialize a model-checker thread, for special ModelActions */
82         model_thread = new Thread(get_next_id());
83         add_thread(model_thread);
84         scheduler->register_engine(this);
85         node_stack->register_engine(this);
86 }
87
88 /** @brief Destructor */
89 ModelExecution::~ModelExecution()
90 {
91         for (unsigned int i = 0; i < get_num_threads(); i++)
92                 delete get_thread(int_to_id(i));
93
94         for (unsigned int i = 0; i < promises.size(); i++)
95                 delete promises[i];
96
97         delete mo_graph;
98         delete priv;
99 }
100
101 int ModelExecution::get_execution_number() const
102 {
103         return model->get_execution_number();
104 }
105
106 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
107 {
108         action_list_t *tmp = hash->get(ptr);
109         if (tmp == NULL) {
110                 tmp = new action_list_t();
111                 hash->put(ptr, tmp);
112         }
113         return tmp;
114 }
115
116 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
117 {
118         SnapVector<action_list_t> *tmp = hash->get(ptr);
119         if (tmp == NULL) {
120                 tmp = new SnapVector<action_list_t>();
121                 hash->put(ptr, tmp);
122         }
123         return tmp;
124 }
125
126 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
127 {
128         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
129         if (wrv==NULL)
130                 return NULL;
131         unsigned int thread=id_to_int(tid);
132         if (thread < wrv->size())
133                 return &(*wrv)[thread];
134         else
135                 return NULL;
136 }
137
138 /** @return a thread ID for a new Thread */
139 thread_id_t ModelExecution::get_next_id()
140 {
141         return priv->next_thread_id++;
142 }
143
144 /** @return the number of user threads created during this execution */
145 unsigned int ModelExecution::get_num_threads() const
146 {
147         return priv->next_thread_id;
148 }
149
150 /** @return a sequence number for a new ModelAction */
151 modelclock_t ModelExecution::get_next_seq_num()
152 {
153         return ++priv->used_sequence_numbers;
154 }
155
156 /**
157  * @brief Should the current action wake up a given thread?
158  *
159  * @param curr The current action
160  * @param thread The thread that we might wake up
161  * @return True, if we should wake up the sleeping thread; false otherwise
162  */
163 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
164 {
165         const ModelAction *asleep = thread->get_pending();
166         /* Don't allow partial RMW to wake anyone up */
167         if (curr->is_rmwr())
168                 return false;
169         /* Synchronizing actions may have been backtracked */
170         if (asleep->could_synchronize_with(curr))
171                 return true;
172         /* A sleeping fence-acquire may be woken by any release operation */
173         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
174                 return true;
175         /* Fence-release + store can wake a load-acquire on the same location */
176         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
177                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
178                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
179                         return true;
180         }
181         return false;
182 }
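/*
 * Illustrative sketch (assumed fragment, not part of this file): the last
 * clause above targets a pattern such as
 *
 *   Thread 1 (curr)                     Thread 2 (asleep)
 *   atomic_thread_fence(release);       r = x.load(memory_order_acquire);
 *   x.store(1, memory_order_relaxed);
 *
 * where a relaxed store sequenced after Thread 1's fence-release may allow
 * the sleeping load-acquire on the same location to synchronize, so the
 * sleeping thread must be re-enabled and re-examined.
 */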
183
184 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
185 {
186         for (unsigned int i = 0; i < get_num_threads(); i++) {
187                 Thread *thr = get_thread(int_to_id(i));
188                 if (scheduler->is_sleep_set(thr)) {
189                         if (should_wake_up(curr, thr))
190                                 /* Remove this thread from sleep set */
191                                 scheduler->remove_sleep(thr);
192                 }
193         }
194 }
195
196 /** @brief Alert the model-checker that an incorrectly-ordered
197  * synchronization was made */
198 void ModelExecution::set_bad_synchronization()
199 {
200         priv->bad_synchronization = true;
201 }
202
203 bool ModelExecution::assert_bug(const char *msg)
204 {
205         priv->bugs.push_back(new bug_message(msg));
206
207         if (isfeasibleprefix()) {
208                 set_assert();
209                 return true;
210         }
211         return false;
212 }
213
214 /** @return True, if any bugs have been reported for this execution */
215 bool ModelExecution::have_bug_reports() const
216 {
217         return priv->bugs.size() != 0;
218 }
219
220 SnapVector<bug_message *> * ModelExecution::get_bugs() const
221 {
222         return &priv->bugs;
223 }
224
225 /**
226  * Check whether the current trace has triggered an assertion which should halt
227  * its execution.
228  *
229  * @return True, if the execution should be aborted; false otherwise
230  */
231 bool ModelExecution::has_asserted() const
232 {
233         return priv->asserted;
234 }
235
236 /**
237  * Trigger a trace assertion which should cause this execution to be halted.
238  * This can be due to a detected bug or due to an infeasibility that should
239  * halt ASAP.
240  */
241 void ModelExecution::set_assert()
242 {
243         priv->asserted = true;
244 }
245
246 /**
247  * Check if we are in a deadlock. Should only be called at the end of an
248  * execution, although it should not give false positives in the middle of an
249  * execution (there should be some ENABLED thread).
250  *
251  * @return True if program is in a deadlock; false otherwise
252  */
253 bool ModelExecution::is_deadlocked() const
254 {
255         bool blocking_threads = false;
256         for (unsigned int i = 0; i < get_num_threads(); i++) {
257                 thread_id_t tid = int_to_id(i);
258                 if (is_enabled(tid))
259                         return false;
260                 Thread *t = get_thread(tid);
261                 if (!t->is_model_thread() && t->get_pending())
262                         blocking_threads = true;
263         }
264         return blocking_threads;
265 }
266
267 /**
268  * Check if this is a complete execution. That is, have all threads completed
269  * execution (rather than exiting because sleep sets have forced a redundant
270  * execution).
271  *
272  * @return True if the execution is complete.
273  */
274 bool ModelExecution::is_complete_execution() const
275 {
276         for (unsigned int i = 0; i < get_num_threads(); i++)
277                 if (is_enabled(int_to_id(i)))
278                         return false;
279         return true;
280 }
281
282 /**
283  * @brief Find the last fence-related backtracking conflict for a ModelAction
284  *
285  * This function performs the search for the most recent conflicting action
286  * against which we should perform backtracking, as affected by fence
287  * operations. This includes pairs of potentially-synchronizing actions which
288  * occur due to fence-acquire or fence-release, and hence should be explored in
289  * the opposite execution order.
290  *
291  * @param act The current action
292  * @return The most recent action which conflicts with act due to fences
293  */
294 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
295 {
296         /* Only perform release/acquire fence backtracking for stores */
297         if (!act->is_write())
298                 return NULL;
299
300         /* Find a fence-release (or, act is a release) */
301         ModelAction *last_release;
302         if (act->is_release())
303                 last_release = act;
304         else
305                 last_release = get_last_fence_release(act->get_tid());
306         if (!last_release)
307                 return NULL;
308
309         /* Skip past the release */
310         const action_list_t *list = &action_trace;
311         action_list_t::const_reverse_iterator rit;
312         for (rit = list->rbegin(); rit != list->rend(); rit++)
313                 if (*rit == last_release)
314                         break;
315         ASSERT(rit != list->rend());
316
317         /* Find a prior:
318          *   load-acquire
319          * or
320          *   load --sb-> fence-acquire */
321         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
322         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
323         bool found_acquire_fences = false;
324         for ( ; rit != list->rend(); rit++) {
325                 ModelAction *prev = *rit;
326                 if (act->same_thread(prev))
327                         continue;
328
329                 int tid = id_to_int(prev->get_tid());
330
331                 if (prev->is_read() && act->same_var(prev)) {
332                         if (prev->is_acquire()) {
333                                 /* Found most recent load-acquire, don't need
334                                  * to search for more fences */
335                                 if (!found_acquire_fences)
336                                         return NULL;
337                         } else {
338                                 prior_loads[tid] = prev;
339                         }
340                 }
341                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
342                         found_acquire_fences = true;
343                         acquire_fences[tid] = prev;
344                 }
345         }
346
347         ModelAction *latest_backtrack = NULL;
348         for (unsigned int i = 0; i < acquire_fences.size(); i++)
349                 if (acquire_fences[i] && prior_loads[i])
350                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
351                                 latest_backtrack = acquire_fences[i];
352         return latest_backtrack;
353 }
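/*
 * Illustrative sketch (assumed fragment, not part of this file): the scan
 * above pairs the store 'act' (itself a release, or preceded by a
 * fence-release) with an earlier relaxed load that is followed by a
 * fence-acquire in another thread, e.g.
 *
 *   Thread 1 (act)                      Thread 2
 *   atomic_thread_fence(release);       r = x.load(memory_order_relaxed);
 *   x.store(1, memory_order_relaxed);   atomic_thread_fence(acquire);
 *
 * Executing the store before the load yields a potentially-synchronizing
 * interleaving, so the latest such fence-acquire is returned as the
 * backtracking conflict.
 */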
354
355 /**
356  * @brief Find the last backtracking conflict for a ModelAction
357  *
358  * This function performs the search for the most recent conflicting action
359  * against which we should perform backtracking. This primarily includes pairs of
360  * synchronizing actions which should be explored in the opposite execution
361  * order.
362  *
363  * @param act The current action
364  * @return The most recent action which conflicts with act
365  */
366 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
367 {
368         switch (act->get_type()) {
369         case ATOMIC_FENCE:
370                 /* Only seq-cst fences can (directly) cause backtracking */
371                 if (!act->is_seqcst())
372                         break;
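                /* Seq-cst fences fall through: they are checked against prior
                 * actions via could_synchronize_with() just like atomic
                 * accesses, so conflicting operations become backtracking
                 * points (the seq-cst fence backtracking fix). */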
373         case ATOMIC_READ:
374         case ATOMIC_WRITE:
375         case ATOMIC_RMW: {
376                 ModelAction *ret = NULL;
377
378                 /* linear search: from most recent to oldest */
379                 action_list_t *list = obj_map.get(act->get_location());
380                 action_list_t::reverse_iterator rit;
381                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
382                         ModelAction *prev = *rit;
383                         if (prev->could_synchronize_with(act)) {
384                                 ret = prev;
385                                 break;
386                         }
387                 }
388
389                 ModelAction *ret2 = get_last_fence_conflict(act);
390                 if (!ret2)
391                         return ret;
392                 if (!ret)
393                         return ret2;
394                 if (*ret < *ret2)
395                         return ret2;
396                 return ret;
397         }
398         case ATOMIC_LOCK:
399         case ATOMIC_TRYLOCK: {
400                 /* linear search: from most recent to oldest */
401                 action_list_t *list = obj_map.get(act->get_location());
402                 action_list_t::reverse_iterator rit;
403                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
404                         ModelAction *prev = *rit;
405                         if (act->is_conflicting_lock(prev))
406                                 return prev;
407                 }
408                 break;
409         }
410         case ATOMIC_UNLOCK: {
411                 /* linear search: from most recent to oldest */
412                 action_list_t *list = obj_map.get(act->get_location());
413                 action_list_t::reverse_iterator rit;
414                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
415                         ModelAction *prev = *rit;
416                         if (!act->same_thread(prev) && prev->is_failed_trylock())
417                                 return prev;
418                 }
419                 break;
420         }
421         case ATOMIC_WAIT: {
422                 /* linear search: from most recent to oldest */
423                 action_list_t *list = obj_map.get(act->get_location());
424                 action_list_t::reverse_iterator rit;
425                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
426                         ModelAction *prev = *rit;
427                         if (!act->same_thread(prev) && prev->is_failed_trylock())
428                                 return prev;
429                         if (!act->same_thread(prev) && prev->is_notify())
430                                 return prev;
431                 }
432                 break;
433         }
434
435         case ATOMIC_NOTIFY_ALL:
436         case ATOMIC_NOTIFY_ONE: {
437                 /* linear search: from most recent to oldest */
438                 action_list_t *list = obj_map.get(act->get_location());
439                 action_list_t::reverse_iterator rit;
440                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
441                         ModelAction *prev = *rit;
442                         if (!act->same_thread(prev) && prev->is_wait())
443                                 return prev;
444                 }
445                 break;
446         }
447         default:
448                 break;
449         }
450         return NULL;
451 }
452
453 /** This method finds backtracking points at which we should try to
454  * reorder the parameter ModelAction against earlier conflicting actions.
455  *
456  * @param act The ModelAction to find backtracking points for.
457  */
458 void ModelExecution::set_backtracking(ModelAction *act)
459 {
460         Thread *t = get_thread(act);
461         ModelAction *prev = get_last_conflict(act);
462         if (prev == NULL)
463                 return;
464
465         Node *node = prev->get_node()->get_parent();
466
467         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
468         int low_tid, high_tid;
469         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
470                 low_tid = id_to_int(act->get_tid());
471                 high_tid = low_tid + 1;
472         } else {
473                 low_tid = 0;
474                 high_tid = get_num_threads();
475         }
476
477         for (int i = low_tid; i < high_tid; i++) {
478                 thread_id_t tid = int_to_id(i);
479
480                 /* Make sure this thread can be enabled here. */
481                 if (i >= node->get_num_threads())
482                         break;
483
484                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
485                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
486                 if (node->enabled_status(tid) != THREAD_ENABLED)
487                         continue;
488
489                 /* Check if this has been explored already */
490                 if (node->has_been_explored(tid))
491                         continue;
492
493                 /* See if fairness allows */
494                 if (params->fairwindow != 0 && !node->has_priority(tid)) {
495                         bool unfair = false;
496                         for (int t = 0; t < node->get_num_threads(); t++) {
497                                 thread_id_t tother = int_to_id(t);
498                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
499                                         unfair = true;
500                                         break;
501                                 }
502                         }
503                         if (unfair)
504                                 continue;
505                 }
506
507                 /* See if CHESS-like yield fairness allows */
508                 if (params->yieldon) {
509                         bool unfair = false;
510                         for (int t = 0; t < node->get_num_threads(); t++) {
511                                 thread_id_t tother = int_to_id(t);
512                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
513                                         unfair = true;
514                                         break;
515                                 }
516                         }
517                         if (unfair)
518                                 continue;
519                 }
520
521                 /* Cache the latest backtracking point */
522                 set_latest_backtrack(prev);
523
524                 /* If this is a new backtracking point, mark the tree */
525                 if (!node->set_backtrack(tid))
526                         continue;
527                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
528                                         id_to_int(prev->get_tid()),
529                                         id_to_int(t->get_id()));
530                 if (DBG_ENABLED()) {
531                         prev->print();
532                         act->print();
533                 }
534         }
535 }
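/*
 * Note (summary of the DPOR rule cited above, as applied here): when the
 * current action's thread was already enabled at the node just before the
 * conflicting action, it suffices to mark only that thread as a backtracking
 * choice there; otherwise every enabled thread at that node is a candidate,
 * which is why high_tid spans all threads in the else branch.
 */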
536
537 /**
538  * @brief Cache a backtracking point as the "most recent", if eligible
539  *
540  * Note that this does not prepare the NodeStack for this backtracking
541  * operation; it only caches the action on a per-execution basis
542  *
543  * @param act The operation at which we should explore a different next action
544  * (i.e., backtracking point)
545  * @return True, if this action is now the most recent backtracking point;
546  * false otherwise
547  */
548 bool ModelExecution::set_latest_backtrack(ModelAction *act)
549 {
550         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
551                 priv->next_backtrack = act;
552                 return true;
553         }
554         return false;
555 }
556
557 /**
558  * Returns last backtracking point. The model checker will explore a different
559  * path for this point in the next execution.
560  * @return The ModelAction at which the next execution should diverge.
561  */
562 ModelAction * ModelExecution::get_next_backtrack()
563 {
564         ModelAction *next = priv->next_backtrack;
565         priv->next_backtrack = NULL;
566         return next;
567 }
568
569 /**
570  * Processes a read model action.
571  * @param curr is the read model action to process.
572  * @return True if processing this read updates the mo_graph.
573  */
574 bool ModelExecution::process_read(ModelAction *curr)
575 {
576         Node *node = curr->get_node();
577         while (true) {
578                 bool updated = false;
579                 switch (node->get_read_from_status()) {
580                 case READ_FROM_PAST: {
581                         const ModelAction *rf = node->get_read_from_past();
582                         ASSERT(rf);
583
584                         mo_graph->startChanges();
585
586                         ASSERT(!is_infeasible());
587                         if (!check_recency(curr, rf)) {
588                                 if (node->increment_read_from()) {
589                                         mo_graph->rollbackChanges();
590                                         continue;
591                                 } else {
592                                         priv->too_many_reads = true;
593                                 }
594                         }
595
596                         updated = r_modification_order(curr, rf);
597                         read_from(curr, rf);
598                         mo_graph->commitChanges();
599                         mo_check_promises(curr, true);
600                         break;
601                 }
602                 case READ_FROM_PROMISE: {
603                         Promise *promise = curr->get_node()->get_read_from_promise();
604                         if (promise->add_reader(curr))
605                                 priv->failed_promise = true;
606                         curr->set_read_from_promise(promise);
607                         mo_graph->startChanges();
608                         if (!check_recency(curr, promise))
609                                 priv->too_many_reads = true;
610                         updated = r_modification_order(curr, promise);
611                         mo_graph->commitChanges();
612                         break;
613                 }
614                 case READ_FROM_FUTURE: {
615                         /* Read from future value */
616                         struct future_value fv = node->get_future_value();
617                         Promise *promise = new Promise(this, curr, fv);
618                         curr->set_read_from_promise(promise);
619                         promises.push_back(promise);
620                         mo_graph->startChanges();
621                         updated = r_modification_order(curr, promise);
622                         mo_graph->commitChanges();
623                         break;
624                 }
625                 default:
626                         ASSERT(false);
627                 }
628                 get_thread(curr)->set_return_value(curr->get_return_value());
629                 return updated;
630         }
631 }
632
633 /**
634  * Processes a lock, trylock, or unlock model action.  @param curr is
635  * the mutex model action to process.
636  *
637  * The trylock operation checks whether the lock is already held.  If not,
638  * it falls through to the normal lock operation case.  If so, it fails
639  * (returns 0).
640  *
641  * The lock operation has already been checked that it is enabled, so
642  * it just grabs the lock and synchronizes with the previous unlock.
643  *
644  * The unlock operation has to re-enable all of the threads that are
645  * waiting on the lock.
646  *
647  * @return True if synchronization was updated; false otherwise
648  */
649 bool ModelExecution::process_mutex(ModelAction *curr)
650 {
651         std::mutex *mutex = curr->get_mutex();
652         struct std::mutex_state *state = NULL;
653
654         if (mutex)
655                 state = mutex->get_state();
656
657         switch (curr->get_type()) {
658         case ATOMIC_TRYLOCK: {
659                 bool success = !state->locked;
660                 curr->set_try_lock(success);
661                 if (!success) {
662                         get_thread(curr)->set_return_value(0);
663                         break;
664                 }
665                 get_thread(curr)->set_return_value(1);
666         }
667                 //otherwise fall through to the lock case
668         case ATOMIC_LOCK: {
669                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
670                         assert_bug("Lock access before initialization");
671                 state->locked = get_thread(curr);
672                 ModelAction *unlock = get_last_unlock(curr);
673                 //synchronize with the previous unlock statement
674                 if (unlock != NULL) {
675                         synchronize(unlock, curr);
676                         return true;
677                 }
678                 break;
679         }
680         case ATOMIC_WAIT:
681         case ATOMIC_UNLOCK: {
682                 /* wake up the other threads */
683                 for (unsigned int i = 0; i < get_num_threads(); i++) {
684                         Thread *t = get_thread(int_to_id(i));
685                         Thread *curr_thrd = get_thread(curr);
686                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
687                                 scheduler->wake(t);
688                 }
689
690                 /* unlock the lock - after checking who was waiting on it */
691                 state->locked = NULL;
692
693                 if (!curr->is_wait())
694                         break; /* The rest is only for ATOMIC_WAIT */
695
696                 /* Should we go to sleep? (simulate spurious failures) */
697                 if (curr->get_node()->get_misc() == 0) {
698                         get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
699                         /* disable us */
700                         scheduler->sleep(get_thread(curr));
701                 }
702                 break;
703         }
704         case ATOMIC_NOTIFY_ALL: {
705                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
706                 //activate all the waiting threads
707                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
708                         scheduler->wake(get_thread(*rit));
709                 }
710                 waiters->clear();
711                 break;
712         }
713         case ATOMIC_NOTIFY_ONE: {
714                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
715                 int wakeupthread = curr->get_node()->get_misc();
716                 action_list_t::iterator it = waiters->begin();
717                 advance(it, wakeupthread);
718                 scheduler->wake(get_thread(*it));
719                 waiters->erase(it);
720                 break;
721         }
722
723         default:
724                 ASSERT(0);
725         }
726         return false;
727 }
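/*
 * Note: ATOMIC_WAIT uses the node's "misc" counter to enumerate both outcomes
 * of a wait: value 0 places the thread on the condition variable's waiter
 * list and puts it to sleep, while the other value models a spurious wake-up
 * in which the wait returns immediately (see set_misc_max(2) for wait
 * actions in initialize_curr_action()).
 */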
728
729 /**
730  * @brief Check if the current pending promises allow a future value to be sent
731  *
732  * If one of the following is true:
733  *  (a) there are no pending promises
734  *  (b) the reader and writer do not cross any promises
735  * Then, it is safe to pass a future value back now.
736  *
737  * Otherwise, we must save the pending future value until (a) or (b) is true
738  *
739  * @param writer The operation which sends the future value. Must be a write.
740  * @param reader The operation which will observe the value. Must be a read.
741  * @return True if the future value can be sent now; false if it must wait.
742  */
743 bool ModelExecution::promises_may_allow(const ModelAction *writer,
744                 const ModelAction *reader) const
745 {
746         if (promises.empty())
747                 return true;
748         for (int i = promises.size() - 1; i >= 0; i--) {
749                 ModelAction *pr = promises[i]->get_reader(0);
750                 //reader is after promise...doesn't cross any promise
751                 if (*reader > *pr)
752                         return true;
753                 //writer is after promise, reader before...bad...
754                 if (*writer > *pr)
755                         return false;
756         }
757         return true;
758 }
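/*
 * Worked example (hypothetical sequence numbers): with a promised read P at
 * sequence number 10, a writer at 12 and a reader at 8 would "cross" P
 * (writer after P, reader before P), so the future value must be deferred;
 * a reader at 14 is already past P and may receive the value immediately.
 */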
759
760 /**
761  * @brief Add a future value to a reader
762  *
763  * This function performs a few additional checks to ensure that the future
764  * value can be feasibly observed by the reader
765  *
766  * @param writer The operation whose value is sent. Must be a write.
767  * @param reader The read operation which may read the future value. Must be a read.
768  */
769 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
770 {
771         /* Do more ambitious checks now that mo is more complete */
772         if (!mo_may_allow(writer, reader))
773                 return;
774
775         Node *node = reader->get_node();
776
777         /* Find an ancestor thread which exists at the time of the reader */
778         Thread *write_thread = get_thread(writer);
779         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
780                 write_thread = write_thread->get_parent();
781
782         struct future_value fv = {
783                 writer->get_write_value(),
784                 writer->get_seq_number() + params->maxfuturedelay,
785                 write_thread->get_id(),
786         };
787         if (node->add_future_value(fv))
788                 set_latest_backtrack(reader);
789 }
790
791 /**
792  * Process a write ModelAction
793  * @param curr The ModelAction to process
794  * @return True if the mo_graph was updated or promises were resolved
795  */
796 bool ModelExecution::process_write(ModelAction *curr)
797 {
798         /* Readers to which we may send our future value */
799         ModelVector<ModelAction *> send_fv;
800
801         const ModelAction *earliest_promise_reader;
802         bool updated_promises = false;
803
804         bool updated_mod_order = w_modification_order(curr, &send_fv);
805         Promise *promise = pop_promise_to_resolve(curr);
806
807         if (promise) {
808                 earliest_promise_reader = promise->get_reader(0);
809                 updated_promises = resolve_promise(curr, promise);
810         } else
811                 earliest_promise_reader = NULL;
812
813         for (unsigned int i = 0; i < send_fv.size(); i++) {
814                 ModelAction *read = send_fv[i];
815
816                 /* Don't send future values to reads after the Promise we resolve */
817                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
818                         /* Check if future value can be sent immediately */
819                         if (promises_may_allow(curr, read)) {
820                                 add_future_value(curr, read);
821                         } else {
822                                 futurevalues.push_back(PendingFutureValue(curr, read));
823                         }
824                 }
825         }
826
827         /* Check the pending future values */
828         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
829                 struct PendingFutureValue pfv = futurevalues[i];
830                 if (promises_may_allow(pfv.writer, pfv.reader)) {
831                         add_future_value(pfv.writer, pfv.reader);
832                         futurevalues.erase(futurevalues.begin() + i);
833                 }
834         }
835
836         mo_graph->commitChanges();
837         mo_check_promises(curr, false);
838
839         get_thread(curr)->set_return_value(VALUE_NONE);
840         return updated_mod_order || updated_promises;
841 }
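/*
 * Note: send_fv holds the reads that w_modification_order() identified as
 * candidates to observe this store's value in some other execution. Each one
 * either receives the future value now (becoming a backtracking hint on the
 * reader) or is parked as a PendingFutureValue until no unresolved promise
 * lies between this writer and that reader.
 */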
842
843 /**
844  * Process a fence ModelAction
845  * @param curr The ModelAction to process
846  * @return True if synchronization was updated
847  */
848 bool ModelExecution::process_fence(ModelAction *curr)
849 {
850         /*
851          * fence-relaxed: no-op
852          * fence-release: only log the occurrence (not in this function), for
853          *   use in later synchronization
854          * fence-acquire (this function): search for hypothetical release
855          *   sequences
856          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
857          */
858         bool updated = false;
859         if (curr->is_acquire()) {
860                 action_list_t *list = &action_trace;
861                 action_list_t::reverse_iterator rit;
862                 /* Find X : is_read(X) && X --sb-> curr */
863                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
864                         ModelAction *act = *rit;
865                         if (act == curr)
866                                 continue;
867                         if (act->get_tid() != curr->get_tid())
868                                 continue;
869                         /* Stop at the beginning of the thread */
870                         if (act->is_thread_start())
871                                 break;
872                         /* Stop once we reach a prior fence-acquire */
873                         if (act->is_fence() && act->is_acquire())
874                                 break;
875                         if (!act->is_read())
876                                 continue;
877                         /* read-acquire will find its own release sequences */
878                         if (act->is_acquire())
879                                 continue;
880
881                         /* Establish hypothetical release sequences */
882                         rel_heads_list_t release_heads;
883                         get_release_seq_heads(curr, act, &release_heads);
884                         for (unsigned int i = 0; i < release_heads.size(); i++)
885                                 synchronize(release_heads[i], curr);
886                         if (release_heads.size() != 0)
887                                 updated = true;
888                 }
889         }
890         return updated;
891 }
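/*
 * Illustrative sketch (assumed fragment, not part of this file): a
 * fence-acquire can synchronize on behalf of an earlier relaxed load in the
 * same thread:
 *
 *   Thread 1                            Thread 2
 *   x.store(1, memory_order_release);   r = x.load(memory_order_relaxed);
 *                                       atomic_thread_fence(acquire);
 *
 * If the relaxed load reads from the release store (or its release
 * sequence), the fence-acquire synchronizes with that store; the loop above
 * implements this by collecting release-sequence heads for each prior
 * sequenced-before load.
 */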
892
893 /**
894  * @brief Process the current action for thread-related activity
895  *
896  * Performs current-action processing for a THREAD_* ModelAction. Processing
897  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
898  * synchronization, etc.  This function is a no-op for non-THREAD actions
899  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
900  *
901  * @param curr The current action
902  * @return True if synchronization was updated or a thread completed
903  */
904 bool ModelExecution::process_thread_action(ModelAction *curr)
905 {
906         bool updated = false;
907
908         switch (curr->get_type()) {
909         case THREAD_CREATE: {
910                 thrd_t *thrd = (thrd_t *)curr->get_location();
911                 struct thread_params *params = (struct thread_params *)curr->get_value();
912                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
913                 add_thread(th);
914                 th->set_creation(curr);
915                 /* Promises can be satisfied by children */
916                 for (unsigned int i = 0; i < promises.size(); i++) {
917                         Promise *promise = promises[i];
918                         if (promise->thread_is_available(curr->get_tid()))
919                                 promise->add_thread(th->get_id());
920                 }
921                 break;
922         }
923         case THREAD_JOIN: {
924                 Thread *blocking = curr->get_thread_operand();
925                 ModelAction *act = get_last_action(blocking->get_id());
926                 synchronize(act, curr);
927                 updated = true; /* trigger rel-seq checks */
928                 break;
929         }
930         case THREAD_FINISH: {
931                 Thread *th = get_thread(curr);
932                 /* Wake up any joining threads */
933                 for (unsigned int i = 0; i < get_num_threads(); i++) {
934                         Thread *waiting = get_thread(int_to_id(i));
935                         if (waiting->waiting_on() == th &&
936                                         waiting->get_pending()->is_thread_join())
937                                 scheduler->wake(waiting);
938                 }
939                 th->complete();
940                 /* Completed thread can't satisfy promises */
941                 for (unsigned int i = 0; i < promises.size(); i++) {
942                         Promise *promise = promises[i];
943                         if (promise->thread_is_available(th->get_id()))
944                                 if (promise->eliminate_thread(th->get_id()))
945                                         priv->failed_promise = true;
946                 }
947                 updated = true; /* trigger rel-seq checks */
948                 break;
949         }
950         case THREAD_START: {
951                 check_promises(curr->get_tid(), NULL, curr->get_cv());
952                 break;
953         }
954         default:
955                 break;
956         }
957
958         return updated;
959 }
960
961 /**
962  * @brief Process the current action for release sequence fixup activity
963  *
964  * Performs model-checker release sequence fixups for the current action,
965  * forcing a single pending release sequence to break (with a given, potential
966  * "loose" write) or to complete (i.e., synchronize). If a pending release
967  * sequence forms a complete release sequence, then we must perform the fixup
968  * synchronization, mo_graph additions, etc.
969  *
970  * @param curr The current action; must be a release sequence fixup action
971  * @param work_queue The work queue to which to add work items as they are
972  * generated
973  */
974 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
975 {
976         const ModelAction *write = curr->get_node()->get_relseq_break();
977         struct release_seq *sequence = pending_rel_seqs.back();
978         pending_rel_seqs.pop_back();
979         ASSERT(sequence);
980         ModelAction *acquire = sequence->acquire;
981         const ModelAction *rf = sequence->rf;
982         const ModelAction *release = sequence->release;
983         ASSERT(acquire);
984         ASSERT(release);
985         ASSERT(rf);
986         ASSERT(release->same_thread(rf));
987
988         if (write == NULL) {
989                 /**
990                  * @todo Forcing a synchronization requires that we set
991                  * modification order constraints. For instance, we can't allow
992                  * a fixup sequence in which two separate read-acquire
993                  * operations read from the same sequence, where the first one
994                  * synchronizes and the other doesn't. Essentially, we can't
995                  * allow any writes to insert themselves between 'release' and
996                  * 'rf'
997                  */
998
999                 /* Must synchronize */
1000                 if (!synchronize(release, acquire))
1001                         return;
1002                 /* Re-check all pending release sequences */
1003                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1004                 /* Re-check act for mo_graph edges */
1005                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1006
1007                 /* propagate synchronization to later actions */
1008                 action_list_t::reverse_iterator rit = action_trace.rbegin();
1009                 for (; (*rit) != acquire; rit++) {
1010                         ModelAction *propagate = *rit;
1011                         if (acquire->happens_before(propagate)) {
1012                                 synchronize(acquire, propagate);
1013                                 /* Re-check 'propagate' for mo_graph edges */
1014                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1015                         }
1016                 }
1017         } else {
1018                 /* Break release sequence with new edges:
1019                  *   release --mo--> write --mo--> rf */
1020                 mo_graph->addEdge(release, write);
1021                 mo_graph->addEdge(write, rf);
1022         }
1023
1024         /* See if we have realized a data race */
1025         checkDataRaces();
1026 }
1027
1028 /**
1029  * Initialize the current action by performing one or more of the following
1030  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1031  * in the NodeStack, manipulating backtracking sets, allocating and
1032  * initializing clock vectors, and computing the promises to fulfill.
1033  *
1034  * @param curr The current action, as passed from the user context; may be
1035  * freed/invalidated after the execution of this function, with a different
1036  * action "returned" in its place (pass-by-reference)
1037  * @return True if curr is a newly-explored action; false otherwise
1038  */
1039 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1040 {
1041         ModelAction *newcurr;
1042
1043         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1044                 newcurr = process_rmw(*curr);
1045                 delete *curr;
1046
1047                 if (newcurr->is_rmw())
1048                         compute_promises(newcurr);
1049
1050                 *curr = newcurr;
1051                 return false;
1052         }
1053
1054         (*curr)->set_seq_number(get_next_seq_num());
1055
1056         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1057         if (newcurr) {
1058                 /* First restore type and order in case of RMW operation */
1059                 if ((*curr)->is_rmwr())
1060                         newcurr->copy_typeandorder(*curr);
1061
1062                 ASSERT((*curr)->get_location() == newcurr->get_location());
1063                 newcurr->copy_from_new(*curr);
1064
1065                 /* Discard duplicate ModelAction; use action from NodeStack */
1066                 delete *curr;
1067
1068                 /* Always compute new clock vector */
1069                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1070
1071                 *curr = newcurr;
1072                 return false; /* Action was explored previously */
1073         } else {
1074                 newcurr = *curr;
1075
1076                 /* Always compute new clock vector */
1077                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1078
1079                 /* Assign most recent release fence */
1080                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1081
1082                 /*
1083                  * Perform one-time actions when pushing new ModelAction onto
1084                  * NodeStack
1085                  */
1086                 if (newcurr->is_write())
1087                         compute_promises(newcurr);
1088                 else if (newcurr->is_relseq_fixup())
1089                         compute_relseq_breakwrites(newcurr);
1090                 else if (newcurr->is_wait())
1091                         newcurr->get_node()->set_misc_max(2);
1092                 else if (newcurr->is_notify_one()) {
1093                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1094                 }
1095                 return true; /* This was a new ModelAction */
1096         }
1097 }
1098
1099 /**
1100  * @brief Establish reads-from relation between two actions
1101  *
1102  * Perform basic operations involved with establishing a concrete rf relation,
1103  * including setting the ModelAction data and checking for release sequences.
1104  *
1105  * @param act The action that is reading (must be a read)
1106  * @param rf The action from which we are reading (must be a write)
1107  *
1108  * @return True if this read established synchronization
1109  */
1110 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1111 {
1112         ASSERT(rf);
1113         ASSERT(rf->is_write());
1114
1115         act->set_read_from(rf);
1116         if (act->is_acquire()) {
1117                 rel_heads_list_t release_heads;
1118                 get_release_seq_heads(act, act, &release_heads);
1119                 int num_heads = release_heads.size();
1120                 for (unsigned int i = 0; i < release_heads.size(); i++)
1121                         if (!synchronize(release_heads[i], act))
1122                                 num_heads--;
1123                 return num_heads > 0;
1124         }
1125         return false;
1126 }
1127
1128 /**
1129  * @brief Synchronizes two actions
1130  *
1131  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1132  * This function performs the synchronization and provides hooks for other
1133  * checks carried out alongside it.
1134  *
1135  * @param first The left-hand side of the synchronizes-with relation
1136  * @param second The right-hand side of the synchronizes-with relation
1137  * @return True if the synchronization was successful (i.e., was consistent
1138  * with the execution order); false otherwise
1139  */
1140 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1141 {
1142         if (*second < *first) {
1143                 set_bad_synchronization();
1144                 return false;
1145         }
1146         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1147         return second->synchronize_with(first);
1148 }
1149
1150 /**
1151  * Check promises and eliminate potentially-satisfying threads when a thread is
1152  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1153  * no longer satisfy a promise generated from that thread.
1154  *
1155  * @param blocker The thread on which a thread is waiting
1156  * @param waiting The waiting thread
1157  */
1158 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1159 {
1160         for (unsigned int i = 0; i < promises.size(); i++) {
1161                 Promise *promise = promises[i];
1162                 if (!promise->thread_is_available(waiting->get_id()))
1163                         continue;
1164                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1165                         ModelAction *reader = promise->get_reader(j);
1166                         if (reader->get_tid() != blocker->get_id())
1167                                 continue;
1168                         if (promise->eliminate_thread(waiting->get_id())) {
1169                                 /* Promise has failed */
1170                                 priv->failed_promise = true;
1171                         } else {
1172                                 /* Only eliminate the 'waiting' thread once */
1173                                 return;
1174                         }
1175                 }
1176         }
1177 }
1178
1179 /**
1180  * @brief Check whether a model action is enabled.
1181  *
1182  * Checks whether a lock or join operation would succeed (i.e., whether the
1183  * lock is currently free, or whether the joined thread has already completed).
1184  * If not, the action is not yet enabled and the thread must wait.
1185  *
1186  * @param curr is the ModelAction to check whether it is enabled.
1187  * @return a bool that indicates whether the action is enabled.
1188  */
1189 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1190         if (curr->is_lock()) {
1191                 std::mutex *lock = curr->get_mutex();
1192                 struct std::mutex_state *state = lock->get_state();
1193                 if (state->locked)
1194                         return false;
1195         } else if (curr->is_thread_join()) {
1196                 Thread *blocking = curr->get_thread_operand();
1197                 if (!blocking->is_complete()) {
1198                         thread_blocking_check_promises(blocking, get_thread(curr));
1199                         return false;
1200                 }
1201         }
1202
1203         return true;
1204 }
1205
1206 /**
1207  * This is the heart of the model checker routine. It performs model-checking
1208  * actions corresponding to a given "current action." Among other processes, it
1209  * calculates reads-from relationships, updates synchronization clock vectors,
1210  * forms a memory_order constraints graph, and handles replay/backtrack
1211  * execution when running permutations of previously-observed executions.
1212  *
1213  * @param curr The current action to process
1214  * @return The ModelAction that is actually executed; may be different than
1215  * curr
1216  */
1217 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1218 {
1219         ASSERT(curr);
1220         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1221         bool newly_explored = initialize_curr_action(&curr);
1222
1223         DBG();
1224
1225         wake_up_sleeping_actions(curr);
1226
1227         /* Compute fairness information for CHESS yield algorithm */
1228         if (params->yieldon) {
1229                 curr->get_node()->update_yield(scheduler);
1230         }
1231
1232         /* Add the action to lists before any other model-checking tasks */
1233         if (!second_part_of_rmw)
1234                 add_action_to_lists(curr);
1235
1236         /* Build may_read_from set for newly-created actions */
1237         if (newly_explored && curr->is_read())
1238                 build_may_read_from(curr);
1239
1240         /* Initialize work_queue with the "current action" work */
1241         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1242         while (!work_queue.empty() && !has_asserted()) {
1243                 WorkQueueEntry work = work_queue.front();
1244                 work_queue.pop_front();
1245
1246                 switch (work.type) {
1247                 case WORK_CHECK_CURR_ACTION: {
1248                         ModelAction *act = work.action;
1249                         bool update = false; /* update this location's release seq's */
1250                         bool update_all = false; /* update all release seq's */
1251
1252                         if (process_thread_action(curr))
1253                                 update_all = true;
1254
1255                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1256                                 update = true;
1257
1258                         if (act->is_write() && process_write(act))
1259                                 update = true;
1260
1261                         if (act->is_fence() && process_fence(act))
1262                                 update_all = true;
1263
1264                         if (act->is_mutex_op() && process_mutex(act))
1265                                 update_all = true;
1266
1267                         if (act->is_relseq_fixup())
1268                                 process_relseq_fixup(curr, &work_queue);
1269
1270                         if (update_all)
1271                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1272                         else if (update)
1273                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1274                         break;
1275                 }
1276                 case WORK_CHECK_RELEASE_SEQ:
1277                         resolve_release_sequences(work.location, &work_queue);
1278                         break;
1279                 case WORK_CHECK_MO_EDGES: {
1280                         /** @todo Complete verification of work_queue */
1281                         ModelAction *act = work.action;
1282                         bool updated = false;
1283
1284                         if (act->is_read()) {
1285                                 const ModelAction *rf = act->get_reads_from();
1286                                 const Promise *promise = act->get_reads_from_promise();
1287                                 if (rf) {
1288                                         if (r_modification_order(act, rf))
1289                                                 updated = true;
1290                                 } else if (promise) {
1291                                         if (r_modification_order(act, promise))
1292                                                 updated = true;
1293                                 }
1294                         }
1295                         if (act->is_write()) {
1296                                 if (w_modification_order(act, NULL))
1297                                         updated = true;
1298                         }
1299                         mo_graph->commitChanges();
1300
1301                         if (updated)
1302                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1303                         break;
1304                 }
1305                 default:
1306                         ASSERT(false);
1307                         break;
1308                 }
1309         }
1310
1311         check_curr_backtracking(curr);
1312         set_backtracking(curr);
1313         return curr;
1314 }
1315
1316 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1317 {
1318         Node *currnode = curr->get_node();
1319         Node *parnode = currnode->get_parent();
1320
1321         if ((parnode && !parnode->backtrack_empty()) ||
1322                          !currnode->misc_empty() ||
1323                          !currnode->read_from_empty() ||
1324                          !currnode->promise_empty() ||
1325                          !currnode->relseq_break_empty()) {
1326                 set_latest_backtrack(curr);
1327         }
1328 }
1329
1330 bool ModelExecution::promises_expired() const
1331 {
1332         for (unsigned int i = 0; i < promises.size(); i++) {
1333                 Promise *promise = promises[i];
1334                 if (promise->get_expiration() < priv->used_sequence_numbers)
1335                         return true;
1336         }
1337         return false;
1338 }
1339
1340 /**
1341  * This is the strongest feasibility check available.
1342  * @return whether the current trace (partial or complete) must be a prefix of
1343  * a feasible trace.
1344  */
1345 bool ModelExecution::isfeasibleprefix() const
1346 {
1347         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1348 }
1349
1350 /**
1351  * Print diagnostic information about an infeasible execution
1352  * @param prefix A string to prefix the output with; if NULL, then a default
1353  * message prefix will be provided
1354  */
1355 void ModelExecution::print_infeasibility(const char *prefix) const
1356 {
1357         char buf[100];
1358         char *ptr = buf;
1359         if (mo_graph->checkForCycles())
1360                 ptr += sprintf(ptr, "[mo cycle]");
1361         if (priv->failed_promise)
1362                 ptr += sprintf(ptr, "[failed promise]");
1363         if (priv->too_many_reads)
1364                 ptr += sprintf(ptr, "[too many reads]");
1365         if (priv->no_valid_reads)
1366                 ptr += sprintf(ptr, "[no valid reads-from]");
1367         if (priv->bad_synchronization)
1368                 ptr += sprintf(ptr, "[bad sw ordering]");
1369         if (promises_expired())
1370                 ptr += sprintf(ptr, "[promise expired]");
1371         if (promises.size() != 0)
1372                 ptr += sprintf(ptr, "[unresolved promise]");
1373         if (ptr != buf)
1374                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1375 }
1376
1377 /**
1378  * Returns whether the current completed trace is feasible, except for pending
1379  * release sequences.
1380  */
1381 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1382 {
1383         return !is_infeasible() && promises.size() == 0;
1384 }
1385
1386 /**
1387  * Check if the current partial trace is infeasible. Does not check any
1388  * end-of-execution flags, which might rule out the execution. Thus, this is
1389  * useful only for ruling an execution as infeasible.
1390  * @return whether the current partial trace is infeasible.
1391  */
1392 bool ModelExecution::is_infeasible() const
1393 {
1394         return mo_graph->checkForCycles() ||
1395                 priv->no_valid_reads ||
1396                 priv->failed_promise ||
1397                 priv->too_many_reads ||
1398                 priv->bad_synchronization ||
1399                 promises_expired();
1400 }
1401
1402 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1403 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1404         ModelAction *lastread = get_last_action(act->get_tid());
1405         lastread->process_rmw(act);
1406         if (act->is_rmw()) {
1407                 if (lastread->get_reads_from())
1408                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1409                 else
1410                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1411                 mo_graph->commitChanges();
1412         }
1413         return lastread;
1414 }
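
/*
 * Worked example of the RMW rule enforced above: C++11 29.3p12 requires an
 * atomic read-modify-write to read the value of the write immediately
 * preceding it in modification order, which is what addRMWEdge() records.
 * A minimal sketch, assuming a standalone C++11 test program (t1/t2/main are
 * illustrative names, not part of this file):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() { x.store(1, std::memory_order_relaxed); }
 *   void t2() { x.fetch_add(10, std::memory_order_relaxed); }
 *
 *   int main() {
 *       std::thread a(t1), b(t2);
 *       a.join(); b.join();
 *       // If the fetch_add read 1, it must sit immediately after the store
 *       // of 1 in x's modification order, so the final value of x is 11.
 *       return 0;
 *   }
 */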
1415
1416 /**
1417  * A helper function for ModelExecution::check_recency; checks whether the
1418  * current thread could have read from a different write/promise for the past
1419  * 'params.maxreads' steps and whether that write/promise should become visible
1420  * (i.e., is ordered later in the modification order), to model memory liveness.
1421  *
1422  * @param curr The current action. Must be a read.
1423  * @param rf The write/promise from which we plan to read
1424  * @param other_rf The write/promise from which we may read
1425  * @return True if we were able to read from other_rf for params.maxreads steps
1426  */
1427 template <typename T, typename U>
1428 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1429 {
1430         /* Need a different write/promise */
1431         if (other_rf->equals(rf))
1432                 return false;
1433
1434         /* Only look for "newer" writes/promises */
1435         if (!mo_graph->checkReachable(rf, other_rf))
1436                 return false;
1437
1438         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1439         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1440         action_list_t::reverse_iterator rit = list->rbegin();
1441         ASSERT((*rit) == curr);
1442         /* Skip past curr */
1443         rit++;
1444
1445         /* Does this write/promise work for everyone? */
1446         for (int i = 0; i < params->maxreads; i++, rit++) {
1447                 ModelAction *act = *rit;
1448                 if (!act->may_read_from(other_rf))
1449                         return false;
1450         }
1451         return true;
1452 }
1453
1454 /**
1455  * Checks whether a thread has read from the same write or Promise too many
1456  * times without seeing the effects of a later write/Promise.
1457  *
1458  * Basic idea:
1459  * 1) there must be a different write/promise that we could read from,
1460  * 2) we must have read from the same write/promise in excess of maxreads times,
1461  * 3) that other write/promise must have been in the reads_from set for each of those reads, and
1462  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1463  *
1464  * If so, we decide that the execution is no longer feasible.
1465  *
1466  * @param curr The current action. Must be a read.
1467  * @param rf The ModelAction/Promise from which we might read.
1468  * @return True if the read should succeed; false otherwise
1469  */
1470 template <typename T>
1471 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1472 {
1473         if (!params->maxreads)
1474                 return true;
1475
1476         //NOTE: Next check is just optimization, not really necessary....
1477         if (curr->get_node()->get_read_from_past_size() +
1478                         curr->get_node()->get_read_from_promise_size() <= 1)
1479                 return true;
1480
1481         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1482         int tid = id_to_int(curr->get_tid());
1483         ASSERT(tid < (int)thrd_lists->size());
1484         action_list_t *list = &(*thrd_lists)[tid];
1485         action_list_t::reverse_iterator rit = list->rbegin();
1486         ASSERT((*rit) == curr);
1487         /* Skip past curr */
1488         rit++;
1489
1490         action_list_t::reverse_iterator ritcopy = rit;
1491         /* See if we have enough reads from the same value */
1492         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1493                 if (ritcopy == list->rend())
1494                         return true;
1495                 ModelAction *act = *ritcopy;
1496                 if (!act->is_read())
1497                         return true;
1498                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1499                         return true;
1500                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1501                         return true;
1502                 if (act->get_node()->get_read_from_past_size() +
1503                                 act->get_node()->get_read_from_promise_size() <= 1)
1504                         return true;
1505         }
1506         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1507                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1508                 if (should_read_instead(curr, rf, write))
1509                         return false; /* liveness failure */
1510         }
1511         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1512                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1513                 if (should_read_instead(curr, rf, promise))
1514                         return false; /* liveness failure */
1515         }
1516         return true;
1517 }
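
/*
 * Why such a liveness bound is needed, as a minimal sketch assuming a
 * standalone C++11 program (flag/waiter/setter are illustrative names only):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<bool> flag{false};
 *
 *   void waiter() {
 *       // Relaxed spin loop: each iteration may legally read the initial
 *       // 'false' even after the store of 'true' exists in the execution.
 *       while (!flag.load(std::memory_order_relaxed))
 *           ;
 *   }
 *
 *   void setter() { flag.store(true, std::memory_order_relaxed); }
 *
 *   int main() {
 *       std::thread a(waiter), b(setter);
 *       a.join(); b.join();
 *       return 0;
 *   }
 *
 * Without check_recency(), the checker could explore executions in which the
 * loop reads the stale 'false' forever; params.maxreads caps how many times a
 * read may ignore a mod-order-later write that every one of those reads could
 * have observed.
 */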
1518
1519 /**
1520  * @brief Updates the mo_graph with the constraints imposed from the current
1521  * read.
1522  *
1523  * Basic idea is the following: Go through each other thread and find
1524  * the last action that happened before our read.  Two cases:
1525  *
1526  * -# The action is a write: that write must either occur before
1527  * the write we read from or be the write we read from.
1528  * -# The action is a read: the write which that action read from
1529  * must occur before the write we read from or be the same write.
1530  *
1531  * @param curr The current action. Must be a read.
1532  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1533  * @return True if modification order edges were added; false otherwise
1534  */
1535 template <typename rf_type>
1536 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1537 {
1538         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1539         unsigned int i;
1540         bool added = false;
1541         ASSERT(curr->is_read());
1542
1543         /* Last SC fence in the current thread */
1544         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1545         ModelAction *last_sc_write = NULL;
1546         if (curr->is_seqcst())
1547                 last_sc_write = get_last_seq_cst_write(curr);
1548
1549         /* Iterate over all threads */
1550         for (i = 0; i < thrd_lists->size(); i++) {
1551                 /* Last SC fence in thread i */
1552                 ModelAction *last_sc_fence_thread_local = NULL;
1553                 if (int_to_id((int)i) != curr->get_tid())
1554                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1555
1556                 /* Last SC fence in thread i, before last SC fence in current thread */
1557                 ModelAction *last_sc_fence_thread_before = NULL;
1558                 if (last_sc_fence_local)
1559                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1560
1561                 /* Iterate over actions in thread, starting from most recent */
1562                 action_list_t *list = &(*thrd_lists)[i];
1563                 action_list_t::reverse_iterator rit;
1564                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1565                         ModelAction *act = *rit;
1566
1567                         /* Skip curr */
1568                         if (act == curr)
1569                                 continue;
1570                         /* Don't want to add reflexive edges on 'rf' */
1571                         if (act->equals(rf)) {
1572                                 if (act->happens_before(curr))
1573                                         break;
1574                                 else
1575                                         continue;
1576                         }
1577
1578                         if (act->is_write()) {
1579                                 /* C++, Section 29.3 statement 5 */
1580                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1581                                                 *act < *last_sc_fence_thread_local) {
1582                                         added = mo_graph->addEdge(act, rf) || added;
1583                                         break;
1584                                 }
1585                                 /* C++, Section 29.3 statement 4 */
1586                                 else if (act->is_seqcst() && last_sc_fence_local &&
1587                                                 *act < *last_sc_fence_local) {
1588                                         added = mo_graph->addEdge(act, rf) || added;
1589                                         break;
1590                                 }
1591                                 /* C++, Section 29.3 statement 6 */
1592                                 else if (last_sc_fence_thread_before &&
1593                                                 *act < *last_sc_fence_thread_before) {
1594                                         added = mo_graph->addEdge(act, rf) || added;
1595                                         break;
1596                                 }
1597                         }
1598
1599                         /* C++, Section 29.3 statement 3 (second subpoint) */
1600                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1601                                 added = mo_graph->addEdge(act, rf) || added;
1602                                 break;
1603                         }
1604
1605                         /*
1606                          * Include at most one act per-thread that "happens
1607                          * before" curr
1608                          */
1609                         if (act->happens_before(curr)) {
1610                                 if (act->is_write()) {
1611                                         added = mo_graph->addEdge(act, rf) || added;
1612                                 } else {
1613                                         const ModelAction *prevrf = act->get_reads_from();
1614                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1615                                         if (prevrf) {
1616                                                 if (!prevrf->equals(rf))
1617                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1618                                         } else if (!prevrf_promise->equals(rf)) {
1619                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1620                                         }
1621                                 }
1622                                 break;
1623                         }
1624                 }
1625         }
1626
1627         /*
1628          * All compatible, thread-exclusive promises must be ordered after any
1629          * concrete loads from the same thread
1630          */
1631         for (unsigned int i = 0; i < promises.size(); i++)
1632                 if (promises[i]->is_compatible_exclusive(curr))
1633                         added = mo_graph->addEdge(rf, promises[i]) || added;
1634
1635         return added;
1636 }
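
/*
 * Example of the read case above (read-read coherence), as a minimal sketch
 * assuming a standalone C++11 program (t1/t2/main are illustrative names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() { x.store(1, std::memory_order_release); }
 *
 *   void t2() {
 *       int a = x.load(std::memory_order_acquire);   // suppose this reads 1
 *       int b = x.load(std::memory_order_relaxed);   // curr: reads from some write W
 *       // The first load happens before the second, so the write it read
 *       // from (the store of 1) must be W itself or mo-ordered before W;
 *       // r_modification_order() adds exactly that edge, which rules out
 *       // b == 0 once a == 1.
 *   }
 *
 *   int main() { std::thread a(t1), b(t2); a.join(); b.join(); return 0; }
 */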
1637
1638 /**
1639  * Updates the mo_graph with the constraints imposed from the current write.
1640  *
1641  * Basic idea is the following: Go through each other thread and find
1642  * the latest action that happened before our write.  Two cases:
1643  *
1644  * (1) The action is a write => that write must occur before
1645  * the current write
1646  *
1647  * (2) The action is a read => the write which that action read from
1648  * must occur before the current write.
1649  *
1650  * This method also handles two other issues:
1651  *
1652  * (I) Sequential Consistency: Making sure that if the current write is
1653  * seq_cst, that it occurs after the previous seq_cst write.
1654  *
1655  * (II) Sending the write back to non-synchronizing reads.
1656  *
1657  * @param curr The current action. Must be a write.
1658  * @param send_fv A vector for stashing reads to which we may pass our future
1659  * value. If NULL, then don't record any future values.
1660  * @return True if modification order edges were added; false otherwise
1661  */
1662 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1663 {
1664         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1665         unsigned int i;
1666         bool added = false;
1667         ASSERT(curr->is_write());
1668
1669         if (curr->is_seqcst()) {
1670                 /* We have to at least see the last sequentially consistent write,
1671                          so we are initialized. */
1672                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1673                 if (last_seq_cst != NULL) {
1674                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1675                 }
1676         }
1677
1678         /* Last SC fence in the current thread */
1679         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1680
1681         /* Iterate over all threads */
1682         for (i = 0; i < thrd_lists->size(); i++) {
1683                 /* Last SC fence in thread i, before last SC fence in current thread */
1684                 ModelAction *last_sc_fence_thread_before = NULL;
1685                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1686                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1687
1688                 /* Iterate over actions in thread, starting from most recent */
1689                 action_list_t *list = &(*thrd_lists)[i];
1690                 action_list_t::reverse_iterator rit;
1691                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1692                         ModelAction *act = *rit;
1693                         if (act == curr) {
1694                                 /*
1695                                  * 1) If RMW and it actually read from something, then we
1696                                  * already have all relevant edges, so just skip to next
1697                                  * thread.
1698                                  *
1699                                  * 2) If RMW and it didn't read from anything, we should add
1700                                  * whatever edge we can get to speed up convergence.
1701                                  *
1702                                  * 3) If normal write, we need to look at earlier actions, so
1703                                  * continue processing list.
1704                                  */
1705                                 if (curr->is_rmw()) {
1706                                         if (curr->get_reads_from() != NULL)
1707                                                 break;
1708                                         else
1709                                                 continue;
1710                                 } else
1711                                         continue;
1712                         }
1713
1714                         /* C++, Section 29.3 statement 7 */
1715                         if (last_sc_fence_thread_before && act->is_write() &&
1716                                         *act < *last_sc_fence_thread_before) {
1717                                 added = mo_graph->addEdge(act, curr) || added;
1718                                 break;
1719                         }
1720
1721                         /*
1722                          * Include at most one act per-thread that "happens
1723                          * before" curr
1724                          */
1725                         if (act->happens_before(curr)) {
1726                                 /*
1727                                  * Note: if act is RMW, just add edge:
1728                                  *   act --mo--> curr
1729                                  * The following edge should be handled elsewhere:
1730                                  *   readfrom(act) --mo--> act
1731                                  */
1732                                 if (act->is_write())
1733                                         added = mo_graph->addEdge(act, curr) || added;
1734                                 else if (act->is_read()) {
1735                                         //if previous read accessed a null, just keep going
1736                                         if (act->get_reads_from() == NULL)
1737                                                 continue;
1738                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1739                                 }
1740                                 break;
1741                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1742                                                      !act->same_thread(curr)) {
1743                                 /* We have an action that:
1744                                    (1) did not happen before us
1745                                    (2) is a read and we are a write
1746                                    (3) cannot synchronize with us
1747                                    (4) is in a different thread
1748                                    =>
1749                                    that read could potentially read from our write.  Note that
1750                                    these checks are overly conservative at this point; we'll
1751                                    do more checks before actually removing the
1752                                    pendingfuturevalue.
1753
1754                                  */
1755                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1756                                         if (!is_infeasible())
1757                                                 send_fv->push_back(act);
1758                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1759                                                 add_future_value(curr, act);
1760                                 }
1761                         }
1762                 }
1763         }
1764
1765         /*
1766          * All compatible, thread-exclusive promises must be ordered after any
1767          * concrete stores from the same thread, or else they can be merged with
1768          * this store later
1769          */
1770         for (unsigned int i = 0; i < promises.size(); i++)
1771                 if (promises[i]->is_compatible_exclusive(curr))
1772                         added = mo_graph->addEdge(curr, promises[i]) || added;
1773
1774         return added;
1775 }
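
/*
 * Example of case (I) above (total order over seq_cst stores), as a minimal
 * sketch assuming a standalone C++11 program (t1/t2/main are illustrative
 * names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() { x.store(1, std::memory_order_seq_cst); }
 *   void t2() { x.store(2, std::memory_order_seq_cst); }
 *
 *   int main() {
 *       std::thread a(t1), b(t2);
 *       a.join(); b.join();
 *       // Whichever store executes later gets an mo edge from the earlier
 *       // seq_cst store (via get_last_seq_cst_write()), so x's modification
 *       // order agrees with the single total order of seq_cst operations;
 *       // the final value is whichever store came second.
 *       return 0;
 *   }
 */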
1776
1777 /** Arbitrary reads from the future are not allowed.  Section 29.3
1778  * part 9 places some constraints.  This method checks one result of that
1779  * constraint.  Others require compiler support. */
1780 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1781 {
1782         if (!writer->is_rmw())
1783                 return true;
1784
1785         if (!reader->is_rmw())
1786                 return true;
1787
1788         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1789                 if (search == reader)
1790                         return false;
1791                 if (search->get_tid() == reader->get_tid() &&
1792                                 search->happens_before(reader))
1793                         break;
1794         }
1795
1796         return true;
1797 }
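
/*
 * The out-of-thin-air shape this check rejects, as a sketch assuming a
 * standalone C++11 program (t1/t2/main are illustrative names). Each
 * fetch_add is an RMW, so neither may take its input, via a future value,
 * from a write that itself (directly or through a chain of RMWs) read from it:
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() { x.fetch_add(1, std::memory_order_relaxed); }
 *   void t2() { x.fetch_add(1, std::memory_order_relaxed); }
 *
 *   int main() {
 *       std::thread a(t1), b(t2);
 *       a.join(); b.join();
 *       // A cycle where each RMW reads from the other would let a value
 *       // appear "out of thin air"; thin_air_constraint_may_allow() walks
 *       // the writer's reads-from chain and refuses the pairing when it
 *       // loops back to the reader, so x always ends up as 2 here.
 *       return 0;
 *   }
 */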
1798
1799 /**
1800  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1801  * some constraints. This method checks the following constraint (others
1802  * require compiler support):
1803  *
1804  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1805  */
1806 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1807 {
1808         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1809         unsigned int i;
1810         /* Iterate over all threads */
1811         for (i = 0; i < thrd_lists->size(); i++) {
1812                 const ModelAction *write_after_read = NULL;
1813
1814                 /* Iterate over actions in thread, starting from most recent */
1815                 action_list_t *list = &(*thrd_lists)[i];
1816                 action_list_t::reverse_iterator rit;
1817                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1818                         ModelAction *act = *rit;
1819
1820                         /* Don't disallow due to act == reader */
1821                         if (!reader->happens_before(act) || reader == act)
1822                                 break;
1823                         else if (act->is_write())
1824                                 write_after_read = act;
1825                         else if (act->is_read() && act->get_reads_from() != NULL)
1826                                 write_after_read = act->get_reads_from();
1827                 }
1828
1829                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1830                         return false;
1831         }
1832         return true;
1833 }
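
/*
 * Example of the constraint documented above (read-write coherence applied to
 * future values), as a sketch assuming a standalone C++11 program (t1/t2/main
 * are illustrative names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() {
 *       int r = x.load(std::memory_order_relaxed);   // X: the reader
 *       x.store(1, std::memory_order_relaxed);       // Y: sequenced after X
 *   }
 *
 *   void t2() { x.store(2, std::memory_order_relaxed); }   // Z
 *
 *   int main() { std::thread a(t1), b(t2); a.join(); b.join(); return 0; }
 *
 * If the store of 1 (Y) is mo-ordered before the store of 2 (Z), then the
 * load X must not read 2: X happens before Y, so its value has to come from
 * a write mo-before Y. mo_may_allow() filters out such reader/writer pairs
 * before a future value is sent back.
 */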
1834
1835 /**
1836  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1837  * The ModelAction under consideration is expected to be taking part in
1838  * release/acquire synchronization as an object of the "reads from" relation.
1839  * Note that this can only provide release sequence support for RMW chains
1840  * which do not read from the future, as those actions cannot be traced until
1841  * their "promise" is fulfilled. Similarly, we may not even establish the
1842  * presence of a release sequence with certainty, as some modification order
1843  * constraints may be decided further in the future. Thus, this function
1844  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1845  * and a boolean representing certainty.
1846  *
1847  * @param rf The action that might be part of a release sequence. Must be a
1848  * write.
1849  * @param release_heads A pass-by-reference style return parameter. After
1850  * execution of this function, release_heads will contain the heads of all the
1851  * relevant release sequences, if any exist with certainty.
1852  * @param pending A pass-by-reference style return parameter which is only used
1853  * when returning false (i.e., uncertain). It records available information about
1854  * an uncertain release sequence, including any write operations that might
1855  * break the sequence.
1856  * @return true, if the ModelExecution is certain that release_heads is complete;
1857  * false otherwise
1858  */
1859 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1860                 rel_heads_list_t *release_heads,
1861                 struct release_seq *pending) const
1862 {
1863         /* Only check for release sequences if there are no cycles */
1864         if (mo_graph->checkForCycles())
1865                 return false;
1866
1867         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1868                 ASSERT(rf->is_write());
1869
1870                 if (rf->is_release())
1871                         release_heads->push_back(rf);
1872                 else if (rf->get_last_fence_release())
1873                         release_heads->push_back(rf->get_last_fence_release());
1874                 if (!rf->is_rmw())
1875                         break; /* End of RMW chain */
1876
1877                 /** @todo Need to be smarter here...  In the linux lock
1878                  * example, this will run to the beginning of the program for
1879                  * every acquire. */
1880                 /** @todo The way to be smarter here is to keep going until 1
1881                  * thread has a release preceded by an acquire and you've seen
1882                  *       both. */
1883
1884                 /* acq_rel RMW is a sufficient stopping condition */
1885                 if (rf->is_acquire() && rf->is_release())
1886                         return true; /* complete */
1887         }
1888         if (!rf) {
1889                 /* read from future: need to settle this later */
1890                 pending->rf = NULL;
1891                 return false; /* incomplete */
1892         }
1893
1894         if (rf->is_release())
1895                 return true; /* complete */
1896
1897         /* else relaxed write
1898          * - check for fence-release in the same thread (29.8, stmt. 3)
1899          * - check modification order for contiguous subsequence
1900          *   -> rf must be same thread as release */
1901
1902         const ModelAction *fence_release = rf->get_last_fence_release();
1903         /* Synchronize with a fence-release unconditionally; we don't need to
1904          * find any more "contiguous subsequence..." for it */
1905         if (fence_release)
1906                 release_heads->push_back(fence_release);
1907
1908         int tid = id_to_int(rf->get_tid());
1909         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1910         action_list_t *list = &(*thrd_lists)[tid];
1911         action_list_t::const_reverse_iterator rit;
1912
1913         /* Find rf in the thread list */
1914         rit = std::find(list->rbegin(), list->rend(), rf);
1915         ASSERT(rit != list->rend());
1916
1917         /* Find the last {write,fence}-release */
1918         for (; rit != list->rend(); rit++) {
1919                 if (fence_release && *(*rit) < *fence_release)
1920                         break;
1921                 if ((*rit)->is_release())
1922                         break;
1923         }
1924         if (rit == list->rend()) {
1925                 /* No write-release in this thread */
1926                 return true; /* complete */
1927         } else if (fence_release && *(*rit) < *fence_release) {
1928                 /* The fence-release is more recent (and so, "stronger") than
1929                  * the most recent write-release */
1930                 return true; /* complete */
1931         } /* else, need to establish contiguous release sequence */
1932         ModelAction *release = *rit;
1933
1934         ASSERT(rf->same_thread(release));
1935
1936         pending->writes.clear();
1937
1938         bool certain = true;
1939         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1940                 if (id_to_int(rf->get_tid()) == (int)i)
1941                         continue;
1942                 list = &(*thrd_lists)[i];
1943
1944                 /* Can we ensure no future writes from this thread may break
1945                  * the release seq? */
1946                 bool future_ordered = false;
1947
1948                 ModelAction *last = get_last_action(int_to_id(i));
1949                 Thread *th = get_thread(int_to_id(i));
1950                 if ((last && rf->happens_before(last)) ||
1951                                 !is_enabled(th) ||
1952                                 th->is_complete())
1953                         future_ordered = true;
1954
1955                 ASSERT(!th->is_model_thread() || future_ordered);
1956
1957                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1958                         const ModelAction *act = *rit;
1959                         /* Reach synchronization -> this thread is complete */
1960                         if (act->happens_before(release))
1961                                 break;
1962                         if (rf->happens_before(act)) {
1963                                 future_ordered = true;
1964                                 continue;
1965                         }
1966
1967                         /* Only non-RMW writes can break release sequences */
1968                         if (!act->is_write() || act->is_rmw())
1969                                 continue;
1970
1971                         /* Check modification order */
1972                         if (mo_graph->checkReachable(rf, act)) {
1973                                 /* rf --mo--> act */
1974                                 future_ordered = true;
1975                                 continue;
1976                         }
1977                         if (mo_graph->checkReachable(act, release))
1978                                 /* act --mo--> release */
1979                                 break;
1980                         if (mo_graph->checkReachable(release, act) &&
1981                                       mo_graph->checkReachable(act, rf)) {
1982                                 /* release --mo-> act --mo--> rf */
1983                                 return true; /* complete */
1984                         }
1985                         /* act may break release sequence */
1986                         pending->writes.push_back(act);
1987                         certain = false;
1988                 }
1989                 if (!future_ordered)
1990                         certain = false; /* This thread is uncertain */
1991         }
1992
1993         if (certain) {
1994                 release_heads->push_back(release);
1995                 pending->writes.clear();
1996         } else {
1997                 pending->release = release;
1998                 pending->rf = rf;
1999         }
2000         return certain;
2001 }
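
/*
 * Release sequence example for the walk above, as a sketch assuming a
 * standalone C++11 program (t1/t2/main are illustrative names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *   int data = 0;
 *
 *   void t1() {
 *       data = 42;
 *       x.store(1, std::memory_order_release);       // head of the release sequence
 *       x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 *   }
 *
 *   void t2() {
 *       if (x.load(std::memory_order_acquire) == 2) {
 *           // Reading from the relaxed RMW still synchronizes with the
 *           // release store, because the RMW belongs to that store's release
 *           // sequence; release_seq_heads() finds the head by walking the
 *           // reads-from chain of RMWs, so data == 42 is guaranteed here.
 *           int d = data;
 *       }
 *   }
 *
 *   int main() { std::thread a(t1), b(t2); a.join(); b.join(); return 0; }
 */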
2002
2003 /**
2004  * An interface for getting the release sequence head(s) with which a
2005  * given ModelAction must synchronize. This function only returns a non-empty
2006  * result when it can locate a release sequence head with certainty. Otherwise,
2007  * it may mark the internal state of the ModelExecution so that it will handle
2008  * the release sequence at a later time, causing @a acquire to update its
2009  * synchronization at some later point in execution.
2010  *
2011  * @param acquire The 'acquire' action that may synchronize with a release
2012  * sequence
2013  * @param read The read action that may read from a release sequence; this may
2014  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2015  * when 'acquire' is a fence-acquire)
2016  * @param release_heads A pass-by-reference return parameter. Will be filled
2017  * with the head(s) of the release sequence(s), if they exist with certainty.
2018  * @see ModelExecution::release_seq_heads
2019  */
2020 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2021                 ModelAction *read, rel_heads_list_t *release_heads)
2022 {
2023         const ModelAction *rf = read->get_reads_from();
2024         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2025         sequence->acquire = acquire;
2026         sequence->read = read;
2027
2028         if (!release_seq_heads(rf, release_heads, sequence)) {
2029                 /* add act to 'lazy checking' list */
2030                 pending_rel_seqs.push_back(sequence);
2031         } else {
2032                 snapshot_free(sequence);
2033         }
2034 }
2035
2036 /**
2037  * Attempt to resolve all stashed operations that might synchronize with a
2038  * release sequence for a given location. This implements the "lazy" portion of
2039  * determining whether or not a release sequence was contiguous, since not all
2040  * modification order information is present at the time an action occurs.
2041  *
2042  * @param location The location/object that should be checked for release
2043  * sequence resolutions. A NULL value means to check all locations.
2044  * @param work_queue The work queue to which to add work items as they are
2045  * generated
2046  * @return True if any updates occurred (new synchronization, new mo_graph
2047  * edges)
2048  */
2049 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2050 {
2051         bool updated = false;
2052         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2053         while (it != pending_rel_seqs.end()) {
2054                 struct release_seq *pending = *it;
2055                 ModelAction *acquire = pending->acquire;
2056                 const ModelAction *read = pending->read;
2057
2058                 /* Only resolve sequences on the given location, if provided */
2059                 if (location && read->get_location() != location) {
2060                         it++;
2061                         continue;
2062                 }
2063
2064                 const ModelAction *rf = read->get_reads_from();
2065                 rel_heads_list_t release_heads;
2066                 bool complete;
2067                 complete = release_seq_heads(rf, &release_heads, pending);
2068                 for (unsigned int i = 0; i < release_heads.size(); i++)
2069                         if (!acquire->has_synchronized_with(release_heads[i]))
2070                                 if (synchronize(release_heads[i], acquire))
2071                                         updated = true;
2072
2073                 if (updated) {
2074                         /* Re-check all pending release sequences */
2075                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2076                         /* Re-check read-acquire for mo_graph edges */
2077                         if (acquire->is_read())
2078                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2079
2080                         /* propagate synchronization to later actions */
2081                         action_list_t::reverse_iterator rit = action_trace.rbegin();
2082                         for (; (*rit) != acquire; rit++) {
2083                                 ModelAction *propagate = *rit;
2084                                 if (acquire->happens_before(propagate)) {
2085                                         synchronize(acquire, propagate);
2086                                         /* Re-check 'propagate' for mo_graph edges */
2087                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2088                                 }
2089                         }
2090                 }
2091                 if (complete) {
2092                         it = pending_rel_seqs.erase(it);
2093                         snapshot_free(pending);
2094                 } else {
2095                         it++;
2096                 }
2097         }
2098
2099         // If we resolved promises or release sequences, see if we have realized a data race.
2100         checkDataRaces();
2101
2102         return updated;
2103 }
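
/*
 * What a "pending" release sequence looks like, as a sketch assuming a
 * standalone C++11 program (t1/t2/t3/main are illustrative names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() {
 *       x.store(1, std::memory_order_release);       // candidate head
 *       x.store(2, std::memory_order_relaxed);       // same-thread continuation
 *   }
 *
 *   void t2() { x.store(3, std::memory_order_relaxed); }      // possible breaker
 *
 *   void t3() { int r = x.load(std::memory_order_acquire); }  // suppose r == 2
 *
 *   int main() { std::thread a(t1), b(t2), c(t3); a.join(); b.join(); c.join(); return 0; }
 *
 * Whether the acquire load synchronizes with the release store depends on
 * where t2's store of 3 falls in x's modification order: if it sits between
 * the store of 1 and the store of 2, it breaks the (C++11) release sequence.
 * While that mo position is still undecided, the sequence stays on
 * pending_rel_seqs and is re-examined here as new mo edges appear.
 */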
2104
2105 /**
2106  * Performs various bookkeeping operations for the current ModelAction. For
2107  * instance, adds the action to the per-object, per-thread action vector and to the
2108  * action trace list of all thread actions.
2109  *
2110  * @param act is the ModelAction to add.
2111  */
2112 void ModelExecution::add_action_to_lists(ModelAction *act)
2113 {
2114         int tid = id_to_int(act->get_tid());
2115         ModelAction *uninit = NULL;
2116         int uninit_id = -1;
2117         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2118         if (list->empty() && act->is_atomic_var()) {
2119                 uninit = get_uninitialized_action(act);
2120                 uninit_id = id_to_int(uninit->get_tid());
2121                 list->push_front(uninit);
2122         }
2123         list->push_back(act);
2124
2125         action_trace.push_back(act);
2126         if (uninit)
2127                 action_trace.push_front(uninit);
2128
2129         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2130         if (tid >= (int)vec->size())
2131                 vec->resize(priv->next_thread_id);
2132         (*vec)[tid].push_back(act);
2133         if (uninit)
2134                 (*vec)[uninit_id].push_front(uninit);
2135
2136         if ((int)thrd_last_action.size() <= tid)
2137                 thrd_last_action.resize(get_num_threads());
2138         thrd_last_action[tid] = act;
2139         if (uninit)
2140                 thrd_last_action[uninit_id] = uninit;
2141
2142         if (act->is_fence() && act->is_release()) {
2143                 if ((int)thrd_last_fence_release.size() <= tid)
2144                         thrd_last_fence_release.resize(get_num_threads());
2145                 thrd_last_fence_release[tid] = act;
2146         }
2147
2148         if (act->is_wait()) {
2149                 void *mutex_loc = (void *) act->get_value();
2150                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2151
2152                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2153                 if (tid >= (int)vec->size())
2154                         vec->resize(priv->next_thread_id);
2155                 (*vec)[tid].push_back(act);
2156         }
2157 }
2158
2159 /**
2160  * @brief Get the last action performed by a particular Thread
2161  * @param tid The thread ID of the Thread in question
2162  * @return The last action in the thread
2163  */
2164 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2165 {
2166         int threadid = id_to_int(tid);
2167         if (threadid < (int)thrd_last_action.size())
2168                 return thrd_last_action[id_to_int(tid)];
2169         else
2170                 return NULL;
2171 }
2172
2173 /**
2174  * @brief Get the last fence release performed by a particular Thread
2175  * @param tid The thread ID of the Thread in question
2176  * @return The last fence release in the thread, if one exists; NULL otherwise
2177  */
2178 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2179 {
2180         int threadid = id_to_int(tid);
2181         if (threadid < (int)thrd_last_fence_release.size())
2182                 return thrd_last_fence_release[id_to_int(tid)];
2183         else
2184                 return NULL;
2185 }
2186
2187 /**
2188  * Gets the last memory_order_seq_cst write (in the total global sequence)
2189  * performed on a particular object (i.e., memory location), not including the
2190  * current action.
2191  * @param curr The current ModelAction; also denotes the object location to
2192  * check
2193  * @return The last seq_cst write
2194  */
2195 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2196 {
2197         void *location = curr->get_location();
2198         action_list_t *list = obj_map.get(location);
2199         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2200         action_list_t::reverse_iterator rit;
2201         for (rit = list->rbegin(); (*rit) != curr; rit++)
2202                 ;
2203         rit++; /* Skip past curr */
2204         for ( ; rit != list->rend(); rit++)
2205                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2206                         return *rit;
2207         return NULL;
2208 }
2209
2210 /**
2211  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2212  * performed in a particular thread, prior to a particular fence.
2213  * @param tid The ID of the thread to check
2214  * @param before_fence The fence from which to begin the search; if NULL, then
2215  * search for the most recent fence in the thread.
2216  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2217  */
2218 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2219 {
2220         /* All fences should have location FENCE_LOCATION */
2221         action_list_t *list = obj_map.get(FENCE_LOCATION);
2222
2223         if (!list)
2224                 return NULL;
2225
2226         action_list_t::reverse_iterator rit = list->rbegin();
2227
2228         if (before_fence) {
2229                 for (; rit != list->rend(); rit++)
2230                         if (*rit == before_fence)
2231                                 break;
2232
2233                 ASSERT(*rit == before_fence);
2234                 rit++;
2235         }
2236
2237         for (; rit != list->rend(); rit++)
2238                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2239                         return *rit;
2240         return NULL;
2241 }
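
/*
 * The fence rule this helper supports (C++11 29.3 statement 6, consulted by
 * r_modification_order()), as a sketch assuming a standalone C++11 program
 * (t1/t2/main are illustrative names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0};
 *
 *   void t1() {
 *       x.store(1, std::memory_order_relaxed);
 *       std::atomic_thread_fence(std::memory_order_seq_cst);   // fence F1
 *   }
 *
 *   void t2() {
 *       std::atomic_thread_fence(std::memory_order_seq_cst);   // fence F2
 *       int r = x.load(std::memory_order_relaxed);
 *       // If F1 precedes F2 in the total order of seq_cst operations, this
 *       // load must observe the store of 1 or something mo-later, i.e. it
 *       // cannot read the initial 0.  get_last_seq_cst_fence() locates F1
 *       // and F2 so the corresponding mo edges can be added.
 *   }
 *
 *   int main() { std::thread a(t1), b(t2); a.join(); b.join(); return 0; }
 */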
2242
2243 /**
2244  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2245  * location). This function identifies the mutex according to the current
2246  * action, which is presumed to operate on the same mutex.
2247  * @param curr The current ModelAction; also denotes the object location to
2248  * check
2249  * @return The last unlock operation
2250  */
2251 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2252 {
2253         void *location = curr->get_location();
2254         action_list_t *list = obj_map.get(location);
2255         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2256         action_list_t::reverse_iterator rit;
2257         for (rit = list->rbegin(); rit != list->rend(); rit++)
2258                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2259                         return *rit;
2260         return NULL;
2261 }
2262
2263 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2264 {
2265         ModelAction *parent = get_last_action(tid);
2266         if (!parent)
2267                 parent = get_thread(tid)->get_creation();
2268         return parent;
2269 }
2270
2271 /**
2272  * Returns the clock vector for a given thread.
2273  * @param tid The thread whose clock vector we want
2274  * @return Desired clock vector
2275  */
2276 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2277 {
2278         return get_parent_action(tid)->get_cv();
2279 }
2280
2281 /**
2282  * @brief Find the promise (if any) to resolve for the current action and
2283  * remove it from the pending promise vector
2284  * @param curr The current ModelAction. Should be a write.
2285  * @return The Promise to resolve, if any; otherwise NULL
2286  */
2287 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2288 {
2289         for (unsigned int i = 0; i < promises.size(); i++)
2290                 if (curr->get_node()->get_promise(i)) {
2291                         Promise *ret = promises[i];
2292                         promises.erase(promises.begin() + i);
2293                         return ret;
2294                 }
2295         return NULL;
2296 }
2297
2298 /**
2299  * Resolve a Promise with a current write.
2300  * @param write The ModelAction that is fulfilling Promises
2301  * @param promise The Promise to resolve
2302  * @return True if the Promise was successfully resolved; false otherwise
2303  */
2304 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2305 {
2306         ModelVector<ModelAction *> actions_to_check;
2307
2308         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2309                 ModelAction *read = promise->get_reader(i);
2310                 read_from(read, write);
2311                 actions_to_check.push_back(read);
2312         }
2313         /* Make sure the promise's value matches the write's value */
2314         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2315         if (!mo_graph->resolvePromise(promise, write))
2316                 priv->failed_promise = true;
2317
2318         /**
2319          * @todo  It is possible to end up in an inconsistent state, where a
2320          * "resolved" promise may still be referenced if
2321          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2322          *
2323          * Note that the inconsistency only matters when dumping mo_graph to
2324          * file.
2325          *
2326          * delete promise;
2327          */
2328
2329         // Check whether reading these writes has made threads unable to
2330         // resolve promises
2331         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2332                 ModelAction *read = actions_to_check[i];
2333                 mo_check_promises(read, true);
2334         }
2335
2336         return true;
2337 }
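
/*
 * The kind of "read from the future" a Promise models, as a sketch assuming a
 * standalone C++11 program (the classic load-buffering litmus test; t1/t2/main
 * are illustrative names):
 *
 *   #include <atomic>
 *   #include <thread>
 *
 *   std::atomic<int> x{0}, y{0};
 *
 *   void t1() {
 *       int r1 = y.load(std::memory_order_relaxed);
 *       x.store(1, std::memory_order_relaxed);
 *   }
 *
 *   void t2() {
 *       int r2 = x.load(std::memory_order_relaxed);
 *       y.store(1, std::memory_order_relaxed);
 *   }
 *
 *   int main() { std::thread a(t1), b(t2); a.join(); b.join(); return 0; }
 *
 * r1 == 1 && r2 == 1 is allowed with relaxed atomics: each load takes its
 * value from a store that has not yet executed when the load is explored.
 * The checker represents such a load as reading from a Promise, and
 * resolve_promise() later binds the Promise to the concrete store once that
 * store is reached.
 */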
2338
2339 /**
2340  * Compute the set of promises that could potentially be satisfied by this
2341  * action. Note that the set computation actually appears in the Node, not in
2342  * ModelExecution.
2343  * @param curr The ModelAction that may satisfy promises
2344  */
2345 void ModelExecution::compute_promises(ModelAction *curr)
2346 {
2347         for (unsigned int i = 0; i < promises.size(); i++) {
2348                 Promise *promise = promises[i];
2349                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2350                         continue;
2351
2352                 bool satisfy = true;
2353                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2354                         const ModelAction *act = promise->get_reader(j);
2355                         if (act->happens_before(curr) ||
2356                                         act->could_synchronize_with(curr)) {
2357                                 satisfy = false;
2358                                 break;
2359                         }
2360                 }
2361                 if (satisfy)
2362                         curr->get_node()->set_promise(i);
2363         }
2364 }
2365
2366 /** Checks promises in response to change in ClockVector Threads. */
2367 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2368 {
2369         for (unsigned int i = 0; i < promises.size(); i++) {
2370                 Promise *promise = promises[i];
2371                 if (!promise->thread_is_available(tid))
2372                         continue;
2373                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2374                         const ModelAction *act = promise->get_reader(j);
2375                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2376                                         merge_cv->synchronized_since(act)) {
2377                                 if (promise->eliminate_thread(tid)) {
2378                                         /* Promise has failed */
2379                                         priv->failed_promise = true;
2380                                         return;
2381                                 }
2382                         }
2383                 }
2384         }
2385 }
2386
2387 void ModelExecution::check_promises_thread_disabled()
2388 {
2389         for (unsigned int i = 0; i < promises.size(); i++) {
2390                 Promise *promise = promises[i];
2391                 if (promise->has_failed()) {
2392                         priv->failed_promise = true;
2393                         return;
2394                 }
2395         }
2396 }
2397
2398 /**
2399  * @brief Checks promises in response to addition to modification order for
2400  * threads.
2401  *
2402  * We test whether threads are still available for satisfying promises after an
2403  * addition to our modification order constraints. Those that are unavailable
2404  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2405  * that promise has failed.
2406  *
2407  * @param act The ModelAction which updated the modification order
2408  * @param is_read_check Should be true if act is a read and we must check for
2409  * updates to the store from which it read (there is a distinction here for
2410  * RMW's, which are both a load and a store)
2411  */
2412 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2413 {
2414         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2415
2416         for (unsigned int i = 0; i < promises.size(); i++) {
2417                 Promise *promise = promises[i];
2418
2419                 // Is this promise on the same location?
2420                 if (!promise->same_location(write))
2421                         continue;
2422
2423                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2424                         const ModelAction *pread = promise->get_reader(j);
2425                         if (!pread->happens_before(act))
2426                                continue;
2427                         if (mo_graph->checkPromise(write, promise)) {
2428                                 priv->failed_promise = true;
2429                                 return;
2430                         }
2431                         break;
2432                 }
2433
2434                 // Don't do any lookups twice for the same thread
2435                 if (!promise->thread_is_available(act->get_tid()))
2436                         continue;
2437
2438                 if (mo_graph->checkReachable(promise, write)) {
2439                         if (mo_graph->checkPromise(write, promise)) {
2440                                 priv->failed_promise = true;
2441                                 return;
2442                         }
2443                 }
2444         }
2445 }
2446
2447 /**
2448  * Compute the set of writes that may break the current pending release
2449  * sequence. This information is extracted from previous release sequence
2450  * calculations.
2451  *
2452  * @param curr The current ModelAction. Must be a release sequence fixup
2453  * action.
2454  */
2455 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2456 {
2457         if (pending_rel_seqs.empty())
2458                 return;
2459
2460         struct release_seq *pending = pending_rel_seqs.back();
2461         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2462                 const ModelAction *write = pending->writes[i];
2463                 curr->get_node()->add_relseq_break(write);
2464         }
2465
2466         /* NULL means don't break the sequence; just synchronize */
2467         curr->get_node()->add_relseq_break(NULL);
2468 }
2469
2470 /**
2471  * Build up an initial set of all past writes that this 'read' action may read
2472  * from, as well as any previously-observed future values that must still be valid.
2473  *
2474  * @param curr is the current ModelAction that we are exploring; it must be a
2475  * 'read' operation.
2476  */
2477 void ModelExecution::build_may_read_from(ModelAction *curr)
2478 {
2479         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2480         unsigned int i;
2481         ASSERT(curr->is_read());
2482
2483         ModelAction *last_sc_write = NULL;
2484
2485         if (curr->is_seqcst())
2486                 last_sc_write = get_last_seq_cst_write(curr);
2487
2488         /* Iterate over all threads */
2489         for (i = 0; i < thrd_lists->size(); i++) {
2490                 /* Iterate over actions in thread, starting from most recent */
2491                 action_list_t *list = &(*thrd_lists)[i];
2492                 action_list_t::reverse_iterator rit;
2493                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2494                         ModelAction *act = *rit;
2495
2496                         /* Only consider 'write' actions */
2497                         if (!act->is_write() || act == curr)
2498                                 continue;
2499
2500                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2501                         bool allow_read = true;
2502
2503                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2504                                 allow_read = false;
2505                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2506                                 allow_read = false;
2507
2508                         if (allow_read) {
2509                                 /* Only add feasible reads */
2510                                 mo_graph->startChanges();
2511                                 r_modification_order(curr, act);
2512                                 if (!is_infeasible())
2513                                         curr->get_node()->add_read_from_past(act);
2514                                 mo_graph->rollbackChanges();
2515                         }
2516
2517                         /* Include at most one act per-thread that "happens before" curr */
2518                         if (act->happens_before(curr))
2519                                 break;
2520                 }
2521         }
2522
2523         /* Inherit existing, promised future values */
2524         for (i = 0; i < promises.size(); i++) {
2525                 const Promise *promise = promises[i];
2526                 const ModelAction *promise_read = promise->get_reader(0);
2527                 if (promise_read->same_var(curr)) {
2528                         /* Only add feasible future-values */
2529                         mo_graph->startChanges();
2530                         r_modification_order(curr, promise);
2531                         if (!is_infeasible())
2532                                 curr->get_node()->add_read_from_promise(promise_read);
2533                         mo_graph->rollbackChanges();
2534                 }
2535         }
2536
2537         /* We may find no valid may-read-from only if the execution is doomed */
2538         if (!curr->get_node()->read_from_size()) {
2539                 priv->no_valid_reads = true;
2540                 set_assert();
2541         }
2542
2543         if (DBG_ENABLED()) {
2544                 model_print("Reached read action:\n");
2545                 curr->print();
2546                 model_print("Printing read_from_past\n");
2547                 curr->get_node()->print_read_from_past();
2548                 model_print("End printing read_from_past\n");
2549         }
2550 }
2551
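/**
 * @brief Determine whether a read by a thread on the sleep set may read from
 * a given write
 *
 * Walks backward through the chain of RMW reads-from links beginning at
 * 'write'. The read is permitted if the chain ends, reaches an uninitialized
 * (ATOMIC_UNINIT) action, or contains a release store that was explored while
 * the reading thread was on the sleep set; it is rejected as soon as a
 * non-RMW write is reached without satisfying one of these conditions.
 *
 * @param curr The (sleeping) read action
 * @param write The candidate write that 'curr' would read from
 * @return True if the read may read from 'write'
 */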
2552 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2553 {
2554         for ( ; write != NULL; write = write->get_reads_from()) {
2555                 /* UNINIT actions don't have a Node, and they never sleep */
2556                 if (write->is_uninitialized())
2557                         return true;
2558                 Node *prevnode = write->get_node()->get_parent();
2559
2560                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2561                 if (write->is_release() && thread_sleep)
2562                         return true;
2563                 if (!write->is_rmw())
2564                         return false;
2565         }
2566         return true;
2567 }
2568
2569 /**
2570  * @brief Get an action representing an uninitialized atomic
2571  *
2572  * This function may create a new one or try to retrieve one from the NodeStack
2573  *
2574  * @param curr The current action, which prompts the creation of an UNINIT action
2575  * @return A pointer to the UNINIT ModelAction
2576  */
2577 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2578 {
2579         Node *node = curr->get_node();
2580         ModelAction *act = node->get_uninit_action();
2581         if (!act) {
2582                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2583                 node->set_uninit_action(act);
2584         }
2585         act->create_cv(NULL);
2586         return act;
2587 }
2588
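/**
 * @brief Print a formatted summary of an action list
 *
 * Prints one line per action (skipping actions without a sequence number) and
 * finishes with a simple XOR/shift hash over the list, which can serve as a
 * quick fingerprint when comparing traces.
 */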
2589 static void print_list(const action_list_t *list)
2590 {
2591         action_list_t::const_iterator it;
2592
2593         model_print("---------------------------------------------------------------------\n");
2594
2595         unsigned int hash = 0;
2596
2597         for (it = list->begin(); it != list->end(); it++) {
2598                 const ModelAction *act = *it;
2599                 if (act->get_seq_number() > 0)
2600                         act->print();
2601                 hash = hash^(hash<<3)^((*it)->hash());
2602         }
2603         model_print("HASH %u\n", hash);
2604         model_print("---------------------------------------------------------------------\n");
2605 }
2606
2607 #if SUPPORT_MOD_ORDER_DUMP
2608 void ModelExecution::dumpGraph(char *filename) const
2609 {
2610         char buffer[200];
2611         sprintf(buffer, "%s.dot", filename);
2612         FILE *file = fopen(buffer, "w");
2613         fprintf(file, "digraph %s {\n", filename);
2614         mo_graph->dumpNodes(file);
2615         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2616
2617         for (action_list_t::iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2618                 ModelAction *act = *it;
2619                 if (act->is_read()) {
2620                         mo_graph->dot_print_node(file, act);
2621                         if (act->get_reads_from())
2622                                 mo_graph->dot_print_edge(file,
2623                                                 act->get_reads_from(),
2624                                                 act,
2625                                                 "label=\"rf\", color=red, weight=2");
2626                         else
2627                                 mo_graph->dot_print_edge(file,
2628                                                 act->get_reads_from_promise(),
2629                                                 act,
2630                                                 "label=\"rf\", color=red");
2631                 }
2632                 if (thread_array[id_to_int(act->get_tid())]) {
2633                         mo_graph->dot_print_edge(file,
2634                                         thread_array[id_to_int(act->get_tid())],
2635                                         act,
2636                                         "label=\"sb\", color=blue, weight=400");
2637                 }
2638
2639                 thread_array[id_to_int(act->get_tid())] = act;
2640         }
2641         fprintf(file, "}\n");
2642         model_free(thread_array);
2643         fclose(file);
2644 }
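/*
 * The .dot files written above can be rendered with Graphviz (assuming the
 * 'dot' tool is available), e.g.:
 *
 *     dot -Tpdf graph0001.dot -o graph0001.pdf
 *
 * Edges labeled "rf" (red) are reads-from relationships; edges labeled "sb"
 * (blue) are per-thread sequenced-before chains.
 */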
2645 #endif
2646
2647 /** @brief Prints an execution trace summary. */
2648 void ModelExecution::print_summary() const
2649 {
2650 #if SUPPORT_MOD_ORDER_DUMP
2651         char buffername[100];
2652         sprintf(buffername, "exec%04u", get_execution_number());
2653         mo_graph->dumpGraphToFile(buffername);
2654         sprintf(buffername, "graph%04u", get_execution_number());
2655         dumpGraph(buffername);
2656 #endif
2657
2658         model_print("Execution %d:", get_execution_number());
2659         if (isfeasibleprefix()) {
2660                 if (scheduler->all_threads_sleeping())
2661                         model_print(" SLEEP-SET REDUNDANT");
2662                 model_print("\n");
2663         } else
2664                 print_infeasibility(" INFEASIBLE");
2665         print_list(&action_trace);
2666         model_print("\n");
2667         if (!promises.empty()) {
2668                 model_print("Pending promises:\n");
2669                 for (unsigned int i = 0; i < promises.size(); i++) {
2670                         model_print(" [P%u] ", i);
2671                         promises[i]->print();
2672                 }
2673                 model_print("\n");
2674         }
2675 }
2676
2677 /**
2678  * Add a Thread to the system for the first time. Should only be called once
2679  * per thread.
2680  * @param t The Thread to add
2681  */
2682 void ModelExecution::add_thread(Thread *t)
2683 {
2684         unsigned int i = id_to_int(t->get_id());
2685         if (i >= thread_map.size())
2686                 thread_map.resize(i + 1);
2687         thread_map[i] = t;
2688         if (!t->is_model_thread())
2689                 scheduler->add_thread(t);
2690 }
2691
2692 /**
2693  * @brief Get a Thread reference by its ID
2694  * @param tid The Thread's ID
2695  * @return A Thread reference
2696  */
2697 Thread * ModelExecution::get_thread(thread_id_t tid) const
2698 {
2699         unsigned int i = id_to_int(tid);
2700         if (i < thread_map.size())
2701                 return thread_map[i];
2702         return NULL;
2703 }
2704
2705 /**
2706  * @brief Get a reference to the Thread in which a ModelAction was executed
2707  * @param act The ModelAction
2708  * @return A Thread reference
2709  */
2710 Thread * ModelExecution::get_thread(const ModelAction *act) const
2711 {
2712         return get_thread(act->get_tid());
2713 }
2714
2715 /**
2716  * @brief Get a Promise's "promise number"
2717  *
2718  * A "promise number" is an index number that is unique to a promise, valid
2719  * only for a specific snapshot of an execution trace. Promises may come and go
2720  * as they are generated and resolved, so an index only retains meaning for the
2721  * current snapshot.
2722  *
2723  * @param promise The Promise to check
2724  * @return The promise index, if the promise still is valid; otherwise -1
2725  */
2726 int ModelExecution::get_promise_number(const Promise *promise) const
2727 {
2728         for (unsigned int i = 0; i < promises.size(); i++)
2729                 if (promises[i] == promise)
2730                         return i;
2731         /* Not found */
2732         return -1;
2733 }
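/*
 * Worked example (hypothetical promise set): if promises currently holds
 * {P_a, P_b}, then get_promise_number(P_b) == 1; once P_a is resolved and
 * removed, a later snapshot would report get_promise_number(P_b) == 0, which
 * is why a promise number is only meaningful within a single snapshot.
 */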
2734
2735 /**
2736  * @brief Check if a Thread is currently enabled
2737  * @param t The Thread to check
2738  * @return True if the Thread is currently enabled
2739  */
2740 bool ModelExecution::is_enabled(Thread *t) const
2741 {
2742         return scheduler->is_enabled(t);
2743 }
2744
2745 /**
2746  * @brief Check if a Thread is currently enabled
2747  * @param tid The ID of the Thread to check
2748  * @return True if the Thread is currently enabled
2749  */
2750 bool ModelExecution::is_enabled(thread_id_t tid) const
2751 {
2752         return scheduler->is_enabled(tid);
2753 }
2754
2755 /**
2756  * @brief Select the next thread to execute based on the current action
2757  *
2758  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
2759  * actions should be followed by the execution of their child thread. In either
2760  * case, the current action should determine the next thread schedule.
2761  *
2762  * @param curr The current action
2763  * @return The next thread to run, if the current action will determine this
2764  * selection; otherwise NULL
2765  */
2766 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2767 {
2768         /* Do not split atomic RMW */
2769         if (curr->is_rmwr())
2770                 return get_thread(curr);
2771         /* Follow CREATE with the created thread */
2772         if (curr->get_type() == THREAD_CREATE)
2773                 return curr->get_thread_operand();
2774         return NULL;
2775 }
2776
2777 /** @return True if the execution has taken too many steps */
2778 bool ModelExecution::too_many_steps() const
2779 {
2780         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2781 }
2782
2783 /**
2784  * Takes the next step in the execution, if possible.
2785  * @param curr The current step to take
2786  * @return Returns the next Thread to run, if any; NULL if this execution
2787  * should terminate
2788  */
2789 Thread * ModelExecution::take_step(ModelAction *curr)
2790 {
2791         Thread *curr_thrd = get_thread(curr);
2792         ASSERT(curr_thrd->get_state() == THREAD_READY);
2793
2794         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2795         curr = check_current_action(curr);
2796         ASSERT(curr);
2797
2798         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2799                 scheduler->remove_thread(curr_thrd);
2800
2801         return action_select_next_thread(curr);
2802 }
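/*
 * Usage sketch (illustrative only; the actual driver loop lives in the
 * ModelChecker, and 'execution'/'act' here are hypothetical names):
 *
 *     Thread *forced = execution->take_step(act);
 *     // forced != NULL: that thread must run next (the second half of an
 *     //                 RMW, or the child of a THREAD_CREATE)
 *     // forced == NULL: the Scheduler may pick any enabled thread
 */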
2803
2804 /**
2805  * Launch end-of-execution release sequence fixups only when
2806  * the execution is otherwise feasible AND there are:
2807  *
2808  * (1) pending release sequences
2809  * (2) pending assertions that could be invalidated by a change
2810  * in clock vectors (i.e., data races)
2811  * (3) no pending promises
2812  */
2813 void ModelExecution::fixup_release_sequences()
2814 {
2815         while (!pending_rel_seqs.empty() &&
2816                         is_feasible_prefix_ignore_relseq() &&
2817                         !unrealizedraces.empty()) {
2818                 model_print("*** WARNING: release sequence fixup action "
2819                                 "(%zu pending release sequence(s)) ***\n",
2820                                 pending_rel_seqs.size());
2821                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2822                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2823                                 model_thread);
2824                 take_step(fixup);
2825         }
2826 }