c11tester.git / execution.cc @ commit e038961d211ca074c0b8deece5fe5fab7eccde51
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 too_many_reads(false),
34                 no_valid_reads(false),
35                 bad_synchronization(false),
36                 asserted(false)
37         { }
38
39         ~model_snapshot_members() {
40                 for (unsigned int i = 0; i < bugs.size(); i++)
41                         delete bugs[i];
42                 bugs.clear();
43         }
44
45         unsigned int next_thread_id;
46         modelclock_t used_sequence_numbers;
47         ModelAction *next_backtrack;
48         SnapVector<bug_message *> bugs;
49         bool failed_promise;
50         bool too_many_reads;
51         bool no_valid_reads;
52         /** @brief Incorrectly-ordered synchronization was made */
53         bool bad_synchronization;
54         bool asserted;
55
56         SNAPSHOTALLOC
57 };
58
59 /** @brief Constructor */
60 ModelExecution::ModelExecution(ModelChecker *m,
61                 struct model_params *params,
62                 Scheduler *scheduler,
63                 NodeStack *node_stack) :
64         model(m),
65         params(params),
66         scheduler(scheduler),
67         action_trace(new action_list_t()),
68         thread_map(),
69         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
70         condvar_waiters_map(),
71         obj_thrd_map(),
72         promises(),
73         futurevalues(),
74         pending_rel_seqs(),
75         thrd_last_action(1),
76         thrd_last_fence_release(),
77         node_stack(node_stack),
78         priv(new struct model_snapshot_members()),
79         mo_graph(new CycleGraph())
80 {
81         /* Initialize a model-checker thread, for special ModelActions */
82         model_thread = new Thread(get_next_id());
83         thread_map.put(id_to_int(model_thread->get_id()), model_thread);
84         scheduler->register_engine(this);
85 }
86
87 /** @brief Destructor */
88 ModelExecution::~ModelExecution()
89 {
90         for (unsigned int i = 0; i < get_num_threads(); i++)
91                 delete thread_map.get(i);
92
93         delete obj_map;
94         delete action_trace;
95
96         for (unsigned int i = 0; i < promises.size(); i++)
97                 delete promises[i];
98
99         delete mo_graph;
100         delete priv;
101 }
102
103 int ModelExecution::get_execution_number() const
104 {
105         return model->get_execution_number();
106 }
107
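/* Lazily create (on first use) and return the action list for a given memory
 * location; get_safe_ptr_vect_action below follows the same pattern for
 * per-thread action lists. */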
108 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
109 {
110         action_list_t *tmp = hash->get(ptr);
111         if (tmp == NULL) {
112                 tmp = new action_list_t();
113                 hash->put(ptr, tmp);
114         }
115         return tmp;
116 }
117
118 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
119 {
120         SnapVector<action_list_t> *tmp = hash->get(ptr);
121         if (tmp == NULL) {
122                 tmp = new SnapVector<action_list_t>();
123                 hash->put(ptr, tmp);
124         }
125         return tmp;
126 }
127
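/** @brief Get the list of actions performed on @a obj by thread @a tid, or
 *  NULL if no such actions exist. */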
128 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
129 {
130         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
131         if (wrv==NULL)
132                 return NULL;
133         unsigned int thread=id_to_int(tid);
134         if (thread < wrv->size())
135                 return &(*wrv)[thread];
136         else
137                 return NULL;
138 }
139
140 /** @return a thread ID for a new Thread */
141 thread_id_t ModelExecution::get_next_id()
142 {
143         return priv->next_thread_id++;
144 }
145
146 /** @return the number of user threads created during this execution */
147 unsigned int ModelExecution::get_num_threads() const
148 {
149         return priv->next_thread_id;
150 }
151
152 /** @return a sequence number for a new ModelAction */
153 modelclock_t ModelExecution::get_next_seq_num()
154 {
155         return ++priv->used_sequence_numbers;
156 }
157
158 /**
159  * @brief Should the current action wake up a given thread?
160  *
161  * @param curr The current action
162  * @param thread The thread that we might wake up
163  * @return True, if we should wake up the sleeping thread; false otherwise
164  */
165 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
166 {
167         const ModelAction *asleep = thread->get_pending();
168         /* Don't allow partial RMW to wake anyone up */
169         if (curr->is_rmwr())
170                 return false;
171         /* Synchronizing actions may have been backtracked */
172         if (asleep->could_synchronize_with(curr))
173                 return true;
174         /* An acquire fence may be awoken by any release operation (fence or store) */
175         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
176                 return true;
177         /* Fence-release + store can awake load-acquire on the same location */
178         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
179                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
180                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
181                         return true;
182         }
183         return false;
184 }
185
186 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
187 {
188         for (unsigned int i = 0; i < get_num_threads(); i++) {
189                 Thread *thr = get_thread(int_to_id(i));
190                 if (scheduler->is_sleep_set(thr)) {
191                         if (should_wake_up(curr, thr))
192                                 /* Remove this thread from sleep set */
193                                 scheduler->remove_sleep(thr);
194                 }
195         }
196 }
197
198 /** @brief Alert the model-checker that an incorrectly-ordered
199  * synchronization was made */
200 void ModelExecution::set_bad_synchronization()
201 {
202         priv->bad_synchronization = true;
203 }
204
205 bool ModelExecution::assert_bug(const char *msg)
206 {
207         priv->bugs.push_back(new bug_message(msg));
208
209         if (isfeasibleprefix()) {
210                 set_assert();
211                 return true;
212         }
213         return false;
214 }
215
216 /** @return True, if any bugs have been reported for this execution */
217 bool ModelExecution::have_bug_reports() const
218 {
219         return priv->bugs.size() != 0;
220 }
221
222 SnapVector<bug_message *> * ModelExecution::get_bugs() const
223 {
224         return &priv->bugs;
225 }
226
227 /**
228  * Check whether the current trace has triggered an assertion which should halt
229  * its execution.
230  *
231  * @return True, if the execution should be aborted; false otherwise
232  */
233 bool ModelExecution::has_asserted() const
234 {
235         return priv->asserted;
236 }
237
238 /**
239  * Trigger a trace assertion which should cause this execution to be halted.
240  * This can be due to a detected bug or due to an infeasibility that should
241  * halt ASAP.
242  */
243 void ModelExecution::set_assert()
244 {
245         priv->asserted = true;
246 }
247
248 /**
249  * Check if we are in a deadlock. Should only be called at the end of an
250  * execution, although it should not give false positives in the middle of an
251  * execution (there should be some ENABLED thread).
252  *
253  * @return True if program is in a deadlock; false otherwise
254  */
255 bool ModelExecution::is_deadlocked() const
256 {
257         bool blocking_threads = false;
258         for (unsigned int i = 0; i < get_num_threads(); i++) {
259                 thread_id_t tid = int_to_id(i);
260                 if (is_enabled(tid))
261                         return false;
262                 Thread *t = get_thread(tid);
263                 if (!t->is_model_thread() && t->get_pending())
264                         blocking_threads = true;
265         }
266         return blocking_threads;
267 }
268
269 /**
270  * Check if this is a complete execution. That is, have all threads completed
271  * execution (rather than exiting because sleep sets have forced a redundant
272  * execution).
273  *
274  * @return True if the execution is complete.
275  */
276 bool ModelExecution::is_complete_execution() const
277 {
278         for (unsigned int i = 0; i < get_num_threads(); i++)
279                 if (is_enabled(int_to_id(i)))
280                         return false;
281         return true;
282 }
283
284 /**
285  * @brief Find the last fence-related backtracking conflict for a ModelAction
286  *
287  * This function performs the search for the most recent conflicting action
288  * against which we should perform backtracking, as affected by fence
289  * operations. This includes pairs of potentially-synchronizing actions which
290  * occur due to fence-acquire or fence-release, and hence should be explored in
291  * the opposite execution order.
292  *
293  * @param act The current action
294  * @return The most recent action which conflicts with act due to fences
295  */
296 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
297 {
298         /* Only perform release/acquire fence backtracking for stores */
299         if (!act->is_write())
300                 return NULL;
301
302         /* Find a fence-release (or, act is a release) */
303         ModelAction *last_release;
304         if (act->is_release())
305                 last_release = act;
306         else
307                 last_release = get_last_fence_release(act->get_tid());
308         if (!last_release)
309                 return NULL;
310
311         /* Skip past the release */
312         action_list_t *list = action_trace;
313         action_list_t::reverse_iterator rit;
314         for (rit = list->rbegin(); rit != list->rend(); rit++)
315                 if (*rit == last_release)
316                         break;
317         ASSERT(rit != list->rend());
318
319         /* Find a prior:
320          *   load-acquire
321          * or
322          *   load --sb-> fence-acquire */
323         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
324         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
325         bool found_acquire_fences = false;
326         for ( ; rit != list->rend(); rit++) {
327                 ModelAction *prev = *rit;
328                 if (act->same_thread(prev))
329                         continue;
330
331                 int tid = id_to_int(prev->get_tid());
332
333                 if (prev->is_read() && act->same_var(prev)) {
334                         if (prev->is_acquire()) {
335                                 /* Found most recent load-acquire, don't need
336                                  * to search for more fences */
337                                 if (!found_acquire_fences)
338                                         return NULL;
339                         } else {
340                                 prior_loads[tid] = prev;
341                         }
342                 }
343                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
344                         found_acquire_fences = true;
345                         acquire_fences[tid] = prev;
346                 }
347         }
348
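        /* A thread yields a backtracking candidate only if it has both a
         * prior (non-acquire) load from act's location and an acquire fence;
         * among candidates, pick the latest acquire fence */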
349         ModelAction *latest_backtrack = NULL;
350         for (unsigned int i = 0; i < acquire_fences.size(); i++)
351                 if (acquire_fences[i] && prior_loads[i])
352                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
353                                 latest_backtrack = acquire_fences[i];
354         return latest_backtrack;
355 }
356
357 /**
358  * @brief Find the last backtracking conflict for a ModelAction
359  *
360  * This function performs the search for the most recent conflicting action
361  * against which we should perform backtracking. This primarily includes pairs of
362  * synchronizing actions which should be explored in the opposite execution
363  * order.
364  *
365  * @param act The current action
366  * @return The most recent action which conflicts with act
367  */
368 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
369 {
370         switch (act->get_type()) {
371         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
372         case ATOMIC_READ:
373         case ATOMIC_WRITE:
374         case ATOMIC_RMW: {
375                 ModelAction *ret = NULL;
376
377                 /* linear search: from most recent to oldest */
378                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
379                 action_list_t::reverse_iterator rit;
380                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
381                         ModelAction *prev = *rit;
382                         if (prev->could_synchronize_with(act)) {
383                                 ret = prev;
384                                 break;
385                         }
386                 }
387
388                 ModelAction *ret2 = get_last_fence_conflict(act);
389                 if (!ret2)
390                         return ret;
391                 if (!ret)
392                         return ret2;
393                 if (*ret < *ret2)
394                         return ret2;
395                 return ret;
396         }
397         case ATOMIC_LOCK:
398         case ATOMIC_TRYLOCK: {
399                 /* linear search: from most recent to oldest */
400                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
401                 action_list_t::reverse_iterator rit;
402                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
403                         ModelAction *prev = *rit;
404                         if (act->is_conflicting_lock(prev))
405                                 return prev;
406                 }
407                 break;
408         }
409         case ATOMIC_UNLOCK: {
410                 /* linear search: from most recent to oldest */
411                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
412                 action_list_t::reverse_iterator rit;
413                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
414                         ModelAction *prev = *rit;
415                         if (!act->same_thread(prev) && prev->is_failed_trylock())
416                                 return prev;
417                 }
418                 break;
419         }
420         case ATOMIC_WAIT: {
421                 /* linear search: from most recent to oldest */
422                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
423                 action_list_t::reverse_iterator rit;
424                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
425                         ModelAction *prev = *rit;
426                         if (!act->same_thread(prev) && prev->is_failed_trylock())
427                                 return prev;
428                         if (!act->same_thread(prev) && prev->is_notify())
429                                 return prev;
430                 }
431                 break;
432         }
433
434         case ATOMIC_NOTIFY_ALL:
435         case ATOMIC_NOTIFY_ONE: {
436                 /* linear search: from most recent to oldest */
437                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
438                 action_list_t::reverse_iterator rit;
439                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
440                         ModelAction *prev = *rit;
441                         if (!act->same_thread(prev) && prev->is_wait())
442                                 return prev;
443                 }
444                 break;
445         }
446         default:
447                 break;
448         }
449         return NULL;
450 }
451
452 /** This method finds backtracking points at which we should try to
453  * reorder the parameter ModelAction against earlier conflicting actions.
454  *
455  * @param act The ModelAction to find backtracking points for.
456  */
457 void ModelExecution::set_backtracking(ModelAction *act)
458 {
459         Thread *t = get_thread(act);
460         ModelAction *prev = get_last_conflict(act);
461         if (prev == NULL)
462                 return;
463
464         Node *node = prev->get_node()->get_parent();
465
466         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
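        /* If the current action's thread was enabled at the conflict point,
         * it alone is considered for reordering; otherwise conservatively
         * consider every thread */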
467         int low_tid, high_tid;
468         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
469                 low_tid = id_to_int(act->get_tid());
470                 high_tid = low_tid + 1;
471         } else {
472                 low_tid = 0;
473                 high_tid = get_num_threads();
474         }
475
476         for (int i = low_tid; i < high_tid; i++) {
477                 thread_id_t tid = int_to_id(i);
478
479                 /* Make sure this thread can be enabled here. */
480                 if (i >= node->get_num_threads())
481                         break;
482
483                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
484                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
485                 if (node->enabled_status(tid) != THREAD_ENABLED)
486                         continue;
487
488                 /* Check if this has been explored already */
489                 if (node->has_been_explored(tid))
490                         continue;
491
492                 /* See if fairness allows */
493                 if (params->fairwindow != 0 && !node->has_priority(tid)) {
494                         bool unfair = false;
495                         for (int t = 0; t < node->get_num_threads(); t++) {
496                                 thread_id_t tother = int_to_id(t);
497                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
498                                         unfair = true;
499                                         break;
500                                 }
501                         }
502                         if (unfair)
503                                 continue;
504                 }
505
506                 /* See if CHESS-like yield fairness allows */
507                 if (params->yieldon) {
508                         bool unfair = false;
509                         for (int t = 0; t < node->get_num_threads(); t++) {
510                                 thread_id_t tother = int_to_id(t);
511                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
512                                         unfair = true;
513                                         break;
514                                 }
515                         }
516                         if (unfair)
517                                 continue;
518                 }
519
520                 /* Cache the latest backtracking point */
521                 set_latest_backtrack(prev);
522
523                 /* If this is a new backtracking point, mark the tree */
524                 if (!node->set_backtrack(tid))
525                         continue;
526                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
527                                         id_to_int(prev->get_tid()),
528                                         id_to_int(t->get_id()));
529                 if (DBG_ENABLED()) {
530                         prev->print();
531                         act->print();
532                 }
533         }
534 }
535
536 /**
537  * @brief Cache a backtracking point as the "most recent", if eligible
538  *
539  * Note that this does not prepare the NodeStack for this backtracking
540  * operation; it only caches the action on a per-execution basis.
541  *
542  * @param act The operation at which we should explore a different next action
543  * (i.e., backtracking point)
544  * @return True, if this action is now the most recent backtracking point;
545  * false otherwise
546  */
547 bool ModelExecution::set_latest_backtrack(ModelAction *act)
548 {
549         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
550                 priv->next_backtrack = act;
551                 return true;
552         }
553         return false;
554 }
555
556 /**
557  * Returns last backtracking point. The model checker will explore a different
558  * Returns the last backtracking point. The model checker will explore a different
559  * @return The ModelAction at which the next execution should diverge.
560  */
561 ModelAction * ModelExecution::get_next_backtrack()
562 {
563         ModelAction *next = priv->next_backtrack;
564         priv->next_backtrack = NULL;
565         return next;
566 }
567
568 /**
569  * Processes a read model action.
570  * @param curr is the read model action to process.
571  * @return True if processing this read updates the mo_graph.
572  */
573 bool ModelExecution::process_read(ModelAction *curr)
574 {
575         Node *node = curr->get_node();
576         while (true) {
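                /* Retry loop: if the recency check rejects a past write and
                 * another reads-from candidate remains, roll back the mo
                 * changes and try the next candidate */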
577                 bool updated = false;
578                 switch (node->get_read_from_status()) {
579                 case READ_FROM_PAST: {
580                         const ModelAction *rf = node->get_read_from_past();
581                         ASSERT(rf);
582
583                         mo_graph->startChanges();
584
585                         ASSERT(!is_infeasible());
586                         if (!check_recency(curr, rf)) {
587                                 if (node->increment_read_from()) {
588                                         mo_graph->rollbackChanges();
589                                         continue;
590                                 } else {
591                                         priv->too_many_reads = true;
592                                 }
593                         }
594
595                         updated = r_modification_order(curr, rf);
596                         read_from(curr, rf);
597                         mo_graph->commitChanges();
598                         mo_check_promises(curr, true);
599                         break;
600                 }
601                 case READ_FROM_PROMISE: {
602                         Promise *promise = curr->get_node()->get_read_from_promise();
603                         if (promise->add_reader(curr))
604                                 priv->failed_promise = true;
605                         curr->set_read_from_promise(promise);
606                         mo_graph->startChanges();
607                         if (!check_recency(curr, promise))
608                                 priv->too_many_reads = true;
609                         updated = r_modification_order(curr, promise);
610                         mo_graph->commitChanges();
611                         break;
612                 }
613                 case READ_FROM_FUTURE: {
614                         /* Read from future value */
615                         struct future_value fv = node->get_future_value();
616                         Promise *promise = new Promise(this, curr, fv);
617                         curr->set_read_from_promise(promise);
618                         promises.push_back(promise);
619                         mo_graph->startChanges();
620                         updated = r_modification_order(curr, promise);
621                         mo_graph->commitChanges();
622                         break;
623                 }
624                 default:
625                         ASSERT(false);
626                 }
627                 get_thread(curr)->set_return_value(curr->get_return_value());
628                 return updated;
629         }
630 }
631
632 /**
633  * Processes a lock, trylock, or unlock model action.  @param curr is
634  * the mutex model action to process.
635  *
636  * The try lock operation checks whether the lock is taken.  If not,
637  * it falls through to the normal lock operation case.  If so, it
638  * returns failure.
639  *
640  * The lock operation has already been checked that it is enabled, so
641  * it just grabs the lock and synchronizes with the previous unlock.
642  *
643  * The unlock operation has to re-enable all of the threads that are
644  * waiting on the lock.
645  *
646  * @return True if synchronization was updated; false otherwise
647  */
648 bool ModelExecution::process_mutex(ModelAction *curr)
649 {
650         std::mutex *mutex = curr->get_mutex();
651         struct std::mutex_state *state = NULL;
652
653         if (mutex)
654                 state = mutex->get_state();
655
656         switch (curr->get_type()) {
657         case ATOMIC_TRYLOCK: {
658                 bool success = !state->locked;
659                 curr->set_try_lock(success);
660                 if (!success) {
661                         get_thread(curr)->set_return_value(0);
662                         break;
663                 }
664                 get_thread(curr)->set_return_value(1);
665         }
666                 //otherwise fall into the lock case
667         case ATOMIC_LOCK: {
668                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
669                         assert_bug("Lock access before initialization");
670                 state->locked = get_thread(curr);
671                 ModelAction *unlock = get_last_unlock(curr);
672                 //synchronize with the previous unlock statement
673                 if (unlock != NULL) {
674                         synchronize(unlock, curr);
675                         return true;
676                 }
677                 break;
678         }
679         case ATOMIC_WAIT:
680         case ATOMIC_UNLOCK: {
681                 /* wake up the other threads */
682                 for (unsigned int i = 0; i < get_num_threads(); i++) {
683                         Thread *t = get_thread(int_to_id(i));
684                         Thread *curr_thrd = get_thread(curr);
685                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
686                                 scheduler->wake(t);
687                 }
688
689                 /* unlock the lock - after checking who was waiting on it */
690                 state->locked = NULL;
691
692                 if (!curr->is_wait())
693                         break; /* The rest is only for ATOMIC_WAIT */
694
695                 /* Should we go to sleep? (simulate spurious failures) */
696                 if (curr->get_node()->get_misc() == 0) {
697                         get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
698                         /* disable us */
699                         scheduler->sleep(get_thread(curr));
700                 }
701                 break;
702         }
703         case ATOMIC_NOTIFY_ALL: {
704                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
705                 //activate all the waiting threads
706                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
707                         scheduler->wake(get_thread(*rit));
708                 }
709                 waiters->clear();
710                 break;
711         }
712         case ATOMIC_NOTIFY_ONE: {
713                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
714                 int wakeupthread = curr->get_node()->get_misc();
715                 action_list_t::iterator it = waiters->begin();
716                 advance(it, wakeupthread);
717                 scheduler->wake(get_thread(*it));
718                 waiters->erase(it);
719                 break;
720         }
721
722         default:
723                 ASSERT(0);
724         }
725         return false;
726 }
727
728 /**
729  * @brief Check if the current pending promises allow a future value to be sent
730  *
731  * If one of the following is true:
732  *  (a) there are no pending promises
733  *  (b) the reader and writer do not cross any promises
734  * Then, it is safe to pass a future value back now.
735  *
736  * Otherwise, we must save the pending future value until (a) or (b) is true
737  *
738  * @param writer The operation which sends the future value. Must be a write.
739  * @param reader The operation which will observe the value. Must be a read.
740  * @return True if the future value can be sent now; false if it must wait.
741  */
742 bool ModelExecution::promises_may_allow(const ModelAction *writer,
743                 const ModelAction *reader) const
744 {
745         if (promises.empty())
746                 return true;
747         for (int i = promises.size() - 1; i >= 0; i--) {
748                 ModelAction *pr = promises[i]->get_reader(0);
749                 //reader is after promise...doesn't cross any promise
750                 if (*reader > *pr)
751                         return true;
752                 //writer is after promise, reader before...bad...
753                 if (*writer > *pr)
754                         return false;
755         }
756         return true;
757 }
758
759 /**
760  * @brief Add a future value to a reader
761  *
762  * This function performs a few additional checks to ensure that the future
763  * value can be feasibly observed by the reader
764  *
765  * @param writer The operation whose value is sent. Must be a write.
766  * @param reader The read operation which may read the future value. Must be a read.
767  */
768 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
769 {
770         /* Do more ambitious checks now that mo is more complete */
771         if (!mo_may_allow(writer, reader))
772                 return;
773
774         Node *node = reader->get_node();
775
776         /* Find an ancestor thread which exists at the time of the reader */
777         Thread *write_thread = get_thread(writer);
778         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
779                 write_thread = write_thread->get_parent();
780
781         struct future_value fv = {
782                 writer->get_write_value(),
783                 writer->get_seq_number() + params->maxfuturedelay,
784                 write_thread->get_id(),
785         };
786         if (node->add_future_value(fv))
787                 set_latest_backtrack(reader);
788 }
789
790 /**
791  * Process a write ModelAction
792  * @param curr The ModelAction to process
793  * @return True if the mo_graph was updated or promises were resolved
794  */
795 bool ModelExecution::process_write(ModelAction *curr)
796 {
797         /* Readers to which we may send our future value */
798         ModelVector<ModelAction *> send_fv;
799
800         const ModelAction *earliest_promise_reader;
801         bool updated_promises = false;
802
803         bool updated_mod_order = w_modification_order(curr, &send_fv);
804         Promise *promise = pop_promise_to_resolve(curr);
805
806         if (promise) {
807                 earliest_promise_reader = promise->get_reader(0);
808                 updated_promises = resolve_promise(curr, promise);
809         } else
810                 earliest_promise_reader = NULL;
811
812         for (unsigned int i = 0; i < send_fv.size(); i++) {
813                 ModelAction *read = send_fv[i];
814
815                 /* Don't send future values to reads after the Promise we resolve */
816                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
817                         /* Check if future value can be sent immediately */
818                         if (promises_may_allow(curr, read)) {
819                                 add_future_value(curr, read);
820                         } else {
821                                 futurevalues.push_back(PendingFutureValue(curr, read));
822                         }
823                 }
824         }
825
826         /* Check the pending future values */
827         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
828                 struct PendingFutureValue pfv = futurevalues[i];
829                 if (promises_may_allow(pfv.writer, pfv.reader)) {
830                         add_future_value(pfv.writer, pfv.reader);
831                         futurevalues.erase(futurevalues.begin() + i);
832                 }
833         }
834
835         mo_graph->commitChanges();
836         mo_check_promises(curr, false);
837
838         get_thread(curr)->set_return_value(VALUE_NONE);
839         return updated_mod_order || updated_promises;
840 }
841
842 /**
843  * Process a fence ModelAction
844  * @param curr The ModelAction to process
845  * @return True if synchronization was updated
846  */
847 bool ModelExecution::process_fence(ModelAction *curr)
848 {
849         /*
850          * fence-relaxed: no-op
851          * fence-release: only log the occurrence (not in this function), for
852          *   use in later synchronization
853          * fence-acquire (this function): search for hypothetical release
854          *   sequences
855          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
856          */
857         bool updated = false;
858         if (curr->is_acquire()) {
859                 action_list_t *list = action_trace;
860                 action_list_t::reverse_iterator rit;
861                 /* Find X : is_read(X) && X --sb-> curr */
862                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
863                         ModelAction *act = *rit;
864                         if (act == curr)
865                                 continue;
866                         if (act->get_tid() != curr->get_tid())
867                                 continue;
868                         /* Stop at the beginning of the thread */
869                         if (act->is_thread_start())
870                                 break;
871                         /* Stop once we reach a prior fence-acquire */
872                         if (act->is_fence() && act->is_acquire())
873                                 break;
874                         if (!act->is_read())
875                                 continue;
876                         /* read-acquire will find its own release sequences */
877                         if (act->is_acquire())
878                                 continue;
879
880                         /* Establish hypothetical release sequences */
881                         rel_heads_list_t release_heads;
882                         get_release_seq_heads(curr, act, &release_heads);
883                         for (unsigned int i = 0; i < release_heads.size(); i++)
884                                 synchronize(release_heads[i], curr);
885                         if (release_heads.size() != 0)
886                                 updated = true;
887                 }
888         }
889         return updated;
890 }
891
892 /**
893  * @brief Process the current action for thread-related activity
894  *
895  * Performs current-action processing for a THREAD_* ModelAction. Processing
896  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
897  * synchronization, etc.  This function is a no-op for non-THREAD actions
898  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
899  *
900  * @param curr The current action
901  * @return True if synchronization was updated or a thread completed
902  */
903 bool ModelExecution::process_thread_action(ModelAction *curr)
904 {
905         bool updated = false;
906
907         switch (curr->get_type()) {
908         case THREAD_CREATE: {
909                 thrd_t *thrd = (thrd_t *)curr->get_location();
910                 struct thread_params *params = (struct thread_params *)curr->get_value();
911                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
912                 add_thread(th);
913                 th->set_creation(curr);
914                 /* Promises can be satisfied by children */
915                 for (unsigned int i = 0; i < promises.size(); i++) {
916                         Promise *promise = promises[i];
917                         if (promise->thread_is_available(curr->get_tid()))
918                                 promise->add_thread(th->get_id());
919                 }
920                 break;
921         }
922         case THREAD_JOIN: {
923                 Thread *blocking = curr->get_thread_operand();
924                 ModelAction *act = get_last_action(blocking->get_id());
925                 synchronize(act, curr);
926                 updated = true; /* trigger rel-seq checks */
927                 break;
928         }
929         case THREAD_FINISH: {
930                 Thread *th = get_thread(curr);
931                 /* Wake up any joining threads */
932                 for (unsigned int i = 0; i < get_num_threads(); i++) {
933                         Thread *waiting = get_thread(int_to_id(i));
934                         if (waiting->waiting_on() == th &&
935                                         waiting->get_pending()->is_thread_join())
936                                 scheduler->wake(waiting);
937                 }
938                 th->complete();
939                 /* Completed thread can't satisfy promises */
940                 for (unsigned int i = 0; i < promises.size(); i++) {
941                         Promise *promise = promises[i];
942                         if (promise->thread_is_available(th->get_id()))
943                                 if (promise->eliminate_thread(th->get_id()))
944                                         priv->failed_promise = true;
945                 }
946                 updated = true; /* trigger rel-seq checks */
947                 break;
948         }
949         case THREAD_START: {
950                 check_promises(curr->get_tid(), NULL, curr->get_cv());
951                 break;
952         }
953         default:
954                 break;
955         }
956
957         return updated;
958 }
959
960 /**
961  * @brief Process the current action for release sequence fixup activity
962  *
963  * Performs model-checker release sequence fixups for the current action,
964  * forcing a single pending release sequence to break (with a given, potential
965  * "loose" write) or to complete (i.e., synchronize). If a pending release
966  * sequence forms a complete release sequence, then we must perform the fixup
967  * synchronization, mo_graph additions, etc.
968  *
969  * @param curr The current action; must be a release sequence fixup action
970  * @param work_queue The work queue to which to add work items as they are
971  * generated
972  */
973 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
974 {
975         const ModelAction *write = curr->get_node()->get_relseq_break();
976         struct release_seq *sequence = pending_rel_seqs.back();
977         pending_rel_seqs.pop_back();
978         ASSERT(sequence);
979         ModelAction *acquire = sequence->acquire;
980         const ModelAction *rf = sequence->rf;
981         const ModelAction *release = sequence->release;
982         ASSERT(acquire);
983         ASSERT(release);
984         ASSERT(rf);
985         ASSERT(release->same_thread(rf));
986
987         if (write == NULL) {
988                 /**
989                  * @todo Forcing a synchronization requires that we set
990                  * modification order constraints. For instance, we can't allow
991                  * a fixup sequence in which two separate read-acquire
992                  * operations read from the same sequence, where the first one
993                  * synchronizes and the other doesn't. Essentially, we can't
994                  * allow any writes to insert themselves between 'release' and
995                  * 'rf'
996                  */
997
998                 /* Must synchronize */
999                 if (!synchronize(release, acquire))
1000                         return;
1001                 /* Re-check all pending release sequences */
1002                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1003                 /* Re-check act for mo_graph edges */
1004                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1005
1006                 /* propagate synchronization to later actions */
1007                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1008                 for (; (*rit) != acquire; rit++) {
1009                         ModelAction *propagate = *rit;
1010                         if (acquire->happens_before(propagate)) {
1011                                 synchronize(acquire, propagate);
1012                                 /* Re-check 'propagate' for mo_graph edges */
1013                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1014                         }
1015                 }
1016         } else {
1017                 /* Break release sequence with new edges:
1018                  *   release --mo--> write --mo--> rf */
1019                 mo_graph->addEdge(release, write);
1020                 mo_graph->addEdge(write, rf);
1021         }
1022
1023         /* See if we have realized a data race */
1024         checkDataRaces();
1025 }
1026
1027 /**
1028  * Initialize the current action by performing one or more of the following
1029  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1030  * in the NodeStack, manipulating backtracking sets, allocating and
1031  * initializing clock vectors, and computing the promises to fulfill.
1032  *
1033  * @param curr The current action, as passed from the user context; may be
1034  * freed/invalidated after the execution of this function, with a different
1035  * action "returned" in its place (pass-by-reference)
1036  * @return True if curr is a newly-explored action; false otherwise
1037  */
1038 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1039 {
1040         ModelAction *newcurr;
1041
1042         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
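                /* Second half of an RMW: merge it into the earlier RMW-read
                 * action and discard this duplicate ModelAction */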
1043                 newcurr = process_rmw(*curr);
1044                 delete *curr;
1045
1046                 if (newcurr->is_rmw())
1047                         compute_promises(newcurr);
1048
1049                 *curr = newcurr;
1050                 return false;
1051         }
1052
1053         (*curr)->set_seq_number(get_next_seq_num());
1054
1055         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1056         if (newcurr) {
1057                 /* First restore type and order in case of RMW operation */
1058                 if ((*curr)->is_rmwr())
1059                         newcurr->copy_typeandorder(*curr);
1060
1061                 ASSERT((*curr)->get_location() == newcurr->get_location());
1062                 newcurr->copy_from_new(*curr);
1063
1064                 /* Discard duplicate ModelAction; use action from NodeStack */
1065                 delete *curr;
1066
1067                 /* Always compute new clock vector */
1068                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1069
1070                 *curr = newcurr;
1071                 return false; /* Action was explored previously */
1072         } else {
1073                 newcurr = *curr;
1074
1075                 /* Always compute new clock vector */
1076                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1077
1078                 /* Assign most recent release fence */
1079                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1080
1081                 /*
1082                  * Perform one-time actions when pushing new ModelAction onto
1083                  * NodeStack
1084                  */
1085                 if (newcurr->is_write())
1086                         compute_promises(newcurr);
1087                 else if (newcurr->is_relseq_fixup())
1088                         compute_relseq_breakwrites(newcurr);
1089                 else if (newcurr->is_wait())
1090                         newcurr->get_node()->set_misc_max(2);
1091                 else if (newcurr->is_notify_one()) {
1092                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1093                 }
1094                 return true; /* This was a new ModelAction */
1095         }
1096 }
1097
1098 /**
1099  * @brief Establish reads-from relation between two actions
1100  *
1101  * Perform basic operations involved with establishing a concrete rf relation,
1102  * including setting the ModelAction data and checking for release sequences.
1103  *
1104  * @param act The action that is reading (must be a read)
1105  * @param rf The action from which we are reading (must be a write)
1106  *
1107  * @return True if this read established synchronization
1108  */
1109 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1110 {
1111         ASSERT(rf);
1112         ASSERT(rf->is_write());
1113
1114         act->set_read_from(rf);
1115         if (act->is_acquire()) {
1116                 rel_heads_list_t release_heads;
1117                 get_release_seq_heads(act, act, &release_heads);
1118                 int num_heads = release_heads.size();
1119                 for (unsigned int i = 0; i < release_heads.size(); i++)
1120                         if (!synchronize(release_heads[i], act))
1121                                 num_heads--;
1122                 return num_heads > 0;
1123         }
1124         return false;
1125 }
1126
1127 /**
1128  * @brief Synchronizes two actions
1129  *
1130  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1131  * This function performs the synchronization as well as providing other hooks
1132  * for other checks along with synchronization.
1133  *
1134  * @param first The left-hand side of the synchronizes-with relation
1135  * @param second The right-hand side of the synchronizes-with relation
1136  * @return True if the synchronization was successful (i.e., was consistent
1137  * with the execution order); false otherwise
1138  */
1139 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1140 {
1141         if (*second < *first) {
1142                 set_bad_synchronization();
1143                 return false;
1144         }
1145         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1146         return second->synchronize_with(first);
1147 }
1148
1149 /**
1150  * Check promises and eliminate potentially-satisfying threads when a thread is
1151  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1152  * no longer satisfy a promise generated from that thread.
1153  *
1154  * @param blocker The thread on which a thread is waiting
1155  * @param waiting The waiting thread
1156  */
1157 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1158 {
1159         for (unsigned int i = 0; i < promises.size(); i++) {
1160                 Promise *promise = promises[i];
1161                 if (!promise->thread_is_available(waiting->get_id()))
1162                         continue;
1163                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1164                         ModelAction *reader = promise->get_reader(j);
1165                         if (reader->get_tid() != blocker->get_id())
1166                                 continue;
1167                         if (promise->eliminate_thread(waiting->get_id())) {
1168                                 /* Promise has failed */
1169                                 priv->failed_promise = true;
1170                         } else {
1171                                 /* Only eliminate the 'waiting' thread once */
1172                                 return;
1173                         }
1174                 }
1175         }
1176 }
1177
1178 /**
1179  * @brief Check whether a model action is enabled.
1180  *
1181  * Checks whether a lock or join operation would be successful (i.e., is the
1182  * lock already locked, or is the joined thread already complete). If not, put
1183  * the action in a waiter list.
1184  *
1185  * @param curr is the ModelAction to check whether it is enabled.
1186  * @return a bool that indicates whether the action is enabled.
1187  */
1188 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1189         if (curr->is_lock()) {
1190                 std::mutex *lock = curr->get_mutex();
1191                 struct std::mutex_state *state = lock->get_state();
1192                 if (state->locked)
1193                         return false;
1194         } else if (curr->is_thread_join()) {
1195                 Thread *blocking = curr->get_thread_operand();
1196                 if (!blocking->is_complete()) {
1197                         thread_blocking_check_promises(blocking, get_thread(curr));
1198                         return false;
1199                 }
1200         }
1201
1202         return true;
1203 }
1204
1205 /**
1206  * This is the heart of the model checker routine. It performs model-checking
1207  * actions corresponding to a given "current action." Among other processes, it
1208  * calculates reads-from relationships, updates synchronization clock vectors,
1209  * forms a memory_order constraints graph, and handles replay/backtrack
1210  * execution when running permutations of previously-observed executions.
1211  *
1212  * @param curr The current action to process
1213  * @return The ModelAction that is actually executed; may be different than
1214  * curr; may be NULL, if the current action is not enabled to run
1215  */
1216 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1217 {
1218         ASSERT(curr);
1219         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1220         bool newly_explored = initialize_curr_action(&curr);
1221
1222         DBG();
1223
1224         wake_up_sleeping_actions(curr);
1225
1226         /* Compute fairness information for CHESS yield algorithm */
1227         if (params->yieldon) {
1228                 curr->get_node()->update_yield(scheduler);
1229         }
1230
1231         /* Add the action to lists before any other model-checking tasks */
1232         if (!second_part_of_rmw)
1233                 add_action_to_lists(curr);
1234
1235         /* Build may_read_from set for newly-created actions */
1236         if (newly_explored && curr->is_read())
1237                 build_may_read_from(curr);
1238
1239         /* Initialize work_queue with the "current action" work */
1240         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1241         while (!work_queue.empty() && !has_asserted()) {
1242                 WorkQueueEntry work = work_queue.front();
1243                 work_queue.pop_front();
1244
1245                 switch (work.type) {
1246                 case WORK_CHECK_CURR_ACTION: {
1247                         ModelAction *act = work.action;
1248                         bool update = false; /* update this location's release seq's */
1249                         bool update_all = false; /* update all release seq's */
1250
1251                         if (process_thread_action(curr))
1252                                 update_all = true;
1253
1254                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1255                                 update = true;
1256
1257                         if (act->is_write() && process_write(act))
1258                                 update = true;
1259
1260                         if (act->is_fence() && process_fence(act))
1261                                 update_all = true;
1262
1263                         if (act->is_mutex_op() && process_mutex(act))
1264                                 update_all = true;
1265
1266                         if (act->is_relseq_fixup())
1267                                 process_relseq_fixup(curr, &work_queue);
1268
1269                         if (update_all)
1270                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1271                         else if (update)
1272                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1273                         break;
1274                 }
1275                 case WORK_CHECK_RELEASE_SEQ:
1276                         resolve_release_sequences(work.location, &work_queue);
1277                         break;
1278                 case WORK_CHECK_MO_EDGES: {
1279                         /** @todo Complete verification of work_queue */
1280                         ModelAction *act = work.action;
1281                         bool updated = false;
1282
1283                         if (act->is_read()) {
1284                                 const ModelAction *rf = act->get_reads_from();
1285                                 const Promise *promise = act->get_reads_from_promise();
1286                                 if (rf) {
1287                                         if (r_modification_order(act, rf))
1288                                                 updated = true;
1289                                 } else if (promise) {
1290                                         if (r_modification_order(act, promise))
1291                                                 updated = true;
1292                                 }
1293                         }
1294                         if (act->is_write()) {
1295                                 if (w_modification_order(act, NULL))
1296                                         updated = true;
1297                         }
1298                         mo_graph->commitChanges();
1299
1300                         if (updated)
1301                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1302                         break;
1303                 }
1304                 default:
1305                         ASSERT(false);
1306                         break;
1307                 }
1308         }
1309
1310         check_curr_backtracking(curr);
1311         set_backtracking(curr);
1312         return curr;
1313 }
1314
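/** @brief Set @a curr as the latest backtracking point if it (or its parent
 *  node) still has unexplored alternatives: backtracking threads, misc values,
 *  reads-from candidates, promises, or release-sequence breaks. */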
1315 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1316 {
1317         Node *currnode = curr->get_node();
1318         Node *parnode = currnode->get_parent();
1319
1320         if ((parnode && !parnode->backtrack_empty()) ||
1321                          !currnode->misc_empty() ||
1322                          !currnode->read_from_empty() ||
1323                          !currnode->promise_empty() ||
1324                          !currnode->relseq_break_empty()) {
1325                 set_latest_backtrack(curr);
1326         }
1327 }
1328
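/** @brief Check whether any outstanding promise has passed its expiration
 *  (sequence-number bound), which makes the execution infeasible. */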
1329 bool ModelExecution::promises_expired() const
1330 {
1331         for (unsigned int i = 0; i < promises.size(); i++) {
1332                 Promise *promise = promises[i];
1333                 if (promise->get_expiration() < priv->used_sequence_numbers)
1334                         return true;
1335         }
1336         return false;
1337 }
1338
1339 /**
1340  * This is the strongest feasibility check available.
1341  * @return whether the current trace (partial or complete) must be a prefix of
1342  * a feasible trace.
1343  */
1344 bool ModelExecution::isfeasibleprefix() const
1345 {
1346         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1347 }
1348
1349 /**
1350  * Print diagnostic information about an infeasible execution
1351  * @param prefix A string to prefix the output with; if NULL, then a default
1352  * message prefix will be provided
1353  */
1354 void ModelExecution::print_infeasibility(const char *prefix) const
1355 {
1356         char buf[100];
1357         char *ptr = buf;
1358         if (mo_graph->checkForCycles())
1359                 ptr += sprintf(ptr, "[mo cycle]");
1360         if (priv->failed_promise)
1361                 ptr += sprintf(ptr, "[failed promise]");
1362         if (priv->too_many_reads)
1363                 ptr += sprintf(ptr, "[too many reads]");
1364         if (priv->no_valid_reads)
1365                 ptr += sprintf(ptr, "[no valid reads-from]");
1366         if (priv->bad_synchronization)
1367                 ptr += sprintf(ptr, "[bad sw ordering]");
1368         if (promises_expired())
1369                 ptr += sprintf(ptr, "[promise expired]");
1370         if (promises.size() != 0)
1371                 ptr += sprintf(ptr, "[unresolved promise]");
1372         if (ptr != buf)
1373                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1374 }
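
/*
 * Illustrative output: with a cycle in the mo_graph and an unresolved
 * promise, a call such as print_infeasibility("INFEASIBLE") (the prefix
 * string is caller-chosen) would print:
 *
 *   INFEASIBLE: [mo cycle][unresolved promise]
 */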
1375
1376 /**
1377  * Returns whether the current completed trace is feasible, except for pending
1378  * release sequences.
1379  */
1380 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1381 {
1382         return !is_infeasible() && promises.size() == 0;
1383 }
1384
1385 /**
1386  * Check if the current partial trace is infeasible. Does not check any
1387  * end-of-execution flags, which might rule out the execution. Thus, this is
1388  * useful only for ruling an execution as infeasible.
1389  * @return whether the current partial trace is infeasible.
1390  */
1391 bool ModelExecution::is_infeasible() const
1392 {
1393         return mo_graph->checkForCycles() ||
1394                 priv->no_valid_reads ||
1395                 priv->failed_promise ||
1396                 priv->too_many_reads ||
1397                 priv->bad_synchronization ||
1398                 promises_expired();
1399 }
1400
1401 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1402 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1403         ModelAction *lastread = get_last_action(act->get_tid());
1404         lastread->process_rmw(act);
1405         if (act->is_rmw()) {
1406                 if (lastread->get_reads_from())
1407                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1408                 else
1409                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1410                 mo_graph->commitChanges();
1411         }
1412         return lastread;
1413 }
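
/*
 * Sketch of the intended flow (e.g., an atomic fetch_add): the read half of
 * the RMW is recorded first and becomes the thread's last action; when the
 * write half 'act' arrives, lastread->process_rmw(act) folds the two
 * together.  If the operation actually stores (act->is_rmw()), an RMW edge
 * from whatever lastread read from (a write or a promise) to lastread keeps
 * the mo_graph consistent with RMW atomicity.
 */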
1414
1415 /**
1416  * A helper function for ModelExecution::check_recency, to check if the current
1417  * thread is able to read from a different write/promise for 'params.maxreads'
1418  * number of steps and if that write/promise should become visible (i.e., is
1419  * ordered later in the modification order). This helps model memory liveness.
1420  *
1421  * @param curr The current action. Must be a read.
1422  * @param rf The write/promise from which we plan to read
1423  * @param other_rf The write/promise from which we may read
1424  * @return True if we were able to read from other_rf for params.maxreads steps
1425  */
1426 template <typename T, typename U>
1427 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1428 {
1429         /* Need a different write/promise */
1430         if (other_rf->equals(rf))
1431                 return false;
1432
1433         /* Only look for "newer" writes/promises */
1434         if (!mo_graph->checkReachable(rf, other_rf))
1435                 return false;
1436
1437         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1438         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1439         action_list_t::reverse_iterator rit = list->rbegin();
1440         ASSERT((*rit) == curr);
1441         /* Skip past curr */
1442         rit++;
1443
1444         /* Does this write/promise work for everyone? */
1445         for (int i = 0; i < params->maxreads; i++, rit++) {
1446                 ModelAction *act = *rit;
1447                 if (!act->may_read_from(other_rf))
1448                         return false;
1449         }
1450         return true;
1451 }
1452
1453 /**
1454  * Checks whether a thread has read from the same write or Promise for too many
1455  * times without seeing the effects of a later write/Promise.
1456  *
1457  * Basic idea:
1458  * 1) there must be a different write/promise that we could read from,
1459  * 2) we must have read from the same write/promise in excess of maxreads times,
1460  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1461  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1462  *
1463  * If so, we decide that the execution is no longer feasible.
1464  *
1465  * @param curr The current action. Must be a read.
1466  * @param rf The ModelAction/Promise from which we might read.
1467  * @return True if the read should succeed; false otherwise
1468  */
1469 template <typename T>
1470 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1471 {
1472         if (!params->maxreads)
1473                 return true;
1474
1475         // NOTE: The next check is just an optimization; it is not strictly necessary.
1476         if (curr->get_node()->get_read_from_past_size() +
1477                         curr->get_node()->get_read_from_promise_size() <= 1)
1478                 return true;
1479
1480         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1481         int tid = id_to_int(curr->get_tid());
1482         ASSERT(tid < (int)thrd_lists->size());
1483         action_list_t *list = &(*thrd_lists)[tid];
1484         action_list_t::reverse_iterator rit = list->rbegin();
1485         ASSERT((*rit) == curr);
1486         /* Skip past curr */
1487         rit++;
1488
1489         action_list_t::reverse_iterator ritcopy = rit;
1490         /* See if we have enough reads from the same value */
1491         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1492                 if (ritcopy == list->rend())
1493                         return true;
1494                 ModelAction *act = *ritcopy;
1495                 if (!act->is_read())
1496                         return true;
1497                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1498                         return true;
1499                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1500                         return true;
1501                 if (act->get_node()->get_read_from_past_size() +
1502                                 act->get_node()->get_read_from_promise_size() <= 1)
1503                         return true;
1504         }
1505         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1506                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1507                 if (should_read_instead(curr, rf, write))
1508                         return false; /* liveness failure */
1509         }
1510         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1511                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1512                 if (should_read_instead(curr, rf, promise))
1513                         return false; /* liveness failure */
1514         }
1515         return true;
1516 }
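
/*
 * Liveness illustration: with params->maxreads == 3, a thread spinning on
 *
 *   while (!flag.load(std::memory_order_relaxed)) { }
 *
 * that has already read "false" from the same store three times, while a
 * mo-later store of "true" was readable at each of those reads, fails the
 * recency check; the execution that keeps reading the stale value is treated
 * as infeasible rather than allowed to spin forever.
 */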
1517
1518 /**
1519  * @brief Updates the mo_graph with the constraints imposed from the current
1520  * read.
1521  *
1522  * Basic idea is the following: Go through each other thread and find
1523  * the last action that happened before our read.  Two cases:
1524  *
1525  * -# The action is a write: that write must either occur before
1526  * the write we read from or be the write we read from.
1527  * -# The action is a read: the write that that action read from
1528  * must occur before the write we read from or be the same write.
1529  *
1530  * @param curr The current action. Must be a read.
1531  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1532  * @return True if modification order edges were added; false otherwise
1533  */
1534 template <typename rf_type>
1535 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1536 {
1537         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1538         unsigned int i;
1539         bool added = false;
1540         ASSERT(curr->is_read());
1541
1542         /* Last SC fence in the current thread */
1543         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1544         ModelAction *last_sc_write = NULL;
1545         if (curr->is_seqcst())
1546                 last_sc_write = get_last_seq_cst_write(curr);
1547
1548         /* Iterate over all threads */
1549         for (i = 0; i < thrd_lists->size(); i++) {
1550                 /* Last SC fence in thread i */
1551                 ModelAction *last_sc_fence_thread_local = NULL;
1552                 if (int_to_id((int)i) != curr->get_tid())
1553                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1554
1555                 /* Last SC fence in thread i, before last SC fence in current thread */
1556                 ModelAction *last_sc_fence_thread_before = NULL;
1557                 if (last_sc_fence_local)
1558                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1559
1560                 /* Iterate over actions in thread, starting from most recent */
1561                 action_list_t *list = &(*thrd_lists)[i];
1562                 action_list_t::reverse_iterator rit;
1563                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1564                         ModelAction *act = *rit;
1565
1566                         /* Skip curr */
1567                         if (act == curr)
1568                                 continue;
1569                         /* Don't want to add reflexive edges on 'rf' */
1570                         if (act->equals(rf)) {
1571                                 if (act->happens_before(curr))
1572                                         break;
1573                                 else
1574                                         continue;
1575                         }
1576
1577                         if (act->is_write()) {
1578                                 /* C++, Section 29.3 statement 5 */
1579                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1580                                                 *act < *last_sc_fence_thread_local) {
1581                                         added = mo_graph->addEdge(act, rf) || added;
1582                                         break;
1583                                 }
1584                                 /* C++, Section 29.3 statement 4 */
1585                                 else if (act->is_seqcst() && last_sc_fence_local &&
1586                                                 *act < *last_sc_fence_local) {
1587                                         added = mo_graph->addEdge(act, rf) || added;
1588                                         break;
1589                                 }
1590                                 /* C++, Section 29.3 statement 6 */
1591                                 else if (last_sc_fence_thread_before &&
1592                                                 *act < *last_sc_fence_thread_before) {
1593                                         added = mo_graph->addEdge(act, rf) || added;
1594                                         break;
1595                                 }
1596                         }
1597
1598                         /* C++, Section 29.3 statement 3 (second subpoint) */
1599                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1600                                 added = mo_graph->addEdge(act, rf) || added;
1601                                 break;
1602                         }
1603
1604                         /*
1605                          * Include at most one act per-thread that "happens
1606                          * before" curr
1607                          */
1608                         if (act->happens_before(curr)) {
1609                                 if (act->is_write()) {
1610                                         added = mo_graph->addEdge(act, rf) || added;
1611                                 } else {
1612                                         const ModelAction *prevrf = act->get_reads_from();
1613                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1614                                         if (prevrf) {
1615                                                 if (!prevrf->equals(rf))
1616                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1617                                         } else if (!prevrf_promise->equals(rf)) {
1618                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1619                                         }
1620                                 }
1621                                 break;
1622                         }
1623                 }
1624         }
1625
1626         /*
1627          * All compatible, thread-exclusive promises must be ordered after any
1628          * concrete loads from the same thread
1629          */
1630         for (unsigned int i = 0; i < promises.size(); i++)
1631                 if (promises[i]->is_compatible_exclusive(curr))
1632                         added = mo_graph->addEdge(rf, promises[i]) || added;
1633
1634         return added;
1635 }
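
/*
 * Illustrative trace: thread T1 stores W1: x = 1, W1 happens-before the
 * current read R of x in thread T2, and R reads from some other write W2.
 * The per-thread scan above finds W1 as the last happens-before action in T1
 * and adds W1 --mo--> W2, since a write that happens before the read cannot
 * be modification-ordered after the write the read observes.
 */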
1636
1637 /**
1638  * Updates the mo_graph with the constraints imposed from the current write.
1639  *
1640  * Basic idea is the following: Go through each other thread and find
1641  * the latest action that happened before our write.  Two cases:
1642  *
1643  * (1) The action is a write => that write must occur before
1644  * the current write
1645  *
1646  * (2) The action is a read => the write that that action read from
1647  * must occur before the current write.
1648  *
1649  * This method also handles two other issues:
1650  *
1651  * (I) Sequential Consistency: Making sure that if the current write is
1652  * seq_cst, that it occurs after the previous seq_cst write.
1653  *
1654  * (II) Sending the write back to non-synchronizing reads.
1655  *
1656  * @param curr The current action. Must be a write.
1657  * @param send_fv A vector for stashing reads to which we may pass our future
1658  * value. If NULL, then don't record any future values.
1659  * @return True if modification order edges were added; false otherwise
1660  */
1661 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1662 {
1663         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1664         unsigned int i;
1665         bool added = false;
1666         ASSERT(curr->is_write());
1667
1668         if (curr->is_seqcst()) {
1669                 /* We have to at least see the last sequentially consistent write,
1670                          so we are initialized. */
1671                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1672                 if (last_seq_cst != NULL) {
1673                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1674                 }
1675         }
1676
1677         /* Last SC fence in the current thread */
1678         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1679
1680         /* Iterate over all threads */
1681         for (i = 0; i < thrd_lists->size(); i++) {
1682                 /* Last SC fence in thread i, before last SC fence in current thread */
1683                 ModelAction *last_sc_fence_thread_before = NULL;
1684                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1685                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1686
1687                 /* Iterate over actions in thread, starting from most recent */
1688                 action_list_t *list = &(*thrd_lists)[i];
1689                 action_list_t::reverse_iterator rit;
1690                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1691                         ModelAction *act = *rit;
1692                         if (act == curr) {
1693                                 /*
1694                                  * 1) If RMW and it actually read from something, then we
1695                                  * already have all relevant edges, so just skip to next
1696                                  * thread.
1697                                  *
1698                                  * 2) If RMW and it didn't read from anything, we should add
1699                                  * whatever edge we can get to speed up convergence.
1700                                  *
1701                                  * 3) If normal write, we need to look at earlier actions, so
1702                                  * continue processing list.
1703                                  */
1704                                 if (curr->is_rmw()) {
1705                                         if (curr->get_reads_from() != NULL)
1706                                                 break;
1707                                         else
1708                                                 continue;
1709                                 } else
1710                                         continue;
1711                         }
1712
1713                         /* C++, Section 29.3 statement 7 */
1714                         if (last_sc_fence_thread_before && act->is_write() &&
1715                                         *act < *last_sc_fence_thread_before) {
1716                                 added = mo_graph->addEdge(act, curr) || added;
1717                                 break;
1718                         }
1719
1720                         /*
1721                          * Include at most one act per-thread that "happens
1722                          * before" curr
1723                          */
1724                         if (act->happens_before(curr)) {
1725                                 /*
1726                                  * Note: if act is RMW, just add edge:
1727                                  *   act --mo--> curr
1728                                  * The following edge should be handled elsewhere:
1729                                  *   readfrom(act) --mo--> act
1730                                  */
1731                                 if (act->is_write())
1732                                         added = mo_graph->addEdge(act, curr) || added;
1733                                 else if (act->is_read()) {
1734                                         //if previous read accessed a null, just keep going
1735                                         if (act->get_reads_from() == NULL)
1736                                                 continue;
1737                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1738                                 }
1739                                 break;
1740                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1741                                                      !act->same_thread(curr)) {
1742                                 /* We have an action that:
1743                                    (1) did not happen before us
1744                                    (2) is a read and we are a write
1745                                    (3) cannot synchronize with us
1746                                    (4) is in a different thread
1747                                    =>
1748                                    that read could potentially read from our write.  Note that
1749                                    these checks are overly conservative at this point, we'll
1750                                    do more checks before actually removing the
1751                                    pendingfuturevalue.
1752
1753                                  */
1754                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1755                                         if (!is_infeasible())
1756                                                 send_fv->push_back(act);
1757                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1758                                                 add_future_value(curr, act);
1759                                 }
1760                         }
1761                 }
1762         }
1763
1764         /*
1765          * All compatible, thread-exclusive promises must be ordered after any
1766  * concrete stores from the same thread, or else they can be merged with
1767          * this store later
1768          */
1769         for (unsigned int i = 0; i < promises.size(); i++)
1770                 if (promises[i]->is_compatible_exclusive(curr))
1771                         added = mo_graph->addEdge(curr, promises[i]) || added;
1772
1773         return added;
1774 }
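
/*
 * Illustrative trace: if a read R of x in another thread read from W0 and R
 * happens-before the current write W to x, the scan above adds W0 --mo--> W;
 * if instead a write W0 itself happens-before W, the edge W0 --mo--> W is
 * added directly.  Reads in other threads that did not happen before W and
 * cannot synchronize with it are stashed in send_fv as candidates to receive
 * W's value as a future value.
 */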
1775
1776 /** Arbitrary reads from the future are not allowed.  Section 29.3
1777  * part 9 places some constraints.  This method checks one result of that
1778  * constraint.  Others require compiler support. */
1779 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1780 {
1781         if (!writer->is_rmw())
1782                 return true;
1783
1784         if (!reader->is_rmw())
1785                 return true;
1786
1787         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1788                 if (search == reader)
1789                         return false;
1790                 if (search->get_tid() == reader->get_tid() &&
1791                                 search->happens_before(reader))
1792                         break;
1793         }
1794
1795         return true;
1796 }
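
/*
 * Illustrative case: both writer and reader are RMWs on the same location.
 * If walking the writer's reads-from chain backwards reaches the reader,
 * letting the reader read from the writer would close a cycle of RMWs
 * feeding each other values "out of thin air", so the pairing is rejected.
 */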
1797
1798 /**
1799  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1800  * some constraints. This method checks the following constraint (others
1801  * require compiler support):
1802  *
1803  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1804  */
1805 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1806 {
1807         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1808         unsigned int i;
1809         /* Iterate over all threads */
1810         for (i = 0; i < thrd_lists->size(); i++) {
1811                 const ModelAction *write_after_read = NULL;
1812
1813                 /* Iterate over actions in thread, starting from most recent */
1814                 action_list_t *list = &(*thrd_lists)[i];
1815                 action_list_t::reverse_iterator rit;
1816                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1817                         ModelAction *act = *rit;
1818
1819                         /* Don't disallow due to act == reader */
1820                         if (!reader->happens_before(act) || reader == act)
1821                                 break;
1822                         else if (act->is_write())
1823                                 write_after_read = act;
1824                         else if (act->is_read() && act->get_reads_from() != NULL)
1825                                 write_after_read = act->get_reads_from();
1826                 }
1827
1828                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1829                         return false;
1830         }
1831         return true;
1832 }
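
/*
 * Illustrative case: the candidate reader X happens-before an action that
 * either is, or reads from, a write Y, and Y is modification-ordered before
 * the candidate writer Z.  Letting X read from Z would then violate the
 * "X --hb-> Y --mo-> Z implies X does not read from Z" rule above, so the
 * function returns false.
 */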
1833
1834 /**
1835  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1836  * The ModelAction under consideration is expected to be taking part in
1837  * release/acquire synchronization as an object of the "reads from" relation.
1838  * Note that this can only provide release sequence support for RMW chains
1839  * which do not read from the future, as those actions cannot be traced until
1840  * their "promise" is fulfilled. Similarly, we may not even establish the
1841  * presence of a release sequence with certainty, as some modification order
1842  * constraints may be decided further in the future. Thus, this function
1843  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1844  * and a boolean representing certainty.
1845  *
1846  * @param rf The action that might be part of a release sequence. Must be a
1847  * write.
1848  * @param release_heads A pass-by-reference style return parameter. After
1849  * execution of this function, release_heads will contain the heads of all the
1850  * relevant release sequences, if any exists with certainty
1851  * @param pending A pass-by-reference style return parameter which is only used
1852  * when returning false (i.e., uncertain). Returns most information regarding
1853  * an uncertain release sequence, including any write operations that might
1854  * break the sequence.
1855  * @return true, if the ModelExecution is certain that release_heads is complete;
1856  * false otherwise
1857  */
1858 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1859                 rel_heads_list_t *release_heads,
1860                 struct release_seq *pending) const
1861 {
1862         /* Only check for release sequences if there are no cycles */
1863         if (mo_graph->checkForCycles())
1864                 return false;
1865
1866         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1867                 ASSERT(rf->is_write());
1868
1869                 if (rf->is_release())
1870                         release_heads->push_back(rf);
1871                 else if (rf->get_last_fence_release())
1872                         release_heads->push_back(rf->get_last_fence_release());
1873                 if (!rf->is_rmw())
1874                         break; /* End of RMW chain */
1875
1876                 /** @todo Need to be smarter here...  In the linux lock
1877                  * example, this will run to the beginning of the program for
1878                  * every acquire. */
1879                 /** @todo The way to be smarter here is to keep going until 1
1880                  * thread has a release preceded by an acquire and you've seen
1881                  *       both. */
1882
1883                 /* acq_rel RMW is a sufficient stopping condition */
1884                 if (rf->is_acquire() && rf->is_release())
1885                         return true; /* complete */
1886         }
1887         if (!rf) {
1888                 /* read from future: need to settle this later */
1889                 pending->rf = NULL;
1890                 return false; /* incomplete */
1891         }
1892
1893         if (rf->is_release())
1894                 return true; /* complete */
1895
1896         /* else relaxed write
1897          * - check for fence-release in the same thread (29.8, stmt. 3)
1898          * - check modification order for contiguous subsequence
1899          *   -> rf must be same thread as release */
1900
1901         const ModelAction *fence_release = rf->get_last_fence_release();
1902         /* Synchronize with a fence-release unconditionally; we don't need to
1903          * find any more "contiguous subsequence..." for it */
1904         if (fence_release)
1905                 release_heads->push_back(fence_release);
1906
1907         int tid = id_to_int(rf->get_tid());
1908         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1909         action_list_t *list = &(*thrd_lists)[tid];
1910         action_list_t::const_reverse_iterator rit;
1911
1912         /* Find rf in the thread list */
1913         rit = std::find(list->rbegin(), list->rend(), rf);
1914         ASSERT(rit != list->rend());
1915
1916         /* Find the last {write,fence}-release */
1917         for (; rit != list->rend(); rit++) {
1918                 if (fence_release && *(*rit) < *fence_release)
1919                         break;
1920                 if ((*rit)->is_release())
1921                         break;
1922         }
1923         if (rit == list->rend()) {
1924                 /* No write-release in this thread */
1925                 return true; /* complete */
1926         } else if (fence_release && *(*rit) < *fence_release) {
1927                 /* The fence-release is more recent (and so, "stronger") than
1928                  * the most recent write-release */
1929                 return true; /* complete */
1930         } /* else, need to establish contiguous release sequence */
1931         ModelAction *release = *rit;
1932
1933         ASSERT(rf->same_thread(release));
1934
1935         pending->writes.clear();
1936
1937         bool certain = true;
1938         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1939                 if (id_to_int(rf->get_tid()) == (int)i)
1940                         continue;
1941                 list = &(*thrd_lists)[i];
1942
1943                 /* Can we ensure no future writes from this thread may break
1944                  * the release seq? */
1945                 bool future_ordered = false;
1946
1947                 ModelAction *last = get_last_action(int_to_id(i));
1948                 Thread *th = get_thread(int_to_id(i));
1949                 if ((last && rf->happens_before(last)) ||
1950                                 !is_enabled(th) ||
1951                                 th->is_complete())
1952                         future_ordered = true;
1953
1954                 ASSERT(!th->is_model_thread() || future_ordered);
1955
1956                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1957                         const ModelAction *act = *rit;
1958                         /* Reach synchronization -> this thread is complete */
1959                         if (act->happens_before(release))
1960                                 break;
1961                         if (rf->happens_before(act)) {
1962                                 future_ordered = true;
1963                                 continue;
1964                         }
1965
1966                         /* Only non-RMW writes can break release sequences */
1967                         if (!act->is_write() || act->is_rmw())
1968                                 continue;
1969
1970                         /* Check modification order */
1971                         if (mo_graph->checkReachable(rf, act)) {
1972                                 /* rf --mo--> act */
1973                                 future_ordered = true;
1974                                 continue;
1975                         }
1976                         if (mo_graph->checkReachable(act, release))
1977                                 /* act --mo--> release */
1978                                 break;
1979                         if (mo_graph->checkReachable(release, act) &&
1980                                       mo_graph->checkReachable(act, rf)) {
1981                                 /* release --mo-> act --mo--> rf */
1982                                 return true; /* complete */
1983                         }
1984                         /* act may break release sequence */
1985                         pending->writes.push_back(act);
1986                         certain = false;
1987                 }
1988                 if (!future_ordered)
1989                         certain = false; /* This thread is uncertain */
1990         }
1991
1992         if (certain) {
1993                 release_heads->push_back(release);
1994                 pending->writes.clear();
1995         } else {
1996                 pending->release = release;
1997                 pending->rf = rf;
1998         }
1999         return certain;
2000 }
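
/*
 * Illustrative release sequence: a store-release REL to x followed, in
 * modification order, by an unbroken chain of RMWs on x is headed by REL; an
 * acquire reading from the tail of that chain synchronizes with REL.  The
 * loop above walks the RMW reads-from chain back toward such a head, and the
 * per-thread scan afterwards asks whether some other thread's plain
 * (non-RMW) store could still land between the release and rf in the
 * modification order; if that cannot yet be ruled out, the sequence stays
 * "pending" and is re-examined later.
 */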
2001
2002 /**
2003  * An interface for getting the release sequence head(s) with which a
2004  * given ModelAction must synchronize. This function only returns a non-empty
2005  * result when it can locate a release sequence head with certainty. Otherwise,
2006  * it may mark the internal state of the ModelExecution so that it will handle
2007  * the release sequence at a later time, causing @a acquire to update its
2008  * synchronization at some later point in execution.
2009  *
2010  * @param acquire The 'acquire' action that may synchronize with a release
2011  * sequence
2012  * @param read The read action that may read from a release sequence; this may
2013  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2014  * when 'acquire' is a fence-acquire)
2015  * @param release_heads A pass-by-reference return parameter. Will be filled
2016  * with the head(s) of the release sequence(s), if they exist with certainty.
2017  * @see ModelExecution::release_seq_heads
2018  */
2019 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2020                 ModelAction *read, rel_heads_list_t *release_heads)
2021 {
2022         const ModelAction *rf = read->get_reads_from();
2023         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2024         sequence->acquire = acquire;
2025         sequence->read = read;
2026
2027         if (!release_seq_heads(rf, release_heads, sequence)) {
2028                 /* add act to 'lazy checking' list */
2029                 pending_rel_seqs.push_back(sequence);
2030         } else {
2031                 snapshot_free(sequence);
2032         }
2033 }
2034
2035 /**
2036  * Attempt to resolve all stashed operations that might synchronize with a
2037  * release sequence for a given location. This implements the "lazy" portion of
2038  * determining whether or not a release sequence was contiguous, since not all
2039  * modification order information is present at the time an action occurs.
2040  *
2041  * @param location The location/object that should be checked for release
2042  * sequence resolutions. A NULL value means to check all locations.
2043  * @param work_queue The work queue to which to add work items as they are
2044  * generated
2045  * @return True if any updates occurred (new synchronization, new mo_graph
2046  * edges)
2047  */
2048 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2049 {
2050         bool updated = false;
2051         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2052         while (it != pending_rel_seqs.end()) {
2053                 struct release_seq *pending = *it;
2054                 ModelAction *acquire = pending->acquire;
2055                 const ModelAction *read = pending->read;
2056
2057                 /* Only resolve sequences on the given location, if provided */
2058                 if (location && read->get_location() != location) {
2059                         it++;
2060                         continue;
2061                 }
2062
2063                 const ModelAction *rf = read->get_reads_from();
2064                 rel_heads_list_t release_heads;
2065                 bool complete;
2066                 complete = release_seq_heads(rf, &release_heads, pending);
2067                 for (unsigned int i = 0; i < release_heads.size(); i++)
2068                         if (!acquire->has_synchronized_with(release_heads[i]))
2069                                 if (synchronize(release_heads[i], acquire))
2070                                         updated = true;
2071
2072                 if (updated) {
2073                         /* Re-check all pending release sequences */
2074                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2075                         /* Re-check read-acquire for mo_graph edges */
2076                         if (acquire->is_read())
2077                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2078
2079                         /* propagate synchronization to later actions */
2080                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2081                         for (; (*rit) != acquire; rit++) {
2082                                 ModelAction *propagate = *rit;
2083                                 if (acquire->happens_before(propagate)) {
2084                                         synchronize(acquire, propagate);
2085                                         /* Re-check 'propagate' for mo_graph edges */
2086                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2087                                 }
2088                         }
2089                 }
2090                 if (complete) {
2091                         it = pending_rel_seqs.erase(it);
2092                         snapshot_free(pending);
2093                 } else {
2094                         it++;
2095                 }
2096         }
2097
2098         // If we added new synchronization, check whether we have realized a data race.
2099         checkDataRaces();
2100
2101         return updated;
2102 }
2103
2104 /**
2105  * Performs various bookkeeping operations for the current ModelAction. For
2106  * instance, adds action to the per-object, per-thread action vector and to the
2107  * action trace list of all thread actions.
2108  *
2109  * @param act is the ModelAction to add.
2110  */
2111 void ModelExecution::add_action_to_lists(ModelAction *act)
2112 {
2113         int tid = id_to_int(act->get_tid());
2114         ModelAction *uninit = NULL;
2115         int uninit_id = -1;
2116         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2117         if (list->empty() && act->is_atomic_var()) {
2118                 uninit = get_uninitialized_action(act);
2119                 uninit_id = id_to_int(uninit->get_tid());
2120                 list->push_front(uninit);
2121         }
2122         list->push_back(act);
2123
2124         action_trace->push_back(act);
2125         if (uninit)
2126                 action_trace->push_front(uninit);
2127
2128         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2129         if (tid >= (int)vec->size())
2130                 vec->resize(priv->next_thread_id);
2131         (*vec)[tid].push_back(act);
2132         if (uninit)
2133                 (*vec)[uninit_id].push_front(uninit);
2134
2135         if ((int)thrd_last_action.size() <= tid)
2136                 thrd_last_action.resize(get_num_threads());
2137         thrd_last_action[tid] = act;
2138         if (uninit)
2139                 thrd_last_action[uninit_id] = uninit;
2140
2141         if (act->is_fence() && act->is_release()) {
2142                 if ((int)thrd_last_fence_release.size() <= tid)
2143                         thrd_last_fence_release.resize(get_num_threads());
2144                 thrd_last_fence_release[tid] = act;
2145         }
2146
2147         if (act->is_wait()) {
2148                 void *mutex_loc = (void *) act->get_value();
2149                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2150
2151                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2152                 if (tid >= (int)vec->size())
2153                         vec->resize(priv->next_thread_id);
2154                 (*vec)[tid].push_back(act);
2155         }
2156 }
2157
2158 /**
2159  * @brief Get the last action performed by a particular Thread
2160  * @param tid The thread ID of the Thread in question
2161  * @return The last action in the thread
2162  */
2163 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2164 {
2165         int threadid = id_to_int(tid);
2166         if (threadid < (int)thrd_last_action.size())
2167                 return thrd_last_action[id_to_int(tid)];
2168         else
2169                 return NULL;
2170 }
2171
2172 /**
2173  * @brief Get the last fence release performed by a particular Thread
2174  * @param tid The thread ID of the Thread in question
2175  * @return The last fence release in the thread, if one exists; NULL otherwise
2176  */
2177 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2178 {
2179         int threadid = id_to_int(tid);
2180         if (threadid < (int)thrd_last_fence_release.size())
2181                 return thrd_last_fence_release[id_to_int(tid)];
2182         else
2183                 return NULL;
2184 }
2185
2186 /**
2187  * Gets the last memory_order_seq_cst write (in the total global sequence)
2188  * performed on a particular object (i.e., memory location), not including the
2189  * current action.
2190  * @param curr The current ModelAction; also denotes the object location to
2191  * check
2192  * @return The last seq_cst write
2193  */
2194 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2195 {
2196         void *location = curr->get_location();
2197         action_list_t *list = get_safe_ptr_action(obj_map, location);
2198         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2199         action_list_t::reverse_iterator rit;
2200         for (rit = list->rbegin(); (*rit) != curr; rit++)
2201                 ;
2202         rit++; /* Skip past curr */
2203         for ( ; rit != list->rend(); rit++)
2204                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2205                         return *rit;
2206         return NULL;
2207 }
2208
2209 /**
2210  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2211  * performed in a particular thread, prior to a particular fence.
2212  * @param tid The ID of the thread to check
2213  * @param before_fence The fence from which to begin the search; if NULL, then
2214  * search for the most recent fence in the thread.
2215  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2216  */
2217 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2218 {
2219         /* All fences should have NULL location */
2220         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2221         action_list_t::reverse_iterator rit = list->rbegin();
2222
2223         if (before_fence) {
2224                 for (; rit != list->rend(); rit++)
2225                         if (*rit == before_fence)
2226                                 break;
2227
2228                 ASSERT(*rit == before_fence);
2229                 rit++;
2230         }
2231
2232         for (; rit != list->rend(); rit++)
2233                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2234                         return *rit;
2235         return NULL;
2236 }
2237
2238 /**
2239  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2240  * location). This function identifies the mutex according to the current
2241  * action, which is presumed to operate on the same mutex.
2242  * @param curr The current ModelAction; also denotes the object location to
2243  * check
2244  * @return The last unlock operation
2245  */
2246 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2247 {
2248         void *location = curr->get_location();
2249         action_list_t *list = get_safe_ptr_action(obj_map, location);
2250         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2251         action_list_t::reverse_iterator rit;
2252         for (rit = list->rbegin(); rit != list->rend(); rit++)
2253                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2254                         return *rit;
2255         return NULL;
2256 }
2257
2258 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2259 {
2260         ModelAction *parent = get_last_action(tid);
2261         if (!parent)
2262                 parent = get_thread(tid)->get_creation();
2263         return parent;
2264 }
2265
2266 /**
2267  * Returns the clock vector for a given thread.
2268  * @param tid The thread whose clock vector we want
2269  * @return Desired clock vector
2270  */
2271 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2272 {
2273         return get_parent_action(tid)->get_cv();
2274 }
2275
2276 /**
2277  * @brief Find the promise (if any) to resolve for the current action and
2278  * remove it from the pending promise vector
2279  * @param curr The current ModelAction. Should be a write.
2280  * @return The Promise to resolve, if any; otherwise NULL
2281  */
2282 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2283 {
2284         for (unsigned int i = 0; i < promises.size(); i++)
2285                 if (curr->get_node()->get_promise(i)) {
2286                         Promise *ret = promises[i];
2287                         promises.erase(promises.begin() + i);
2288                         return ret;
2289                 }
2290         return NULL;
2291 }
2292
2293 /**
2294  * Resolve a Promise with a current write.
2295  * @param write The ModelAction that is fulfilling Promises
2296  * @param promise The Promise to resolve
2297  * @return True if the Promise was successfully resolved; false otherwise
2298  */
2299 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2300 {
2301         ModelVector<ModelAction *> actions_to_check;
2302
2303         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2304                 ModelAction *read = promise->get_reader(i);
2305                 read_from(read, write);
2306                 actions_to_check.push_back(read);
2307         }
2308         /* Make sure the promise's value matches the write's value */
2309         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2310         if (!mo_graph->resolvePromise(promise, write))
2311                 priv->failed_promise = true;
2312
2313         /**
2314          * @todo  It is possible to end up in an inconsistent state, where a
2315          * "resolved" promise may still be referenced if
2316          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2317          *
2318          * Note that the inconsistency only matters when dumping mo_graph to
2319          * file.
2320          *
2321          * delete promise;
2322          */
2323
2324         //Check whether reading these writes has made threads unable to
2325         //resolve promises
2326         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2327                 ModelAction *read = actions_to_check[i];
2328                 mo_check_promises(read, true);
2329         }
2330
2331         return true;
2332 }
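
/*
 * Illustrative scenario: a load speculatively returns a value that no store
 * has produced yet; that obligation is tracked as a Promise listing the
 * reads depending on it.  When a compatible store of the same value is later
 * explored, resolve_promise() binds each of those reads to the store and
 * lets the mo_graph check that the binding is still consistent; otherwise
 * the execution is flagged with failed_promise.
 */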
2333
2334 /**
2335  * Compute the set of promises that could potentially be satisfied by this
2336  * action. Note that the set computation actually appears in the Node, not in
2337  * ModelExecution.
2338  * @param curr The ModelAction that may satisfy promises
2339  */
2340 void ModelExecution::compute_promises(ModelAction *curr)
2341 {
2342         for (unsigned int i = 0; i < promises.size(); i++) {
2343                 Promise *promise = promises[i];
2344                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2345                         continue;
2346
2347                 bool satisfy = true;
2348                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2349                         const ModelAction *act = promise->get_reader(j);
2350                         if (act->happens_before(curr) ||
2351                                         act->could_synchronize_with(curr)) {
2352                                 satisfy = false;
2353                                 break;
2354                         }
2355                 }
2356                 if (satisfy)
2357                         curr->get_node()->set_promise(i);
2358         }
2359 }
2360
2361 /** Checks promises in response to a change in a thread's ClockVector. */
2362 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2363 {
2364         for (unsigned int i = 0; i < promises.size(); i++) {
2365                 Promise *promise = promises[i];
2366                 if (!promise->thread_is_available(tid))
2367                         continue;
2368                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2369                         const ModelAction *act = promise->get_reader(j);
2370                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2371                                         merge_cv->synchronized_since(act)) {
2372                                 if (promise->eliminate_thread(tid)) {
2373                                         /* Promise has failed */
2374                                         priv->failed_promise = true;
2375                                         return;
2376                                 }
2377                         }
2378                 }
2379         }
2380 }
2381
2382 void ModelExecution::check_promises_thread_disabled()
2383 {
2384         for (unsigned int i = 0; i < promises.size(); i++) {
2385                 Promise *promise = promises[i];
2386                 if (promise->has_failed()) {
2387                         priv->failed_promise = true;
2388                         return;
2389                 }
2390         }
2391 }
2392
2393 /**
2394  * @brief Checks promises in response to addition to modification order for
2395  * threads.
2396  *
2397  * We test whether threads are still available for satisfying promises after an
2398  * addition to our modification order constraints. Those that are unavailable
2399  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2400  * that promise has failed.
2401  *
2402  * @param act The ModelAction which updated the modification order
2403  * @param is_read_check Should be true if act is a read and we must check for
2404  * updates to the store from which it read (there is a distinction here for
2405  * RMW's, which are both a load and a store)
2406  */
2407 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2408 {
2409         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2410
2411         for (unsigned int i = 0; i < promises.size(); i++) {
2412                 Promise *promise = promises[i];
2413
2414                 // Is this promise on the same location?
2415                 if (!promise->same_location(write))
2416                         continue;
2417
2418                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2419                         const ModelAction *pread = promise->get_reader(j);
2420                         if (!pread->happens_before(act))
2421                                continue;
2422                         if (mo_graph->checkPromise(write, promise)) {
2423                                 priv->failed_promise = true;
2424                                 return;
2425                         }
2426                         break;
2427                 }
2428
2429                 // Don't do any lookups twice for the same thread
2430                 if (!promise->thread_is_available(act->get_tid()))
2431                         continue;
2432
2433                 if (mo_graph->checkReachable(promise, write)) {
2434                         if (mo_graph->checkPromise(write, promise)) {
2435                                 priv->failed_promise = true;
2436                                 return;
2437                         }
2438                 }
2439         }
2440 }
2441
2442 /**
2443  * Compute the set of writes that may break the current pending release
2444  * sequence. This information is extracted from previous release sequence
2445  * calculations.
2446  *
2447  * @param curr The current ModelAction. Must be a release sequence fixup
2448  * action.
2449  */
2450 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2451 {
2452         if (pending_rel_seqs.empty())
2453                 return;
2454
2455         struct release_seq *pending = pending_rel_seqs.back();
2456         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2457                 const ModelAction *write = pending->writes[i];
2458                 curr->get_node()->add_relseq_break(write);
2459         }
2460
2461         /* NULL means don't break the sequence; just synchronize */
2462         curr->get_node()->add_relseq_break(NULL);
2463 }
2464
2465 /**
2466  * Build up an initial set of all past writes that this 'read' action may read
2467  * from, as well as any previously-observed future values that must still be valid.
2468  *
2469  * @param curr is the current ModelAction that we are exploring; it must be a
2470  * 'read' operation.
2471  */
2472 void ModelExecution::build_may_read_from(ModelAction *curr)
2473 {
2474         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2475         unsigned int i;
2476         ASSERT(curr->is_read());
2477
2478         ModelAction *last_sc_write = NULL;
2479
2480         if (curr->is_seqcst())
2481                 last_sc_write = get_last_seq_cst_write(curr);
2482
2483         /* Iterate over all threads */
2484         for (i = 0; i < thrd_lists->size(); i++) {
2485                 /* Iterate over actions in thread, starting from most recent */
2486                 action_list_t *list = &(*thrd_lists)[i];
2487                 action_list_t::reverse_iterator rit;
2488                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2489                         ModelAction *act = *rit;
2490
2491                         /* Only consider 'write' actions */
2492                         if (!act->is_write() || act == curr)
2493                                 continue;
2494
2495                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2496                         bool allow_read = true;
2497
2498                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2499                                 allow_read = false;
2500                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2501                                 allow_read = false;
2502
2503                         if (allow_read) {
2504                                 /* Only add feasible reads */
2505                                 mo_graph->startChanges();
2506                                 r_modification_order(curr, act);
2507                                 if (!is_infeasible())
2508                                         curr->get_node()->add_read_from_past(act);
2509                                 mo_graph->rollbackChanges();
2510                         }
2511
2512                         /* Include at most one act per-thread that "happens before" curr */
2513                         if (act->happens_before(curr))
2514                                 break;
2515                 }
2516         }
2517
2518         /* Inherit existing, promised future values */
2519         for (i = 0; i < promises.size(); i++) {
2520                 const Promise *promise = promises[i];
2521                 const ModelAction *promise_read = promise->get_reader(0);
2522                 if (promise_read->same_var(curr)) {
2523                         /* Only add feasible future-values */
2524                         mo_graph->startChanges();
2525                         r_modification_order(curr, promise);
2526                         if (!is_infeasible())
2527                                 curr->get_node()->add_read_from_promise(promise_read);
2528                         mo_graph->rollbackChanges();
2529                 }
2530         }
2531
2532         /* Finding no valid may-read-from values means the execution is doomed */
2533         if (!curr->get_node()->read_from_size()) {
2534                 priv->no_valid_reads = true;
2535                 set_assert();
2536         }
2537
2538         if (DBG_ENABLED()) {
2539                 model_print("Reached read action:\n");
2540                 curr->print();
2541                 model_print("Printing read_from_past\n");
2542                 curr->get_node()->print_read_from_past();
2543                 model_print("End printing read_from_past\n");
2544         }
2545 }
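/*
 * Illustrative sketch (not part of the checker; names are hypothetical): a
 * relaxed read for which build_may_read_from() collects several candidates.
 *
 *   std::atomic<int> x{0};
 *
 *   // Thread 1                              // Thread 2
 *   x.store(1, std::memory_order_relaxed);
 *   x.store(2, std::memory_order_relaxed);   int r = x.load(std::memory_order_relaxed);
 *
 * With no synchronization between the threads, the load may observe 0 (the
 * initial value), 1, or 2, so all three writes enter its may-read-from set;
 * choices that would make the execution infeasible are pruned via
 * r_modification_order() above.
 */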
2546
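/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Walks backwards along the reads-from chain of RMW operations starting at
 * @a write. The read is permitted if the chain reaches an uninitialized
 * write or if some write in the chain is a release that executed while the
 * reader's thread was on the sleep set; it is rejected as soon as a non-RMW
 * write is reached without either condition holding.
 *
 * @param curr The read action of the sleeping thread
 * @param write A candidate write for curr to read from
 * @return True if curr may read from write
 */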
2547 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2548 {
2549         for ( ; write != NULL; write = write->get_reads_from()) {
2550                 /* UNINIT actions don't have a Node, and they never sleep */
2551                 if (write->is_uninitialized())
2552                         return true;
2553                 Node *prevnode = write->get_node()->get_parent();
2554
2555                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2556                 if (write->is_release() && thread_sleep)
2557                         return true;
2558                 if (!write->is_rmw())
2559                         return false;
2560         }
2561         return true;
2562 }
2563
2564 /**
2565  * @brief Get an action representing an uninitialized atomic
2566  *
2567  * This function retrieves the cached UNINIT action from the current Node, creating and caching a new one if none exists yet
2568  *
2569  * @param curr The current action, which prompts the creation of an UNINIT action
2570  * @return A pointer to the UNINIT ModelAction
2571  */
2572 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2573 {
2574         Node *node = curr->get_node();
2575         ModelAction *act = node->get_uninit_action();
2576         if (!act) {
2577                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2578                 node->set_uninit_action(act);
2579         }
2580         act->create_cv(NULL);
2581         return act;
2582 }
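/*
 * Illustrative sketch (not part of the checker): a load from an atomic that
 * no thread has written gets the UNINIT action as a candidate, so it still
 * has something to read from (with value params->uninitvalue).
 *
 *   std::atomic<int> x;                           // never initialized or stored to
 *   int r = x.load(std::memory_order_relaxed);    // may read from the UNINIT action
 */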
2583
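/**
 * @brief Print an action list along with a simple hash of its contents
 *
 * Prints every action that has a valid (non-zero) sequence number, followed
 * by an order-sensitive hash computed over all actions in the list, which is
 * handy for quickly comparing traces between runs.
 */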
2584 static void print_list(action_list_t *list)
2585 {
2586         action_list_t::iterator it;
2587
2588         model_print("---------------------------------------------------------------------\n");
2589
2590         unsigned int hash = 0;
2591
2592         for (it = list->begin(); it != list->end(); it++) {
2593                 const ModelAction *act = *it;
2594                 if (act->get_seq_number() > 0)
2595                         act->print();
2596                 hash = hash^(hash<<3)^((*it)->hash());
2597         }
2598         model_print("HASH %u\n", hash);
2599         model_print("---------------------------------------------------------------------\n");
2600 }
2601
2602 #if SUPPORT_MOD_ORDER_DUMP
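/**
 * @brief Dump the current execution as a Graphviz "dot" graph
 *
 * Writes the modification-order graph plus reads-from (rf) and
 * sequenced-before (sb) edges for every action in the trace to
 * "<filename>.dot".
 *
 * @param filename The output file name, without the ".dot" extension
 */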
2603 void ModelExecution::dumpGraph(char *filename) const
2604 {
2605         char buffer[200];
2606         sprintf(buffer, "%s.dot", filename);
2607         FILE *file = fopen(buffer, "w");
2608         fprintf(file, "digraph %s {\n", filename);
2609         mo_graph->dumpNodes(file);
2610         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2611
2612         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2613                 ModelAction *act = *it;
2614                 if (act->is_read()) {
2615                         mo_graph->dot_print_node(file, act);
2616                         if (act->get_reads_from())
2617                                 mo_graph->dot_print_edge(file,
2618                                                 act->get_reads_from(),
2619                                                 act,
2620                                                 "label=\"rf\", color=red, weight=2");
2621                         else
2622                                 mo_graph->dot_print_edge(file,
2623                                                 act->get_reads_from_promise(),
2624                                                 act,
2625                                                 "label=\"rf\", color=red");
2626                 }
2627                 if (thread_array[id_to_int(act->get_tid())]) {
2628                         mo_graph->dot_print_edge(file,
2629                                         thread_array[id_to_int(act->get_tid())],
2630                                         act,
2631                                         "label=\"sb\", color=blue, weight=400");
2632                 }
2633
2634                 thread_array[id_to_int(act->get_tid())] = act;
2635         }
2636         fprintf(file, "}\n");
2637         model_free(thread_array);
2638         fclose(file);
2639 }
2640 #endif
2641
2642 /** @brief Prints an execution trace summary. */
2643 void ModelExecution::print_summary() const
2644 {
2645 #if SUPPORT_MOD_ORDER_DUMP
2646         char buffername[100];
2647         sprintf(buffername, "exec%04u", get_execution_number());
2648         mo_graph->dumpGraphToFile(buffername);
2649         sprintf(buffername, "graph%04u", get_execution_number());
2650         dumpGraph(buffername);
2651 #endif
2652
2653         model_print("Execution %d:", get_execution_number());
2654         if (isfeasibleprefix()) {
2655                 if (scheduler->all_threads_sleeping())
2656                         model_print(" SLEEP-SET REDUNDANT");
2657                 model_print("\n");
2658         } else
2659                 print_infeasibility(" INFEASIBLE");
2660         print_list(action_trace);
2661         model_print("\n");
2662         if (!promises.empty()) {
2663                 model_print("Pending promises:\n");
2664                 for (unsigned int i = 0; i < promises.size(); i++) {
2665                         model_print(" [P%u] ", i);
2666                         promises[i]->print();
2667                 }
2668                 model_print("\n");
2669         }
2670 }
2671
2672 /**
2673  * Add a Thread to the system for the first time. Should only be called once
2674  * per thread.
2675  * @param t The Thread to add
2676  */
2677 void ModelExecution::add_thread(Thread *t)
2678 {
2679         thread_map.put(id_to_int(t->get_id()), t);
2680         if (!t->is_model_thread())
2681                 scheduler->add_thread(t);
2682 }
2683
2684 /**
2685  * @brief Get a Thread reference by its ID
2686  * @param tid The Thread's ID
2687  * @return A Thread reference
2688  */
2689 Thread * ModelExecution::get_thread(thread_id_t tid) const
2690 {
2691         return thread_map.get(id_to_int(tid));
2692 }
2693
2694 /**
2695  * @brief Get a reference to the Thread in which a ModelAction was executed
2696  * @param act The ModelAction
2697  * @return A Thread reference
2698  */
2699 Thread * ModelExecution::get_thread(const ModelAction *act) const
2700 {
2701         return get_thread(act->get_tid());
2702 }
2703
2704 /**
2705  * @brief Get a Promise's "promise number"
2706  *
2707  * A "promise number" is an index number that is unique to a promise, valid
2708  * only for a specific snapshot of an execution trace. Promises may come and go
2709  * as they are generated and resolved, so an index only retains meaning for the
2710  * current snapshot.
2711  *
2712  * @param promise The Promise to check
2713  * @return The promise index, if the promise still is valid; otherwise -1
2714  */
2715 int ModelExecution::get_promise_number(const Promise *promise) const
2716 {
2717         for (unsigned int i = 0; i < promises.size(); i++)
2718                 if (promises[i] == promise)
2719                         return i;
2720         /* Not found */
2721         return -1;
2722 }
2723
2724 /**
2725  * @brief Check if a Thread is currently enabled
2726  * @param t The Thread to check
2727  * @return True if the Thread is currently enabled
2728  */
2729 bool ModelExecution::is_enabled(Thread *t) const
2730 {
2731         return scheduler->is_enabled(t);
2732 }
2733
2734 /**
2735  * @brief Check if a Thread is currently enabled
2736  * @param tid The ID of the Thread to check
2737  * @return True if the Thread is currently enabled
2738  */
2739 bool ModelExecution::is_enabled(thread_id_t tid) const
2740 {
2741         return scheduler->is_enabled(tid);
2742 }
2743
2744 /**
2745  * @brief Select the next thread to execute based on the current action
2746  *
2747  * RMW actions occur in two parts and cannot be split between threads.
2748  * Likewise, THREAD_CREATE actions should be followed by the execution of
2749  * their child thread. In either case, the current action determines the next thread to schedule.
2750  *
2751  * @param curr The current action
2752  * @return The next thread to run, if the current action will determine this
2753  * selection; otherwise NULL
2754  */
2755 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2756 {
2757         /* Do not split atomic RMW */
2758         if (curr->is_rmwr())
2759                 return get_thread(curr);
2760         /* Follow CREATE with the created thread */
2761         if (curr->get_type() == THREAD_CREATE)
2762                 return curr->get_thread_operand();
2763         return NULL;
2764 }
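/*
 * Illustrative sketch (not part of the checker): why an RMW must not be split.
 * A single source-level operation such as
 *
 *   x.fetch_add(1, std::memory_order_relaxed);
 *
 * is modeled as a read part (the "rmwr") followed by a write part. Scheduling
 * another thread between the two parts could let an intervening write slip in
 * and destroy the operation's atomicity, so the same thread is kept running
 * for the second half.
 */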
2765
2766 /** @return True if the execution has taken too many steps */
2767 bool ModelExecution::too_many_steps() const
2768 {
2769         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2770 }
2771
2772 /**
2773  * Takes the next step in the execution, if possible.
2774  * @param curr The current step to take
2775  * @return Returns the next Thread to run, if any; NULL if this execution
2776  * should terminate
2777  */
2778 Thread * ModelExecution::take_step(ModelAction *curr)
2779 {
2780         Thread *curr_thrd = get_thread(curr);
2781         ASSERT(curr_thrd->get_state() == THREAD_READY);
2782
2783         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2784         curr = check_current_action(curr);
2785         ASSERT(curr);
2786
2787         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2788                 scheduler->remove_thread(curr_thrd);
2789
2790         return action_select_next_thread(curr);
2791 }
2792
2793 /**
2794  * Launch end-of-execution release sequence fixups only when
2795  * the execution is otherwise feasible AND there are:
2796  *
2797  * (1) pending release sequences
2798  * (2) pending assertions that could be invalidated by a change
2799  * in clock vectors (i.e., data races)
2800  * (3) no pending promises
2801  */
2802 void ModelExecution::fixup_release_sequences()
2803 {
2804         while (!pending_rel_seqs.empty() &&
2805                         is_feasible_prefix_ignore_relseq() &&
2806                         !unrealizedraces.empty()) {
2807                 model_print("*** WARNING: release sequence fixup action "
2808                                 "(%zu pending release sequence(s)) ***\n",
2809                                 pending_rel_seqs.size());
2810                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2811                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2812                                 model_thread);
2813                 take_step(fixup);
2814         }
2815 }