// promise: add max_available_thread_idx() interface
// [c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 * (i.e., saved/restored between executions, via SNAPSHOTALLOC).
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* bug_message objects are owned by this struct; free them all */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to hand to the next created Thread; doubles as thread count */
	unsigned int next_thread_id;
	/** @brief Most recent sequence number handed out to a ModelAction */
	modelclock_t used_sequence_numbers;
	/** @brief Cached most-recent backtracking point (not owned) */
	ModelAction *next_backtrack;
	/** @brief Bug reports recorded during this execution (owned) */
	SnapVector<bug_message *> bugs;
	/** @brief A promise could not be kept (e.g., duplicate reader add) */
	bool failed_promise;
	/** @brief A load failed the read-from recency check with no alternative */
	bool too_many_reads;
	/* NOTE(review): not set anywhere in this chunk — presumably set by
	 * read-validation code elsewhere in the file; confirm */
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief An assertion (bug or infeasibility) halted this execution */
	bool asserted;

	SNAPSHOTALLOC
};
58
/**
 * @brief Constructor
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (fairness window, future-value delay, etc.)
 * @param scheduler The Scheduler that drives thread selection for this execution
 * @param node_stack The shared NodeStack of exploration choice points
 */
ModelExecution::ModelExecution(ModelChecker *m,
		struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	add_thread(model_thread);
	/* Give the scheduler a back-pointer to this execution */
	scheduler->register_engine(this);
}
86
87 /** @brief Destructor */
88 ModelExecution::~ModelExecution()
89 {
90         for (unsigned int i = 0; i < get_num_threads(); i++)
91                 delete get_thread(int_to_id(i));
92
93         for (unsigned int i = 0; i < promises.size(); i++)
94                 delete promises[i];
95
96         delete mo_graph;
97         delete priv;
98 }
99
/** @return The current execution's number, as tracked by the owning ModelChecker */
int ModelExecution::get_execution_number() const
{
	/* Pure delegation to the ModelChecker's counter */
	return model->get_execution_number();
}
104
105 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
106 {
107         action_list_t *tmp = hash->get(ptr);
108         if (tmp == NULL) {
109                 tmp = new action_list_t();
110                 hash->put(ptr, tmp);
111         }
112         return tmp;
113 }
114
115 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
116 {
117         SnapVector<action_list_t> *tmp = hash->get(ptr);
118         if (tmp == NULL) {
119                 tmp = new SnapVector<action_list_t>();
120                 hash->put(ptr, tmp);
121         }
122         return tmp;
123 }
124
125 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
126 {
127         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
128         if (wrv==NULL)
129                 return NULL;
130         unsigned int thread=id_to_int(tid);
131         if (thread < wrv->size())
132                 return &(*wrv)[thread];
133         else
134                 return NULL;
135 }
136
137 /** @return a thread ID for a new Thread */
138 thread_id_t ModelExecution::get_next_id()
139 {
140         return priv->next_thread_id++;
141 }
142
/** @return the number of user threads created during this execution */
unsigned int ModelExecution::get_num_threads() const
{
	/* next_thread_id is the count of IDs handed out so far */
	return priv->next_thread_id;
}
148
149 /** @return a sequence number for a new ModelAction */
150 modelclock_t ModelExecution::get_next_seq_num()
151 {
152         return ++priv->used_sequence_numbers;
153 }
154
/**
 * @brief Should the current action wake up a given thread?
 *
 * Consulted when @a thread sits in the sleep set: we wake it only if @a curr
 * could actually interact with the thread's pending action.
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	if (curr->is_rmwr())
		return false;
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
		return true;
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
		return true;
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		/* Only wake if the writer's fence-release is more recent than the
		 * sleeping thread's last action */
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
			return true;
	}
	return false;
}
182
183 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
184 {
185         for (unsigned int i = 0; i < get_num_threads(); i++) {
186                 Thread *thr = get_thread(int_to_id(i));
187                 if (scheduler->is_sleep_set(thr)) {
188                         if (should_wake_up(curr, thr))
189                                 /* Remove this thread from sleep set */
190                                 scheduler->remove_sleep(thr);
191                 }
192         }
193 }
194
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	/* Sticky flag; consulted when judging feasibility of this execution */
	priv->bad_synchronization = true;
}
201
202 bool ModelExecution::assert_bug(const char *msg)
203 {
204         priv->bugs.push_back(new bug_message(msg));
205
206         if (isfeasibleprefix()) {
207                 set_assert();
208                 return true;
209         }
210         return false;
211 }
212
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}
218
/** @return The list of bug reports for this execution (owned by this object;
 *  callers must not free it or its contents) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
223
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
234
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	/* Sticky; checked via has_asserted() */
	priv->asserted = true;
}
244
245 /**
246  * Check if we are in a deadlock. Should only be called at the end of an
247  * execution, although it should not give false positives in the middle of an
248  * execution (there should be some ENABLED thread).
249  *
250  * @return True if program is in a deadlock; false otherwise
251  */
252 bool ModelExecution::is_deadlocked() const
253 {
254         bool blocking_threads = false;
255         for (unsigned int i = 0; i < get_num_threads(); i++) {
256                 thread_id_t tid = int_to_id(i);
257                 if (is_enabled(tid))
258                         return false;
259                 Thread *t = get_thread(tid);
260                 if (!t->is_model_thread() && t->get_pending())
261                         blocking_threads = true;
262         }
263         return blocking_threads;
264 }
265
266 /**
267  * Check if this is a complete execution. That is, have all thread completed
268  * execution (rather than exiting because sleep sets have forced a redundant
269  * execution).
270  *
271  * @return True if the execution is complete.
272  */
273 bool ModelExecution::is_complete_execution() const
274 {
275         for (unsigned int i = 0; i < get_num_threads(); i++)
276                 if (is_enabled(int_to_id(i)))
277                         return false;
278         return true;
279 }
280
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread candidates, indexed by thread id */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	/* NOTE: rit deliberately resumes from the release found above */
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions can't form a backtracking pair with act */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire-fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Among threads with both a prior load and a later acquire-fence,
	 * pick the latest such fence as the backtracking point */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
353
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primarily includes pairs
 * of synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act, or NULL if none
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	/* case ATOMIC_FENCE: fences don't directly cause backtracking */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return the later of the two */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with a failed trylock in another thread */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A wait conflicts with other threads' failed trylocks and notifies */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with waits in other threads */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
448
449 /** This method finds backtracking points where we should try to
450  * reorder the parameter ModelAction against.
451  *
452  * @param the ModelAction to find backtracking points for.
453  */
454 void ModelExecution::set_backtracking(ModelAction *act)
455 {
456         Thread *t = get_thread(act);
457         ModelAction *prev = get_last_conflict(act);
458         if (prev == NULL)
459                 return;
460
461         Node *node = prev->get_node()->get_parent();
462
463         /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
464         int low_tid, high_tid;
465         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
466                 low_tid = id_to_int(act->get_tid());
467                 high_tid = low_tid + 1;
468         } else {
469                 low_tid = 0;
470                 high_tid = get_num_threads();
471         }
472
473         for (int i = low_tid; i < high_tid; i++) {
474                 thread_id_t tid = int_to_id(i);
475
476                 /* Make sure this thread can be enabled here. */
477                 if (i >= node->get_num_threads())
478                         break;
479
480                 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
481                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
482                 if (node->enabled_status(tid) != THREAD_ENABLED)
483                         continue;
484
485                 /* Check if this has been explored already */
486                 if (node->has_been_explored(tid))
487                         continue;
488
489                 /* See if fairness allows */
490                 if (params->fairwindow != 0 && !node->has_priority(tid)) {
491                         bool unfair = false;
492                         for (int t = 0; t < node->get_num_threads(); t++) {
493                                 thread_id_t tother = int_to_id(t);
494                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
495                                         unfair = true;
496                                         break;
497                                 }
498                         }
499                         if (unfair)
500                                 continue;
501                 }
502
503                 /* See if CHESS-like yield fairness allows */
504                 if (params->yieldon) {
505                         bool unfair = false;
506                         for (int t = 0; t < node->get_num_threads(); t++) {
507                                 thread_id_t tother = int_to_id(t);
508                                 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
509                                         unfair = true;
510                                         break;
511                                 }
512                         }
513                         if (unfair)
514                                 continue;
515                 }
516
517                 /* Cache the latest backtracking point */
518                 set_latest_backtrack(prev);
519
520                 /* If this is a new backtracking point, mark the tree */
521                 if (!node->set_backtrack(tid))
522                         continue;
523                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
524                                         id_to_int(prev->get_tid()),
525                                         id_to_int(t->get_id()));
526                 if (DBG_ENABLED()) {
527                         prev->print();
528                         act->print();
529                 }
530         }
531 }
532
533 /**
534  * @brief Cache the a backtracking point as the "most recent", if eligible
535  *
536  * Note that this does not prepare the NodeStack for this backtracking
537  * operation, it only caches the action on a per-execution basis
538  *
539  * @param act The operation at which we should explore a different next action
540  * (i.e., backtracking point)
541  * @return True, if this action is now the most recent backtracking point;
542  * false otherwise
543  */
544 bool ModelExecution::set_latest_backtrack(ModelAction *act)
545 {
546         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
547                 priv->next_backtrack = act;
548                 return true;
549         }
550         return false;
551 }
552
553 /**
554  * Returns last backtracking point. The model checker will explore a different
555  * path for this point in the next execution.
556  * @return The ModelAction at which the next execution should diverge.
557  */
558 ModelAction * ModelExecution::get_next_backtrack()
559 {
560         ModelAction *next = priv->next_backtrack;
561         priv->next_backtrack = NULL;
562         return next;
563 }
564
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* The loop only repeats when a too-recent read-from-past choice is
	 * rolled back and retried; every other path returns */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			/* Read from a store already in the trace */
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Another candidate exists: undo mo edits and retry */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No alternatives left; flag and proceed anyway */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			/* Read a value some thread has promised to write later */
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a fresh Promise owned by this execution's list */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Publish the value read as the thread's return value */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
628
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();
	/* NOTE(review): state is dereferenced below without a NULL check —
	 * presumably every action reaching here carries a mutex; verify callers */

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds iff the mutex is currently free */
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Using the lock before its allocation clock is an access-order bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		/* misc == 0 selects the "actually wait" branch of the wait choice */
		if (curr->get_node()->get_misc() == 0) {
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The Node's misc value selects which waiter is woken */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
724
725 /**
726  * @brief Check if the current pending promises allow a future value to be sent
727  *
728  * If one of the following is true:
729  *  (a) there are no pending promises
730  *  (b) the reader and writer do not cross any promises
731  * Then, it is safe to pass a future value back now.
732  *
733  * Otherwise, we must save the pending future value until (a) or (b) is true
734  *
735  * @param writer The operation which sends the future value. Must be a write.
736  * @param reader The operation which will observe the value. Must be a read.
737  * @return True if the future value can be sent now; false if it must wait.
738  */
739 bool ModelExecution::promises_may_allow(const ModelAction *writer,
740                 const ModelAction *reader) const
741 {
742         if (promises.empty())
743                 return true;
744         for (int i = promises.size() - 1; i >= 0; i--) {
745                 ModelAction *pr = promises[i]->get_reader(0);
746                 //reader is after promise...doesn't cross any promise
747                 if (*reader > *pr)
748                         return true;
749                 //writer is after promise, reader before...bad...
750                 if (*writer > *pr)
751                         return false;
752         }
753         return true;
754 }
755
756 /**
757  * @brief Add a future value to a reader
758  *
759  * This function performs a few additional checks to ensure that the future
760  * value can be feasibly observed by the reader
761  *
762  * @param writer The operation whose value is sent. Must be a write.
763  * @param reader The read operation which may read the future value. Must be a read.
764  */
765 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
766 {
767         /* Do more ambitious checks now that mo is more complete */
768         if (!mo_may_allow(writer, reader))
769                 return;
770
771         Node *node = reader->get_node();
772
773         /* Find an ancestor thread which exists at the time of the reader */
774         Thread *write_thread = get_thread(writer);
775         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
776                 write_thread = write_thread->get_parent();
777
778         struct future_value fv = {
779                 writer->get_write_value(),
780                 writer->get_seq_number() + params->maxfuturedelay,
781                 write_thread->get_id(),
782         };
783         if (node->add_future_value(fv))
784                 set_latest_backtrack(reader);
785 }
786
/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	bool updated_mod_order = w_modification_order(curr, &send_fv);
	/* This write may resolve (at most) one outstanding promise */
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until crossing promises are gone */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values */
	/* Iterate backwards so erase() doesn't disturb unvisited indices */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
838
839 /**
840  * Process a fence ModelAction
841  * @param curr The ModelAction to process
842  * @return True if synchronization was updated
843  */
844 bool ModelExecution::process_fence(ModelAction *curr)
845 {
846         /*
847          * fence-relaxed: no-op
848          * fence-release: only log the occurence (not in this function), for
849          *   use in later synchronization
850          * fence-acquire (this function): search for hypothetical release
851          *   sequences
852          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
853          */
854         bool updated = false;
855         if (curr->is_acquire()) {
856                 action_list_t *list = &action_trace;
857                 action_list_t::reverse_iterator rit;
858                 /* Find X : is_read(X) && X --sb-> curr */
859                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
860                         ModelAction *act = *rit;
861                         if (act == curr)
862                                 continue;
863                         if (act->get_tid() != curr->get_tid())
864                                 continue;
865                         /* Stop at the beginning of the thread */
866                         if (act->is_thread_start())
867                                 break;
868                         /* Stop once we reach a prior fence-acquire */
869                         if (act->is_fence() && act->is_acquire())
870                                 break;
871                         if (!act->is_read())
872                                 continue;
873                         /* read-acquire will find its own release sequences */
874                         if (act->is_acquire())
875                                 continue;
876
877                         /* Establish hypothetical release sequences */
878                         rel_heads_list_t release_heads;
879                         get_release_seq_heads(curr, act, &release_heads);
880                         for (unsigned int i = 0; i < release_heads.size(); i++)
881                                 synchronize(release_heads[i], curr);
882                         if (release_heads.size() != 0)
883                                 updated = true;
884                 }
885         }
886         return updated;
887 }
888
889 /**
890  * @brief Process the current action for thread-related activity
891  *
892  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
893  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
894  * synchronization, etc.  This function is a no-op for non-THREAD actions
895  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
896  *
897  * @param curr The current action
898  * @return True if synchronization was updated or a thread completed
899  */
900 bool ModelExecution::process_thread_action(ModelAction *curr)
901 {
902         bool updated = false;
903
904         switch (curr->get_type()) {
905         case THREAD_CREATE: {
906                 thrd_t *thrd = (thrd_t *)curr->get_location();
907                 struct thread_params *params = (struct thread_params *)curr->get_value();
908                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
909                 add_thread(th);
910                 th->set_creation(curr);
911                 /* Promises can be satisfied by children */
912                 for (unsigned int i = 0; i < promises.size(); i++) {
913                         Promise *promise = promises[i];
914                         if (promise->thread_is_available(curr->get_tid()))
915                                 promise->add_thread(th->get_id());
916                 }
917                 break;
918         }
919         case THREAD_JOIN: {
920                 Thread *blocking = curr->get_thread_operand();
921                 ModelAction *act = get_last_action(blocking->get_id());
922                 synchronize(act, curr);
923                 updated = true; /* trigger rel-seq checks */
924                 break;
925         }
926         case THREAD_FINISH: {
927                 Thread *th = get_thread(curr);
928                 /* Wake up any joining threads */
929                 for (unsigned int i = 0; i < get_num_threads(); i++) {
930                         Thread *waiting = get_thread(int_to_id(i));
931                         if (waiting->waiting_on() == th &&
932                                         waiting->get_pending()->is_thread_join())
933                                 scheduler->wake(waiting);
934                 }
935                 th->complete();
936                 /* Completed thread can't satisfy promises */
937                 for (unsigned int i = 0; i < promises.size(); i++) {
938                         Promise *promise = promises[i];
939                         if (promise->thread_is_available(th->get_id()))
940                                 if (promise->eliminate_thread(th->get_id()))
941                                         priv->failed_promise = true;
942                 }
943                 updated = true; /* trigger rel-seq checks */
944                 break;
945         }
946         case THREAD_START: {
947                 check_promises(curr->get_tid(), NULL, curr->get_cv());
948                 break;
949         }
950         default:
951                 break;
952         }
953
954         return updated;
955 }
956
957 /**
958  * @brief Process the current action for release sequence fixup activity
959  *
960  * Performs model-checker release sequence fixups for the current action,
961  * forcing a single pending release sequence to break (with a given, potential
962  * "loose" write) or to complete (i.e., synchronize). If a pending release
963  * sequence forms a complete release sequence, then we must perform the fixup
964  * synchronization, mo_graph additions, etc.
965  *
966  * @param curr The current action; must be a release sequence fixup action
967  * @param work_queue The work queue to which to add work items as they are
968  * generated
969  */
970 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
971 {
972         const ModelAction *write = curr->get_node()->get_relseq_break();
973         struct release_seq *sequence = pending_rel_seqs.back();
974         pending_rel_seqs.pop_back();
975         ASSERT(sequence);
976         ModelAction *acquire = sequence->acquire;
977         const ModelAction *rf = sequence->rf;
978         const ModelAction *release = sequence->release;
979         ASSERT(acquire);
980         ASSERT(release);
981         ASSERT(rf);
982         ASSERT(release->same_thread(rf));
983
984         if (write == NULL) {
985                 /**
986                  * @todo Forcing a synchronization requires that we set
987                  * modification order constraints. For instance, we can't allow
988                  * a fixup sequence in which two separate read-acquire
989                  * operations read from the same sequence, where the first one
990                  * synchronizes and the other doesn't. Essentially, we can't
991                  * allow any writes to insert themselves between 'release' and
992                  * 'rf'
993                  */
994
995                 /* Must synchronize */
996                 if (!synchronize(release, acquire))
997                         return;
998                 /* Re-check all pending release sequences */
999                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1000                 /* Re-check act for mo_graph edges */
1001                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1002
1003                 /* propagate synchronization to later actions */
1004                 action_list_t::reverse_iterator rit = action_trace.rbegin();
1005                 for (; (*rit) != acquire; rit++) {
1006                         ModelAction *propagate = *rit;
1007                         if (acquire->happens_before(propagate)) {
1008                                 synchronize(acquire, propagate);
1009                                 /* Re-check 'propagate' for mo_graph edges */
1010                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1011                         }
1012                 }
1013         } else {
1014                 /* Break release sequence with new edges:
1015                  *   release --mo--> write --mo--> rf */
1016                 mo_graph->addEdge(release, write);
1017                 mo_graph->addEdge(write, rf);
1018         }
1019
1020         /* See if we have realized a data race */
1021         checkDataRaces();
1022 }
1023
1024 /**
1025  * Initialize the current action by performing one or more of the following
1026  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1027  * in the NodeStack, manipulating backtracking sets, allocating and
1028  * initializing clock vectors, and computing the promises to fulfill.
1029  *
1030  * @param curr The current action, as passed from the user context; may be
1031  * freed/invalidated after the execution of this function, with a different
1032  * action "returned" its place (pass-by-reference)
1033  * @return True if curr is a newly-explored action; false otherwise
1034  */
1035 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1036 {
1037         ModelAction *newcurr;
1038
1039         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1040                 newcurr = process_rmw(*curr);
1041                 delete *curr;
1042
1043                 if (newcurr->is_rmw())
1044                         compute_promises(newcurr);
1045
1046                 *curr = newcurr;
1047                 return false;
1048         }
1049
1050         (*curr)->set_seq_number(get_next_seq_num());
1051
1052         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1053         if (newcurr) {
1054                 /* First restore type and order in case of RMW operation */
1055                 if ((*curr)->is_rmwr())
1056                         newcurr->copy_typeandorder(*curr);
1057
1058                 ASSERT((*curr)->get_location() == newcurr->get_location());
1059                 newcurr->copy_from_new(*curr);
1060
1061                 /* Discard duplicate ModelAction; use action from NodeStack */
1062                 delete *curr;
1063
1064                 /* Always compute new clock vector */
1065                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1066
1067                 *curr = newcurr;
1068                 return false; /* Action was explored previously */
1069         } else {
1070                 newcurr = *curr;
1071
1072                 /* Always compute new clock vector */
1073                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1074
1075                 /* Assign most recent release fence */
1076                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1077
1078                 /*
1079                  * Perform one-time actions when pushing new ModelAction onto
1080                  * NodeStack
1081                  */
1082                 if (newcurr->is_write())
1083                         compute_promises(newcurr);
1084                 else if (newcurr->is_relseq_fixup())
1085                         compute_relseq_breakwrites(newcurr);
1086                 else if (newcurr->is_wait())
1087                         newcurr->get_node()->set_misc_max(2);
1088                 else if (newcurr->is_notify_one()) {
1089                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1090                 }
1091                 return true; /* This was a new ModelAction */
1092         }
1093 }
1094
1095 /**
1096  * @brief Establish reads-from relation between two actions
1097  *
1098  * Perform basic operations involved with establishing a concrete rf relation,
1099  * including setting the ModelAction data and checking for release sequences.
1100  *
1101  * @param act The action that is reading (must be a read)
1102  * @param rf The action from which we are reading (must be a write)
1103  *
1104  * @return True if this read established synchronization
1105  */
1106 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1107 {
1108         ASSERT(rf);
1109         ASSERT(rf->is_write());
1110
1111         act->set_read_from(rf);
1112         if (act->is_acquire()) {
1113                 rel_heads_list_t release_heads;
1114                 get_release_seq_heads(act, act, &release_heads);
1115                 int num_heads = release_heads.size();
1116                 for (unsigned int i = 0; i < release_heads.size(); i++)
1117                         if (!synchronize(release_heads[i], act))
1118                                 num_heads--;
1119                 return num_heads > 0;
1120         }
1121         return false;
1122 }
1123
1124 /**
1125  * @brief Synchronizes two actions
1126  *
1127  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1128  * This function performs the synchronization as well as providing other hooks
1129  * for other checks along with synchronization.
1130  *
1131  * @param first The left-hand side of the synchronizes-with relation
1132  * @param second The right-hand side of the synchronizes-with relation
1133  * @return True if the synchronization was successful (i.e., was consistent
1134  * with the execution order); false otherwise
1135  */
1136 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1137 {
1138         if (*second < *first) {
1139                 set_bad_synchronization();
1140                 return false;
1141         }
1142         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1143         return second->synchronize_with(first);
1144 }
1145
1146 /**
1147  * Check promises and eliminate potentially-satisfying threads when a thread is
1148  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1149  * no longer satisfy a promise generated from that thread.
1150  *
1151  * @param blocker The thread on which a thread is waiting
1152  * @param waiting The waiting thread
1153  */
1154 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1155 {
1156         for (unsigned int i = 0; i < promises.size(); i++) {
1157                 Promise *promise = promises[i];
1158                 if (!promise->thread_is_available(waiting->get_id()))
1159                         continue;
1160                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1161                         ModelAction *reader = promise->get_reader(j);
1162                         if (reader->get_tid() != blocker->get_id())
1163                                 continue;
1164                         if (promise->eliminate_thread(waiting->get_id())) {
1165                                 /* Promise has failed */
1166                                 priv->failed_promise = true;
1167                         } else {
1168                                 /* Only eliminate the 'waiting' thread once */
1169                                 return;
1170                         }
1171                 }
1172         }
1173 }
1174
1175 /**
1176  * @brief Check whether a model action is enabled.
1177  *
1178  * Checks whether a lock or join operation would be successful (i.e., is the
1179  * lock already locked, or is the joined thread already complete). If not, put
1180  * the action in a waiter list.
1181  *
1182  * @param curr is the ModelAction to check whether it is enabled.
1183  * @return a bool that indicates whether the action is enabled.
1184  */
1185 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1186         if (curr->is_lock()) {
1187                 std::mutex *lock = curr->get_mutex();
1188                 struct std::mutex_state *state = lock->get_state();
1189                 if (state->locked)
1190                         return false;
1191         } else if (curr->is_thread_join()) {
1192                 Thread *blocking = curr->get_thread_operand();
1193                 if (!blocking->is_complete()) {
1194                         thread_blocking_check_promises(blocking, get_thread(curr));
1195                         return false;
1196                 }
1197         }
1198
1199         return true;
1200 }
1201
1202 /**
1203  * This is the heart of the model checker routine. It performs model-checking
1204  * actions corresponding to a given "current action." Among other processes, it
1205  * calculates reads-from relationships, updates synchronization clock vectors,
1206  * forms a memory_order constraints graph, and handles replay/backtrack
1207  * execution when running permutations of previously-observed executions.
1208  *
1209  * @param curr The current action to process
1210  * @return The ModelAction that is actually executed; may be different than
1211  * curr; may be NULL, if the current action is not enabled to run
1212  */
1213 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1214 {
1215         ASSERT(curr);
1216         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1217         bool newly_explored = initialize_curr_action(&curr);
1218
1219         DBG();
1220
1221         wake_up_sleeping_actions(curr);
1222
1223         /* Compute fairness information for CHESS yield algorithm */
1224         if (params->yieldon) {
1225                 curr->get_node()->update_yield(scheduler);
1226         }
1227
1228         /* Add the action to lists before any other model-checking tasks */
1229         if (!second_part_of_rmw)
1230                 add_action_to_lists(curr);
1231
1232         /* Build may_read_from set for newly-created actions */
1233         if (newly_explored && curr->is_read())
1234                 build_may_read_from(curr);
1235
1236         /* Initialize work_queue with the "current action" work */
1237         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1238         while (!work_queue.empty() && !has_asserted()) {
1239                 WorkQueueEntry work = work_queue.front();
1240                 work_queue.pop_front();
1241
1242                 switch (work.type) {
1243                 case WORK_CHECK_CURR_ACTION: {
1244                         ModelAction *act = work.action;
1245                         bool update = false; /* update this location's release seq's */
1246                         bool update_all = false; /* update all release seq's */
1247
1248                         if (process_thread_action(curr))
1249                                 update_all = true;
1250
1251                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1252                                 update = true;
1253
1254                         if (act->is_write() && process_write(act))
1255                                 update = true;
1256
1257                         if (act->is_fence() && process_fence(act))
1258                                 update_all = true;
1259
1260                         if (act->is_mutex_op() && process_mutex(act))
1261                                 update_all = true;
1262
1263                         if (act->is_relseq_fixup())
1264                                 process_relseq_fixup(curr, &work_queue);
1265
1266                         if (update_all)
1267                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1268                         else if (update)
1269                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1270                         break;
1271                 }
1272                 case WORK_CHECK_RELEASE_SEQ:
1273                         resolve_release_sequences(work.location, &work_queue);
1274                         break;
1275                 case WORK_CHECK_MO_EDGES: {
1276                         /** @todo Complete verification of work_queue */
1277                         ModelAction *act = work.action;
1278                         bool updated = false;
1279
1280                         if (act->is_read()) {
1281                                 const ModelAction *rf = act->get_reads_from();
1282                                 const Promise *promise = act->get_reads_from_promise();
1283                                 if (rf) {
1284                                         if (r_modification_order(act, rf))
1285                                                 updated = true;
1286                                 } else if (promise) {
1287                                         if (r_modification_order(act, promise))
1288                                                 updated = true;
1289                                 }
1290                         }
1291                         if (act->is_write()) {
1292                                 if (w_modification_order(act, NULL))
1293                                         updated = true;
1294                         }
1295                         mo_graph->commitChanges();
1296
1297                         if (updated)
1298                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1299                         break;
1300                 }
1301                 default:
1302                         ASSERT(false);
1303                         break;
1304                 }
1305         }
1306
1307         check_curr_backtracking(curr);
1308         set_backtracking(curr);
1309         return curr;
1310 }
1311
1312 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1313 {
1314         Node *currnode = curr->get_node();
1315         Node *parnode = currnode->get_parent();
1316
1317         if ((parnode && !parnode->backtrack_empty()) ||
1318                          !currnode->misc_empty() ||
1319                          !currnode->read_from_empty() ||
1320                          !currnode->promise_empty() ||
1321                          !currnode->relseq_break_empty()) {
1322                 set_latest_backtrack(curr);
1323         }
1324 }
1325
1326 bool ModelExecution::promises_expired() const
1327 {
1328         for (unsigned int i = 0; i < promises.size(); i++) {
1329                 Promise *promise = promises[i];
1330                 if (promise->get_expiration() < priv->used_sequence_numbers)
1331                         return true;
1332         }
1333         return false;
1334 }
1335
1336 /**
1337  * This is the strongest feasibility check available.
1338  * @return whether the current trace (partial or complete) must be a prefix of
1339  * a feasible trace.
1340  */
1341 bool ModelExecution::isfeasibleprefix() const
1342 {
1343         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1344 }
1345
1346 /**
1347  * Print disagnostic information about an infeasible execution
1348  * @param prefix A string to prefix the output with; if NULL, then a default
1349  * message prefix will be provided
1350  */
1351 void ModelExecution::print_infeasibility(const char *prefix) const
1352 {
1353         char buf[100];
1354         char *ptr = buf;
1355         if (mo_graph->checkForCycles())
1356                 ptr += sprintf(ptr, "[mo cycle]");
1357         if (priv->failed_promise)
1358                 ptr += sprintf(ptr, "[failed promise]");
1359         if (priv->too_many_reads)
1360                 ptr += sprintf(ptr, "[too many reads]");
1361         if (priv->no_valid_reads)
1362                 ptr += sprintf(ptr, "[no valid reads-from]");
1363         if (priv->bad_synchronization)
1364                 ptr += sprintf(ptr, "[bad sw ordering]");
1365         if (promises_expired())
1366                 ptr += sprintf(ptr, "[promise expired]");
1367         if (promises.size() != 0)
1368                 ptr += sprintf(ptr, "[unresolved promise]");
1369         if (ptr != buf)
1370                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1371 }
1372
1373 /**
1374  * Returns whether the current completed trace is feasible, except for pending
1375  * release sequences.
1376  */
1377 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1378 {
1379         return !is_infeasible() && promises.size() == 0;
1380 }
1381
1382 /**
1383  * Check if the current partial trace is infeasible. Does not check any
1384  * end-of-execution flags, which might rule out the execution. Thus, this is
1385  * useful only for ruling an execution as infeasible.
1386  * @return whether the current partial trace is infeasible.
1387  */
1388 bool ModelExecution::is_infeasible() const
1389 {
1390         return mo_graph->checkForCycles() ||
1391                 priv->no_valid_reads ||
1392                 priv->failed_promise ||
1393                 priv->too_many_reads ||
1394                 priv->bad_synchronization ||
1395                 promises_expired();
1396 }
1397
/** Close out a RMWR by converting previous RMWR into a RMW or READ.
 * @param act The second half of the RMW (an RMW or RMWC action)
 * @return The prior RMWR action, now merged with act
 */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
	/* The previous action in this thread is the RMWR to merge into */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	if (act->is_rmw()) {
		/* An RMW is mo-ordered immediately after its reads-from source
		 * (which may be a concrete write or an unresolved Promise) */
		if (lastread->get_reads_from())
			mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
		else
			mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
		mo_graph->commitChanges();
	}
	return lastread;
}
1411
1412 /**
1413  * A helper function for ModelExecution::check_recency, to check if the current
1414  * thread is able to read from a different write/promise for 'params.maxreads'
1415  * number of steps and if that write/promise should become visible (i.e., is
1416  * ordered later in the modification order). This helps model memory liveness.
1417  *
1418  * @param curr The current action. Must be a read.
1419  * @param rf The write/promise from which we plan to read
1420  * @param other_rf The write/promise from which we may read
1421  * @return True if we were able to read from other_rf for params.maxreads steps
1422  */
1423 template <typename T, typename U>
1424 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1425 {
1426         /* Need a different write/promise */
1427         if (other_rf->equals(rf))
1428                 return false;
1429
1430         /* Only look for "newer" writes/promises */
1431         if (!mo_graph->checkReachable(rf, other_rf))
1432                 return false;
1433
1434         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1435         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1436         action_list_t::reverse_iterator rit = list->rbegin();
1437         ASSERT((*rit) == curr);
1438         /* Skip past curr */
1439         rit++;
1440
1441         /* Does this write/promise work for everyone? */
1442         for (int i = 0; i < params->maxreads; i++, rit++) {
1443                 ModelAction *act = *rit;
1444                 if (!act->may_read_from(other_rf))
1445                         return false;
1446         }
1447         return true;
1448 }
1449
1450 /**
1451  * Checks whether a thread has read from the same write or Promise for too many
1452  * times without seeing the effects of a later write/Promise.
1453  *
1454  * Basic idea:
1455  * 1) there must a different write/promise that we could read from,
1456  * 2) we must have read from the same write/promise in excess of maxreads times,
1457  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1458  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1459  *
1460  * If so, we decide that the execution is no longer feasible.
1461  *
1462  * @param curr The current action. Must be a read.
1463  * @param rf The ModelAction/Promise from which we might read.
1464  * @return True if the read should succeed; false otherwise
1465  */
1466 template <typename T>
1467 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1468 {
1469         if (!params->maxreads)
1470                 return true;
1471
1472         //NOTE: Next check is just optimization, not really necessary....
1473         if (curr->get_node()->get_read_from_past_size() +
1474                         curr->get_node()->get_read_from_promise_size() <= 1)
1475                 return true;
1476
1477         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1478         int tid = id_to_int(curr->get_tid());
1479         ASSERT(tid < (int)thrd_lists->size());
1480         action_list_t *list = &(*thrd_lists)[tid];
1481         action_list_t::reverse_iterator rit = list->rbegin();
1482         ASSERT((*rit) == curr);
1483         /* Skip past curr */
1484         rit++;
1485
1486         action_list_t::reverse_iterator ritcopy = rit;
1487         /* See if we have enough reads from the same value */
1488         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1489                 if (ritcopy == list->rend())
1490                         return true;
1491                 ModelAction *act = *ritcopy;
1492                 if (!act->is_read())
1493                         return true;
1494                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1495                         return true;
1496                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1497                         return true;
1498                 if (act->get_node()->get_read_from_past_size() +
1499                                 act->get_node()->get_read_from_promise_size() <= 1)
1500                         return true;
1501         }
1502         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1503                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1504                 if (should_read_instead(curr, rf, write))
1505                         return false; /* liveness failure */
1506         }
1507         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1508                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1509                 if (should_read_instead(curr, rf, promise))
1510                         return false; /* liveness failure */
1511         }
1512         return true;
1513 }
1514
1515 /**
1516  * @brief Updates the mo_graph with the constraints imposed from the current
1517  * read.
1518  *
1519  * Basic idea is the following: Go through each other thread and find
1520  * the last action that happened before our read.  Two cases:
1521  *
1522  * -# The action is a write: that write must either occur before
1523  * the write we read from or be the write we read from.
1524  * -# The action is a read: the write that that action read from
1525  * must occur before the write we read from or be the same write.
1526  *
1527  * @param curr The current action. Must be a read.
1528  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1529  * @return True if modification order edges were added; false otherwise
1530  */
1531 template <typename rf_type>
1532 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1533 {
1534         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1535         unsigned int i;
1536         bool added = false;
1537         ASSERT(curr->is_read());
1538
1539         /* Last SC fence in the current thread */
1540         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
        /* Last SC write at this location; only relevant when curr is seq_cst */
1541         ModelAction *last_sc_write = NULL;
1542         if (curr->is_seqcst())
1543                 last_sc_write = get_last_seq_cst_write(curr);
1544
1545         /* Iterate over all threads */
1546         for (i = 0; i < thrd_lists->size(); i++) {
1547                 /* Last SC fence in thread i */
1548                 ModelAction *last_sc_fence_thread_local = NULL;
1549                 if (int_to_id((int)i) != curr->get_tid())
1550                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1551
1552                 /* Last SC fence in thread i, before last SC fence in current thread */
1553                 ModelAction *last_sc_fence_thread_before = NULL;
1554                 if (last_sc_fence_local)
1555                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1556
1557                 /* Iterate over actions in thread, starting from most recent */
1558                 action_list_t *list = &(*thrd_lists)[i];
1559                 action_list_t::reverse_iterator rit;
1560                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1561                         ModelAction *act = *rit;
1562
1563                         /* Skip curr */
1564                         if (act == curr)
1565                                 continue;
1566                         /* Don't want to add reflexive edges on 'rf' */
1567                         if (act->equals(rf)) {
                                /* rf happens-before curr: everything earlier in
                                 * this thread is already ordered; stop scanning */
1568                                 if (act->happens_before(curr))
1569                                         break;
1570                                 else
1571                                         continue;
1572                         }
1573
1574                         if (act->is_write()) {
1575                                 /* C++, Section 29.3 statement 5 */
1576                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1577                                                 *act < *last_sc_fence_thread_local) {
1578                                         added = mo_graph->addEdge(act, rf) || added;
1579                                         break;
1580                                 }
1581                                 /* C++, Section 29.3 statement 4 */
1582                                 else if (act->is_seqcst() && last_sc_fence_local &&
1583                                                 *act < *last_sc_fence_local) {
1584                                         added = mo_graph->addEdge(act, rf) || added;
1585                                         break;
1586                                 }
1587                                 /* C++, Section 29.3 statement 6 */
1588                                 else if (last_sc_fence_thread_before &&
1589                                                 *act < *last_sc_fence_thread_before) {
1590                                         added = mo_graph->addEdge(act, rf) || added;
1591                                         break;
1592                                 }
1593                         }
1594
1595                         /* C++, Section 29.3 statement 3 (second subpoint) */
1596                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1597                                 added = mo_graph->addEdge(act, rf) || added;
1598                                 break;
1599                         }
1600
1601                         /*
1602                          * Include at most one act per-thread that "happens
1603                          * before" curr
1604                          */
1605                         if (act->happens_before(curr)) {
                                /* Case 1: act is a write; it must be mo-before rf */
1606                                 if (act->is_write()) {
1607                                         added = mo_graph->addEdge(act, rf) || added;
1608                                 } else {
                                        /* Case 2: act is a read; its source must be
                                         * mo-before rf (unless it is rf itself) */
1609                                         const ModelAction *prevrf = act->get_reads_from();
1610                                         const Promise *prevrf_promise = act->get_reads_from_promise();
                                        /* NOTE(review): assumes a read with no
                                         * reads-from store always has a reads-from
                                         * promise (prevrf_promise non-NULL) — confirm
                                         * that invariant holds for all read actions */
1611                                         if (prevrf) {
1612                                                 if (!prevrf->equals(rf))
1613                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1614                                         } else if (!prevrf_promise->equals(rf)) {
1615                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1616                                         }
1617                                 }
1618                                 break;
1619                         }
1620                 }
1621         }
1622
1623         /*
1624          * All compatible, thread-exclusive promises must be ordered after any
1625          * concrete loads from the same thread
1626          */
1627         for (unsigned int i = 0; i < promises.size(); i++)
1628                 if (promises[i]->is_compatible_exclusive(curr))
1629                         added = mo_graph->addEdge(rf, promises[i]) || added;
1630
1631         return added;
1632 }
1633
1634 /**
1635  * Updates the mo_graph with the constraints imposed from the current write.
1636  *
1637  * Basic idea is the following: Go through each other thread and find
1638  * the latest action that happened before our write.  Two cases:
1639  *
1640  * (1) The action is a write => that write must occur before
1641  * the current write
1642  *
1643  * (2) The action is a read => the write that that action read from
1644  * must occur before the current write.
1645  *
1646  * This method also handles two other issues:
1647  *
1648  * (I) Sequential Consistency: Making sure that if the current write is
1649  * seq_cst, that it occurs after the previous seq_cst write.
1650  *
1651  * (II) Sending the write back to non-synchronizing reads.
1652  *
1653  * @param curr The current action. Must be a write.
1654  * @param send_fv A vector for stashing reads to which we may pass our future
1655  * value. If NULL, then don't record any future values.
1656  * @return True if modification order edges were added; false otherwise
1657  */
1658 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1659 {
1660         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1661         unsigned int i;
1662         bool added = false;
1663         ASSERT(curr->is_write());
1664
1665         if (curr->is_seqcst()) {
1666                 /* We have to at least see the last sequentially consistent write,
1667                          so we are initialized. */
1668                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1669                 if (last_seq_cst != NULL) {
1670                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1671                 }
1672         }
1673
1674         /* Last SC fence in the current thread */
1675         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1676
1677         /* Iterate over all threads */
1678         for (i = 0; i < thrd_lists->size(); i++) {
1679                 /* Last SC fence in thread i, before last SC fence in current thread */
1680                 ModelAction *last_sc_fence_thread_before = NULL;
1681                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1682                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1683
1684                 /* Iterate over actions in thread, starting from most recent */
1685                 action_list_t *list = &(*thrd_lists)[i];
1686                 action_list_t::reverse_iterator rit;
1687                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1688                         ModelAction *act = *rit;
1689                         if (act == curr) {
1690                                 /*
1691                                  * 1) If RMW and it actually read from something, then we
1692                                  * already have all relevant edges, so just skip to next
1693                                  * thread.
1694                                  *
1695                                  * 2) If RMW and it didn't read from anything, we should take
1696                                  * whatever edge we can get to speed up convergence.
1697                                  *
1698                                  * 3) If normal write, we need to look at earlier actions, so
1699                                  * continue processing list.
1700                                  */
1701                                 if (curr->is_rmw()) {
1702                                         if (curr->get_reads_from() != NULL)
1703                                                 break;
1704                                         else
1705                                                 continue;
1706                                 } else
1707                                         continue;
1708                         }
1709
1710                         /* C++, Section 29.3 statement 7 */
1711                         if (last_sc_fence_thread_before && act->is_write() &&
1712                                         *act < *last_sc_fence_thread_before) {
1713                                 added = mo_graph->addEdge(act, curr) || added;
1714                                 break;
1715                         }
1716
1717                         /*
1718                          * Include at most one act per-thread that "happens
1719                          * before" curr
1720                          */
1721                         if (act->happens_before(curr)) {
1722                                 /*
1723                                  * Note: if act is RMW, just add edge:
1724                                  *   act --mo--> curr
1725                                  * The following edge should be handled elsewhere:
1726                                  *   readfrom(act) --mo--> act
1727                                  */
1728                                 if (act->is_write())
1729                                         added = mo_graph->addEdge(act, curr) || added;
1730                                 else if (act->is_read()) {
1731                                         //if previous read accessed a null, just keep going
1732                                         if (act->get_reads_from() == NULL)
1733                                                 continue;
                                        /* The store that act read from must be mo-before curr */
1734                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1735                                 }
1736                                 break;
1737                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1738                                                      !act->same_thread(curr)) {
1739                                 /* We have an action that:
1740                                    (1) did not happen before us
1741                                    (2) is a read and we are a write
1742                                    (3) cannot synchronize with us
1743                                    (4) is in a different thread
1744                                    =>
1745                                    that read could potentially read from our write.  Note that
1746                                    these checks are overly conservative at this point, we'll
1747                                    do more checks before actually removing the
1748                                    pendingfuturevalue.
1749
1750                                  */
                                /* Stash act as a candidate recipient of curr's future
                                 * value, subject to the thin-air constraint check */
1751                                 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1752                                         if (!is_infeasible())
1753                                                 send_fv->push_back(act);
1754                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1755                                                 add_future_value(curr, act);
1756                                 }
1757                         }
1758                 }
1759         }
1760
1761         /*
1762          * All compatible, thread-exclusive promises must be ordered after any
1763          * concrete stores to the same thread, or else they can be merged with
1764          * this store later
1765          */
1766         for (unsigned int i = 0; i < promises.size(); i++)
1767                 if (promises[i]->is_compatible_exclusive(curr))
1768                         added = mo_graph->addEdge(curr, promises[i]) || added;
1769
1770         return added;
1771 }
1772
1773 /** Arbitrary reads from the future are not allowed.  Section 29.3
1774  * part 9 places some constraints.  This method checks one consequence of that
1775  * constraint.  Others require compiler support. */
1776 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1777 {
1778         if (!writer->is_rmw())
1779                 return true;
1780
1781         if (!reader->is_rmw())
1782                 return true;
1783
1784         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1785                 if (search == reader)
1786                         return false;
1787                 if (search->get_tid() == reader->get_tid() &&
1788                                 search->happens_before(reader))
1789                         break;
1790         }
1791
1792         return true;
1793 }
1794
1795 /**
1796  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1797  * some constraints. This method checks the following constraint (others
1798  * require compiler support):
1799  *
1800  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1801  */
1802 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1803 {
1804         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1805         unsigned int i;
1806         /* Iterate over all threads */
1807         for (i = 0; i < thrd_lists->size(); i++) {
1808                 const ModelAction *write_after_read = NULL;
1809
1810                 /* Iterate over actions in thread, starting from most recent */
1811                 action_list_t *list = &(*thrd_lists)[i];
1812                 action_list_t::reverse_iterator rit;
1813                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1814                         ModelAction *act = *rit;
1815
1816                         /* Don't disallow due to act == reader */
1817                         if (!reader->happens_before(act) || reader == act)
1818                                 break;
1819                         else if (act->is_write())
1820                                 write_after_read = act;
1821                         else if (act->is_read() && act->get_reads_from() != NULL)
1822                                 write_after_read = act->get_reads_from();
1823                 }
1824
1825                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1826                         return false;
1827         }
1828         return true;
1829 }
1830
1831 /**
1832  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1833  * The ModelAction under consideration is expected to be taking part in
1834  * release/acquire synchronization as an object of the "reads from" relation.
1835  * Note that this can only provide release sequence support for RMW chains
1836  * which do not read from the future, as those actions cannot be traced until
1837  * their "promise" is fulfilled. Similarly, we may not even establish the
1838  * presence of a release sequence with certainty, as some modification order
1839  * constraints may be decided further in the future. Thus, this function
1840  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1841  * and a boolean representing certainty.
1842  *
1843  * @param rf The action that might be part of a release sequence. Must be a
1844  * write.
1845  * @param release_heads A pass-by-reference style return parameter. After
1846  * execution of this function, release_heads will contain the heads of all the
1847  * relevant release sequences, if any exists with certainty
1848  * @param pending A pass-by-reference style return parameter which is only used
1849  * when returning false (i.e., uncertain). Returns most information regarding
1850  * an uncertain release sequence, including any write operations that might
1851  * break the sequence.
1852  * @return true, if the ModelExecution is certain that release_heads is complete;
1853  * false otherwise
1854  */
1855 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1856                 rel_heads_list_t *release_heads,
1857                 struct release_seq *pending) const
1858 {
1859         /* Only check for release sequences if there are no cycles */
1860         if (mo_graph->checkForCycles())
1861                 return false;
1862
        /* Walk backwards through the RMW chain, collecting release heads */
1863         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1864                 ASSERT(rf->is_write());
1865
1866                 if (rf->is_release())
1867                         release_heads->push_back(rf);
1868                 else if (rf->get_last_fence_release())
1869                         release_heads->push_back(rf->get_last_fence_release());
1870                 if (!rf->is_rmw())
1871                         break; /* End of RMW chain */
1872
1873                 /** @todo Need to be smarter here...  In the linux lock
1874                  * example, this will run to the beginning of the program for
1875                  * every acquire. */
1876                 /** @todo The way to be smarter here is to keep going until 1
1877                  * thread has a release preceded by an acquire and you've seen
1878                  *       both. */
1879
1880                 /* acq_rel RMW is a sufficient stopping condition */
1881                 if (rf->is_acquire() && rf->is_release())
1882                         return true; /* complete */
1883         };
1884         if (!rf) {
1885                 /* read from future: need to settle this later */
1886                 pending->rf = NULL;
1887                 return false; /* incomplete */
1888         }
1889
1890         if (rf->is_release())
1891                 return true; /* complete */
1892
1893         /* else relaxed write
1894          * - check for fence-release in the same thread (29.8, stmt. 3)
1895          * - check modification order for contiguous subsequence
1896          *   -> rf must be same thread as release */
1897
1898         const ModelAction *fence_release = rf->get_last_fence_release();
1899         /* Synchronize with a fence-release unconditionally; we don't need to
1900          * find any more "contiguous subsequence..." for it */
1901         if (fence_release)
1902                 release_heads->push_back(fence_release);
1903
1904         int tid = id_to_int(rf->get_tid());
1905         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1906         action_list_t *list = &(*thrd_lists)[tid];
1907         action_list_t::const_reverse_iterator rit;
1908
1909         /* Find rf in the thread list */
1910         rit = std::find(list->rbegin(), list->rend(), rf);
1911         ASSERT(rit != list->rend());
1912
1913         /* Find the last {write,fence}-release */
1914         for (; rit != list->rend(); rit++) {
1915                 if (fence_release && *(*rit) < *fence_release)
1916                         break;
1917                 if ((*rit)->is_release())
1918                         break;
1919         }
1920         if (rit == list->rend()) {
1921                 /* No write-release in this thread */
1922                 return true; /* complete */
1923         } else if (fence_release && *(*rit) < *fence_release) {
1924                 /* The fence-release is more recent (and so, "stronger") than
1925                  * the most recent write-release */
1926                 return true; /* complete */
1927         } /* else, need to establish contiguous release sequence */
1928         ModelAction *release = *rit;
1929
1930         ASSERT(rf->same_thread(release));
1931
1932         pending->writes.clear();
1933
        /* Check whether any other thread holds a store that could slot into
         * modification order between 'release' and 'rf', breaking the
         * contiguous release sequence */
1934         bool certain = true;
1935         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1936                 if (id_to_int(rf->get_tid()) == (int)i)
1937                         continue;
1938                 list = &(*thrd_lists)[i];
1939
1940                 /* Can we ensure no future writes from this thread may break
1941                  * the release seq? */
1942                 bool future_ordered = false;
1943
1944                 ModelAction *last = get_last_action(int_to_id(i));
1945                 Thread *th = get_thread(int_to_id(i));
1946                 if ((last && rf->happens_before(last)) ||
1947                                 !is_enabled(th) ||
1948                                 th->is_complete())
1949                         future_ordered = true;
1950
1951                 ASSERT(!th->is_model_thread() || future_ordered);
1952
1953                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1954                         const ModelAction *act = *rit;
1955                         /* Reach synchronization -> this thread is complete */
1956                         if (act->happens_before(release))
1957                                 break;
1958                         if (rf->happens_before(act)) {
1959                                 future_ordered = true;
1960                                 continue;
1961                         }
1962
1963                         /* Only non-RMW writes can break release sequences */
1964                         if (!act->is_write() || act->is_rmw())
1965                                 continue;
1966
1967                         /* Check modification order */
1968                         if (mo_graph->checkReachable(rf, act)) {
1969                                 /* rf --mo--> act */
1970                                 future_ordered = true;
1971                                 continue;
1972                         }
1973                         if (mo_graph->checkReachable(act, release))
1974                                 /* act --mo--> release */
1975                                 break;
1976                         if (mo_graph->checkReachable(release, act) &&
1977                                       mo_graph->checkReachable(act, rf)) {
1978                                 /* release --mo-> act --mo--> rf */
1979                                 return true; /* complete */
1980                         }
1981                         /* act may break release sequence */
1982                         pending->writes.push_back(act);
1983                         certain = false;
1984                 }
1985                 if (!future_ordered)
1986                         certain = false; /* This thread is uncertain */
1987         }
1988
1989         if (certain) {
                /* No thread can break the sequence: report the head */
1990                 release_heads->push_back(release);
1991                 pending->writes.clear();
1992         } else {
                /* Uncertain: record the state needed to re-check lazily */
1993                 pending->release = release;
1994                 pending->rf = rf;
1995         }
1996         return certain;
1997 }
1998
1999 /**
2000  * An interface for getting the release sequence head(s) with which a
2001  * given ModelAction must synchronize. This function only returns a non-empty
2002  * result when it can locate a release sequence head with certainty. Otherwise,
2003  * it may mark the internal state of the ModelExecution so that it will handle
2004  * the release sequence at a later time, causing @a acquire to update its
2005  * synchronization at some later point in execution.
2006  *
2007  * @param acquire The 'acquire' action that may synchronize with a release
2008  * sequence
2009  * @param read The read action that may read from a release sequence; this may
2010  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2011  * when 'acquire' is a fence-acquire)
2012  * @param release_heads A pass-by-reference return parameter. Will be filled
2013  * with the head(s) of the release sequence(s), if they exists with certainty.
2014  * @see ModelExecution::release_seq_heads
2015  */
2016 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2017                 ModelAction *read, rel_heads_list_t *release_heads)
2018 {
2019         const ModelAction *rf = read->get_reads_from();
2020         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2021         sequence->acquire = acquire;
2022         sequence->read = read;
2023
2024         if (!release_seq_heads(rf, release_heads, sequence)) {
2025                 /* add act to 'lazy checking' list */
2026                 pending_rel_seqs.push_back(sequence);
2027         } else {
2028                 snapshot_free(sequence);
2029         }
2030 }
2031
2032 /**
2033  * Attempt to resolve all stashed operations that might synchronize with a
2034  * release sequence for a given location. This implements the "lazy" portion of
2035  * determining whether or not a release sequence was contiguous, since not all
2036  * modification order information is present at the time an action occurs.
2037  *
2038  * @param location The location/object that should be checked for release
2039  * sequence resolutions. A NULL value means to check all locations.
2040  * @param work_queue The work queue to which to add work items as they are
2041  * generated
2042  * @return True if any updates occurred (new synchronization, new mo_graph
2043  * edges)
2044  */
2045 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2046 {
2047         bool updated = false;
2048         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2049         while (it != pending_rel_seqs.end()) {
2050                 struct release_seq *pending = *it;
2051                 ModelAction *acquire = pending->acquire;
2052                 const ModelAction *read = pending->read;
2053
2054                 /* Only resolve sequences on the given location, if provided */
2055                 if (location && read->get_location() != location) {
2056                         it++;
2057                         continue;
2058                 }
2059
                /* Re-run the release-sequence analysis with current mo info */
2060                 const ModelAction *rf = read->get_reads_from();
2061                 rel_heads_list_t release_heads;
2062                 bool complete;
2063                 complete = release_seq_heads(rf, &release_heads, pending);
                /* Synchronize the acquire with any newly-certain heads */
2064                 for (unsigned int i = 0; i < release_heads.size(); i++)
2065                         if (!acquire->has_synchronized_with(release_heads[i]))
2066                                 if (synchronize(release_heads[i], acquire))
2067                                         updated = true;
2068
                /* NOTE(review): 'updated' accumulates across iterations of this
                 * while-loop, so once any sequence produces new synchronization,
                 * this branch also fires for every later pending sequence —
                 * confirm that re-queueing work in those iterations is intended */
2069                 if (updated) {
2070                         /* Re-check all pending release sequences */
2071                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2072                         /* Re-check read-acquire for mo_graph edges */
2073                         if (acquire->is_read())
2074                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2075
2076                         /* propagate synchronization to later actions */
2077                         action_list_t::reverse_iterator rit = action_trace.rbegin();
2078                         for (; (*rit) != acquire; rit++) {
2079                                 ModelAction *propagate = *rit;
2080                                 if (acquire->happens_before(propagate)) {
2081                                         synchronize(acquire, propagate);
2082                                         /* Re-check 'propagate' for mo_graph edges */
2083                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2084                                 }
2085                         }
2086                 }
                /* A complete (certain) sequence no longer needs lazy tracking */
2087                 if (complete) {
2088                         it = pending_rel_seqs.erase(it);
2089                         snapshot_free(pending);
2090                 } else {
2091                         it++;
2092                 }
2093         }
2094
2095         // If we resolved promises or data races, see if we have realized a data race.
2096         checkDataRaces();
2097
2098         return updated;
2099 }
2100
2101 /**
2102  * Performs various bookkeeping operations for the current ModelAction. For
2103  * instance, adds action to the per-object, per-thread action vector and to the
2104  * action trace list of all thread actions.
2105  *
2106  * @param act is the ModelAction to add.
2107  */
2108 void ModelExecution::add_action_to_lists(ModelAction *act)
2109 {
2110         int tid = id_to_int(act->get_tid());
        /* First action on an atomic location: also record a synthetic
         * "uninitialized" action in front of it in every list */
2111         ModelAction *uninit = NULL;
2112         int uninit_id = -1;
2113         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2114         if (list->empty() && act->is_atomic_var()) {
2115                 uninit = get_uninitialized_action(act);
2116                 uninit_id = id_to_int(uninit->get_tid());
2117                 list->push_front(uninit);
2118         }
2119         list->push_back(act);
2120
        /* Global trace of all actions */
2121         action_trace.push_back(act);
2122         if (uninit)
2123                 action_trace.push_front(uninit);
2124
        /* Per-object, per-thread lists */
2125         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2126         if (tid >= (int)vec->size())
2127                 vec->resize(priv->next_thread_id);
2128         (*vec)[tid].push_back(act);
2129         if (uninit)
2130                 (*vec)[uninit_id].push_front(uninit);
2131
        /* Track the most recent action per thread */
2132         if ((int)thrd_last_action.size() <= tid)
2133                 thrd_last_action.resize(get_num_threads());
2134         thrd_last_action[tid] = act;
2135         if (uninit)
2136                 thrd_last_action[uninit_id] = uninit;
2137
        /* Track the most recent fence-release per thread */
2138         if (act->is_fence() && act->is_release()) {
2139                 if ((int)thrd_last_fence_release.size() <= tid)
2140                         thrd_last_fence_release.resize(get_num_threads());
2141                 thrd_last_fence_release[tid] = act;
2142         }
2143
        /* A wait also appears in the lists for its associated mutex, whose
         * address is carried in the action's value field */
2144         if (act->is_wait()) {
2145                 void *mutex_loc = (void *) act->get_value();
2146                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2147
2148                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2149                 if (tid >= (int)vec->size())
2150                         vec->resize(priv->next_thread_id);
2151                 (*vec)[tid].push_back(act);
2152         }
2153 }
2154
2155 /**
2156  * @brief Get the last action performed by a particular Thread
2157  * @param tid The thread ID of the Thread in question
2158  * @return The last action in the thread
2159  */
2160 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2161 {
2162         int threadid = id_to_int(tid);
2163         if (threadid < (int)thrd_last_action.size())
2164                 return thrd_last_action[id_to_int(tid)];
2165         else
2166                 return NULL;
2167 }
2168
2169 /**
2170  * @brief Get the last fence release performed by a particular Thread
2171  * @param tid The thread ID of the Thread in question
2172  * @return The last fence release in the thread, if one exists; NULL otherwise
2173  */
2174 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2175 {
2176         int threadid = id_to_int(tid);
2177         if (threadid < (int)thrd_last_fence_release.size())
2178                 return thrd_last_fence_release[id_to_int(tid)];
2179         else
2180                 return NULL;
2181 }
2182
2183 /**
2184  * Gets the last memory_order_seq_cst write (in the total global sequence)
2185  * performed on a particular object (i.e., memory location), not including the
2186  * current action.
2187  * @param curr The current ModelAction; also denotes the object location to
2188  * check
2189  * @return The last seq_cst write
2190  */
2191 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2192 {
2193         void *location = curr->get_location();
2194         action_list_t *list = obj_map.get(location);
2195         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2196         action_list_t::reverse_iterator rit;
2197         for (rit = list->rbegin(); (*rit) != curr; rit++)
2198                 ;
2199         rit++; /* Skip past curr */
2200         for ( ; rit != list->rend(); rit++)
2201                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2202                         return *rit;
2203         return NULL;
2204 }
2205
2206 /**
2207  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2208  * performed in a particular thread, prior to a particular fence.
2209  * @param tid The ID of the thread to check
2210  * @param before_fence The fence from which to begin the search; if NULL, then
2211  * search for the most recent fence in the thread.
2212  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2213  */
2214 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2215 {
2216         /* All fences should have location FENCE_LOCATION */
2217         action_list_t *list = obj_map.get(FENCE_LOCATION);
2218
2219         if (!list)
2220                 return NULL;
2221
2222         action_list_t::reverse_iterator rit = list->rbegin();
2223
2224         if (before_fence) {
2225                 for (; rit != list->rend(); rit++)
2226                         if (*rit == before_fence)
2227                                 break;
2228
2229                 ASSERT(*rit == before_fence);
2230                 rit++;
2231         }
2232
2233         for (; rit != list->rend(); rit++)
2234                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2235                         return *rit;
2236         return NULL;
2237 }
2238
2239 /**
2240  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2241  * location). This function identifies the mutex according to the current
2242  * action, which is presumed to perform on the same mutex.
2243  * @param curr The current ModelAction; also denotes the object location to
2244  * check
2245  * @return The last unlock operation
2246  */
2247 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2248 {
2249         void *location = curr->get_location();
2250         action_list_t *list = obj_map.get(location);
2251         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2252         action_list_t::reverse_iterator rit;
2253         for (rit = list->rbegin(); rit != list->rend(); rit++)
2254                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2255                         return *rit;
2256         return NULL;
2257 }
2258
2259 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2260 {
2261         ModelAction *parent = get_last_action(tid);
2262         if (!parent)
2263                 parent = get_thread(tid)->get_creation();
2264         return parent;
2265 }
2266
2267 /**
2268  * Returns the clock vector for a given thread.
2269  * @param tid The thread whose clock vector we want
2270  * @return Desired clock vector
2271  */
2272 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2273 {
2274         return get_parent_action(tid)->get_cv();
2275 }
2276
2277 /**
2278  * @brief Find the promise (if any) to resolve for the current action and
2279  * remove it from the pending promise vector
2280  * @param curr The current ModelAction. Should be a write.
2281  * @return The Promise to resolve, if any; otherwise NULL
2282  */
2283 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2284 {
2285         for (unsigned int i = 0; i < promises.size(); i++)
2286                 if (curr->get_node()->get_promise(i)) {
2287                         Promise *ret = promises[i];
2288                         promises.erase(promises.begin() + i);
2289                         return ret;
2290                 }
2291         return NULL;
2292 }
2293
2294 /**
2295  * Resolve a Promise with a current write.
2296  * @param write The ModelAction that is fulfilling Promises
2297  * @param promise The Promise to resolve
2298  * @return True if the Promise was successfully resolved; false otherwise
2299  */
2300 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2301 {
2302         ModelVector<ModelAction *> actions_to_check;
2303
2304         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2305                 ModelAction *read = promise->get_reader(i);
2306                 read_from(read, write);
2307                 actions_to_check.push_back(read);
2308         }
2309         /* Make sure the promise's value matches the write's value */
2310         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2311         if (!mo_graph->resolvePromise(promise, write))
2312                 priv->failed_promise = true;
2313
2314         /**
2315          * @todo  It is possible to end up in an inconsistent state, where a
2316          * "resolved" promise may still be referenced if
2317          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2318          *
2319          * Note that the inconsistency only matters when dumping mo_graph to
2320          * file.
2321          *
2322          * delete promise;
2323          */
2324
2325         //Check whether reading these writes has made threads unable to
2326         //resolve promises
2327         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2328                 ModelAction *read = actions_to_check[i];
2329                 mo_check_promises(read, true);
2330         }
2331
2332         return true;
2333 }
2334
2335 /**
2336  * Compute the set of promises that could potentially be satisfied by this
2337  * action. Note that the set computation actually appears in the Node, not in
2338  * ModelExecution.
2339  * @param curr The ModelAction that may satisfy promises
2340  */
2341 void ModelExecution::compute_promises(ModelAction *curr)
2342 {
2343         for (unsigned int i = 0; i < promises.size(); i++) {
2344                 Promise *promise = promises[i];
2345                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2346                         continue;
2347
2348                 bool satisfy = true;
2349                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2350                         const ModelAction *act = promise->get_reader(j);
2351                         if (act->happens_before(curr) ||
2352                                         act->could_synchronize_with(curr)) {
2353                                 satisfy = false;
2354                                 break;
2355                         }
2356                 }
2357                 if (satisfy)
2358                         curr->get_node()->set_promise(i);
2359         }
2360 }
2361
/**
 * Checks promises in response to change in ClockVector Threads.
 *
 * When thread tid's clock vector advances (old_cv -> merge_cv), tid becomes
 * synchronized with additional actions. If tid newly synchronizes with a
 * promised read, tid can no longer supply the promised write, so it is
 * eliminated from that promise's set of available threads.
 *
 * @param tid The thread whose clock vector changed
 * @param old_cv The thread's clock vector before the merge (may be NULL)
 * @param merge_cv The thread's clock vector after the merge
 */
void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (!promise->thread_is_available(tid))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* Newly synchronized-with this promised read? */
			if ((!old_cv || !old_cv->synchronized_since(act)) &&
					merge_cv->synchronized_since(act)) {
				if (promise->eliminate_thread(tid)) {
					/* Promise has failed */
					priv->failed_promise = true;
					return;
				}
			}
		}
	}
}
2382
2383 void ModelExecution::check_promises_thread_disabled()
2384 {
2385         for (unsigned int i = 0; i < promises.size(); i++) {
2386                 Promise *promise = promises[i];
2387                 if (promise->has_failed()) {
2388                         priv->failed_promise = true;
2389                         return;
2390                 }
2391         }
2392 }
2393
2394 /**
2395  * @brief Checks promises in response to addition to modification order for
2396  * threads.
2397  *
2398  * We test whether threads are still available for satisfying promises after an
2399  * addition to our modification order constraints. Those that are unavailable
2400  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2401  * that promise has failed.
2402  *
2403  * @param act The ModelAction which updated the modification order
2404  * @param is_read_check Should be true if act is a read and we must check for
2405  * updates to the store from which it read (there is a distinction here for
2406  * RMW's, which are both a load and a store)
2407  */
2408 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2409 {
2410         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2411
2412         for (unsigned int i = 0; i < promises.size(); i++) {
2413                 Promise *promise = promises[i];
2414
2415                 // Is this promise on the same location?
2416                 if (!promise->same_location(write))
2417                         continue;
2418
2419                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2420                         const ModelAction *pread = promise->get_reader(j);
2421                         if (!pread->happens_before(act))
2422                                continue;
2423                         if (mo_graph->checkPromise(write, promise)) {
2424                                 priv->failed_promise = true;
2425                                 return;
2426                         }
2427                         break;
2428                 }
2429
2430                 // Don't do any lookups twice for the same thread
2431                 if (!promise->thread_is_available(act->get_tid()))
2432                         continue;
2433
2434                 if (mo_graph->checkReachable(promise, write)) {
2435                         if (mo_graph->checkPromise(write, promise)) {
2436                                 priv->failed_promise = true;
2437                                 return;
2438                         }
2439                 }
2440         }
2441 }
2442
2443 /**
2444  * Compute the set of writes that may break the current pending release
2445  * sequence. This information is extracted from previou release sequence
2446  * calculations.
2447  *
2448  * @param curr The current ModelAction. Must be a release sequence fixup
2449  * action.
2450  */
2451 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2452 {
2453         if (pending_rel_seqs.empty())
2454                 return;
2455
2456         struct release_seq *pending = pending_rel_seqs.back();
2457         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2458                 const ModelAction *write = pending->writes[i];
2459                 curr->get_node()->add_relseq_break(write);
2460         }
2461
2462         /* NULL means don't break the sequence; just synchronize */
2463         curr->get_node()->add_relseq_break(NULL);
2464 }
2465
2466 /**
2467  * Build up an initial set of all past writes that this 'read' action may read
2468  * from, as well as any previously-observed future values that must still be valid.
2469  *
2470  * @param curr is the current ModelAction that we are exploring; it must be a
2471  * 'read' operation.
2472  */
2473 void ModelExecution::build_may_read_from(ModelAction *curr)
2474 {
2475         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2476         unsigned int i;
2477         ASSERT(curr->is_read());
2478
2479         ModelAction *last_sc_write = NULL;
2480
2481         if (curr->is_seqcst())
2482                 last_sc_write = get_last_seq_cst_write(curr);
2483
2484         /* Iterate over all threads */
2485         for (i = 0; i < thrd_lists->size(); i++) {
2486                 /* Iterate over actions in thread, starting from most recent */
2487                 action_list_t *list = &(*thrd_lists)[i];
2488                 action_list_t::reverse_iterator rit;
2489                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2490                         ModelAction *act = *rit;
2491
2492                         /* Only consider 'write' actions */
2493                         if (!act->is_write() || act == curr)
2494                                 continue;
2495
2496                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2497                         bool allow_read = true;
2498
2499                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2500                                 allow_read = false;
2501                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2502                                 allow_read = false;
2503
2504                         if (allow_read) {
2505                                 /* Only add feasible reads */
2506                                 mo_graph->startChanges();
2507                                 r_modification_order(curr, act);
2508                                 if (!is_infeasible())
2509                                         curr->get_node()->add_read_from_past(act);
2510                                 mo_graph->rollbackChanges();
2511                         }
2512
2513                         /* Include at most one act per-thread that "happens before" curr */
2514                         if (act->happens_before(curr))
2515                                 break;
2516                 }
2517         }
2518
2519         /* Inherit existing, promised future values */
2520         for (i = 0; i < promises.size(); i++) {
2521                 const Promise *promise = promises[i];
2522                 const ModelAction *promise_read = promise->get_reader(0);
2523                 if (promise_read->same_var(curr)) {
2524                         /* Only add feasible future-values */
2525                         mo_graph->startChanges();
2526                         r_modification_order(curr, promise);
2527                         if (!is_infeasible())
2528                                 curr->get_node()->add_read_from_promise(promise_read);
2529                         mo_graph->rollbackChanges();
2530                 }
2531         }
2532
2533         /* We may find no valid may-read-from only if the execution is doomed */
2534         if (!curr->get_node()->read_from_size()) {
2535                 priv->no_valid_reads = true;
2536                 set_assert();
2537         }
2538
2539         if (DBG_ENABLED()) {
2540                 model_print("Reached read action:\n");
2541                 curr->print();
2542                 model_print("Printing read_from_past\n");
2543                 curr->get_node()->print_read_from_past();
2544                 model_print("End printing read_from_past\n");
2545         }
2546 }
2547
/**
 * @brief Check whether a sleeping thread's read may read from a given write
 *
 * Walks the reads-from chain of RMWs backward from 'write', accepting the
 * write if some store in the chain is a release performed while curr's
 * thread was in the sleep set (or if the chain reaches the UNINIT write).
 *
 * @param curr The read of a thread in the sleep set
 * @param write The candidate write
 * @return True if the sleeping read may read from the write
 */
bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
{
	/* For plain writes the chain has one element; for RMWs we follow
	 * their reads-from links backward */
	for ( ; write != NULL; write = write->get_reads_from()) {
		/* UNINIT actions don't have a Node, and they never sleep */
		if (write->is_uninitialized())
			return true;
		Node *prevnode = write->get_node()->get_parent();

		bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
		if (write->is_release() && thread_sleep)
			return true;
		/* A non-RMW write terminates the chain */
		if (!write->is_rmw())
			return false;
	}
	return true;
}
2564
2565 /**
2566  * @brief Get an action representing an uninitialized atomic
2567  *
2568  * This function may create a new one or try to retrieve one from the NodeStack
2569  *
2570  * @param curr The current action, which prompts the creation of an UNINIT action
2571  * @return A pointer to the UNINIT ModelAction
2572  */
2573 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2574 {
2575         Node *node = curr->get_node();
2576         ModelAction *act = node->get_uninit_action();
2577         if (!act) {
2578                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2579                 node->set_uninit_action(act);
2580         }
2581         act->create_cv(NULL);
2582         return act;
2583 }
2584
2585 static void print_list(const action_list_t *list)
2586 {
2587         action_list_t::const_iterator it;
2588
2589         model_print("---------------------------------------------------------------------\n");
2590
2591         unsigned int hash = 0;
2592
2593         for (it = list->begin(); it != list->end(); it++) {
2594                 const ModelAction *act = *it;
2595                 if (act->get_seq_number() > 0)
2596                         act->print();
2597                 hash = hash^(hash<<3)^((*it)->hash());
2598         }
2599         model_print("HASH %u\n", hash);
2600         model_print("---------------------------------------------------------------------\n");
2601 }
2602
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification order graph, reads-from edges, and
 * sequenced-before edges of the current trace as a Graphviz "dot" file
 *
 * Writes to "<filename>.dot". Silently returns if the file cannot be
 * opened, rather than crashing the checker.
 *
 * @param filename Base name of the output file (without ".dot")
 */
void ModelExecution::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf guards against an oversized filename overflowing buffer */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return;
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Last action seen per thread, used to draw sb (sequenced-before)
	 * edges between consecutive actions of the same thread */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		/* Use id_to_int consistently for thread indexing (the original
		 * mixed implicit thread_id_t conversion with id_to_int) */
		int tid = id_to_int(act->get_tid());
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
			else
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						act,
						"label=\"rf\", color=red");
		}
		if (thread_array[tid]) {
			mo_graph->dot_print_edge(file,
					thread_array[tid],
					act,
					"label=\"sb\", color=blue, weight=400");
		}

		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2642
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump mo-graph and trace graph to per-execution files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	model_print("Execution %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* All threads asleep means this execution is redundant with
		 * one already explored */
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		model_print("\n");
	} else
		print_infeasibility(" INFEASIBLE");
	print_list(&action_trace);
	model_print("\n");
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2672
2673 /**
2674  * Add a Thread to the system for the first time. Should only be called once
2675  * per thread.
2676  * @param t The Thread to add
2677  */
2678 void ModelExecution::add_thread(Thread *t)
2679 {
2680         unsigned int i = id_to_int(t->get_id());
2681         if (i >= thread_map.size())
2682                 thread_map.resize(i + 1);
2683         thread_map[i] = t;
2684         if (!t->is_model_thread())
2685                 scheduler->add_thread(t);
2686 }
2687
2688 /**
2689  * @brief Get a Thread reference by its ID
2690  * @param tid The Thread's ID
2691  * @return A Thread reference
2692  */
2693 Thread * ModelExecution::get_thread(thread_id_t tid) const
2694 {
2695         unsigned int i = id_to_int(tid);
2696         if (i < thread_map.size())
2697                 return thread_map[i];
2698         return NULL;
2699 }
2700
2701 /**
2702  * @brief Get a reference to the Thread in which a ModelAction was executed
2703  * @param act The ModelAction
2704  * @return A Thread reference
2705  */
2706 Thread * ModelExecution::get_thread(const ModelAction *act) const
2707 {
2708         return get_thread(act->get_tid());
2709 }
2710
2711 /**
2712  * @brief Get a Promise's "promise number"
2713  *
2714  * A "promise number" is an index number that is unique to a promise, valid
2715  * only for a specific snapshot of an execution trace. Promises may come and go
2716  * as they are generated an resolved, so an index only retains meaning for the
2717  * current snapshot.
2718  *
2719  * @param promise The Promise to check
2720  * @return The promise index, if the promise still is valid; otherwise -1
2721  */
2722 int ModelExecution::get_promise_number(const Promise *promise) const
2723 {
2724         for (unsigned int i = 0; i < promises.size(); i++)
2725                 if (promises[i] == promise)
2726                         return i;
2727         /* Not found */
2728         return -1;
2729 }
2730
2731 /**
2732  * @brief Check if a Thread is currently enabled
2733  * @param t The Thread to check
2734  * @return True if the Thread is currently enabled
2735  */
2736 bool ModelExecution::is_enabled(Thread *t) const
2737 {
2738         return scheduler->is_enabled(t);
2739 }
2740
2741 /**
2742  * @brief Check if a Thread is currently enabled
2743  * @param tid The ID of the Thread to check
2744  * @return True if the Thread is currently enabled
2745  */
2746 bool ModelExecution::is_enabled(thread_id_t tid) const
2747 {
2748         return scheduler->is_enabled(tid);
2749 }
2750
2751 /**
2752  * @brief Select the next thread to execute based on the curren action
2753  *
2754  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2755  * actions should be followed by the execution of their child thread. In either
2756  * case, the current action should determine the next thread schedule.
2757  *
2758  * @param curr The current action
2759  * @return The next thread to run, if the current action will determine this
2760  * selection; otherwise NULL
2761  */
2762 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2763 {
2764         /* Do not split atomic RMW */
2765         if (curr->is_rmwr())
2766                 return get_thread(curr);
2767         /* Follow CREATE with the created thread */
2768         if (curr->get_type() == THREAD_CREATE)
2769                 return curr->get_thread_operand();
2770         return NULL;
2771 }
2772
2773 /** @return True if the execution has taken too many steps */
2774 bool ModelExecution::too_many_steps() const
2775 {
2776         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2777 }
2778
2779 /**
2780  * Takes the next step in the execution, if possible.
2781  * @param curr The current step to take
2782  * @return Returns the next Thread to run, if any; NULL if this execution
2783  * should terminate
2784  */
2785 Thread * ModelExecution::take_step(ModelAction *curr)
2786 {
2787         Thread *curr_thrd = get_thread(curr);
2788         ASSERT(curr_thrd->get_state() == THREAD_READY);
2789
2790         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2791         curr = check_current_action(curr);
2792         ASSERT(curr);
2793
2794         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2795                 scheduler->remove_thread(curr_thrd);
2796
2797         return action_select_next_thread(curr);
2798 }
2799
2800 /**
2801  * Launch end-of-execution release sequence fixups only when
2802  * the execution is otherwise feasible AND there are:
2803  *
2804  * (1) pending release sequences
2805  * (2) pending assertions that could be invalidated by a change
2806  * in clock vectors (i.e., data races)
2807  * (3) no pending promises
2808  */
2809 void ModelExecution::fixup_release_sequences()
2810 {
2811         while (!pending_rel_seqs.empty() &&
2812                         is_feasible_prefix_ignore_relseq() &&
2813                         !unrealizedraces.empty()) {
2814                 model_print("*** WARNING: release sequence fixup action "
2815                                 "(%zu pending release seuqence(s)) ***\n",
2816                                 pending_rel_seqs.size());
2817                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2818                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2819                                 model_thread);
2820                 take_step(fixup);
2821         };
2822 }