execution: make structure snapshotting
[cdsspec-compiler.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 too_many_reads(false),
34                 no_valid_reads(false),
35                 bad_synchronization(false),
36                 asserted(false)
37         { }
38
39         ~model_snapshot_members() {
40                 for (unsigned int i = 0; i < bugs.size(); i++)
41                         delete bugs[i];
42                 bugs.clear();
43         }
44
45         unsigned int next_thread_id;
46         modelclock_t used_sequence_numbers;
47         ModelAction *next_backtrack;
48         SnapVector<bug_message *> bugs;
49         bool failed_promise;
50         bool too_many_reads;
51         bool no_valid_reads;
52         /** @brief Incorrectly-ordered synchronization was made */
53         bool bad_synchronization;
54         bool asserted;
55
56         SNAPSHOTALLOC
57 };
58
/**
 * @brief Constructor
 *
 * Allocates all per-execution bookkeeping containers. Note that the
 * initializer order below mirrors the member declaration order.
 *
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (pointer stored, not owned)
 * @param scheduler The scheduler to drive; this execution registers itself
 *        with it below
 * @param node_stack The NodeStack used for backtracking (pointer stored,
 *        not deleted by ~ModelExecution())
 */
ModelExecution::ModelExecution(ModelChecker *m,
		struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(new action_list_t()),
	thread_map(new HashTable<int, Thread *, int>()),
	obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
	promises(new SnapVector<Promise *>()),
	futurevalues(new SnapVector<struct PendingFutureValue>()),
	pending_rel_seqs(new SnapVector<struct release_seq *>()),
	thrd_last_action(new SnapVector<ModelAction *>(1)),
	thrd_last_fence_release(new SnapVector<ModelAction *>()),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	thread_map->put(id_to_int(model_thread->get_id()), model_thread);
	scheduler->register_engine(this);
}
86
87 /** @brief Destructor */
88 ModelExecution::~ModelExecution()
89 {
90         for (unsigned int i = 0; i < get_num_threads(); i++)
91                 delete thread_map->get(i);
92         delete thread_map;
93
94         delete obj_thrd_map;
95         delete obj_map;
96         delete condvar_waiters_map;
97         delete action_trace;
98
99         for (unsigned int i = 0; i < promises->size(); i++)
100                 delete (*promises)[i];
101         delete promises;
102
103         delete pending_rel_seqs;
104
105         delete thrd_last_action;
106         delete thrd_last_fence_release;
107         delete mo_graph;
108         delete priv;
109 }
110
/** @return the current execution number, as tracked by the ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
115
116 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
117 {
118         action_list_t *tmp = hash->get(ptr);
119         if (tmp == NULL) {
120                 tmp = new action_list_t();
121                 hash->put(ptr, tmp);
122         }
123         return tmp;
124 }
125
126 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
127 {
128         SnapVector<action_list_t> *tmp = hash->get(ptr);
129         if (tmp == NULL) {
130                 tmp = new SnapVector<action_list_t>();
131                 hash->put(ptr, tmp);
132         }
133         return tmp;
134 }
135
136 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
137 {
138         SnapVector<action_list_t> *wrv=obj_thrd_map->get(obj);
139         if (wrv==NULL)
140                 return NULL;
141         unsigned int thread=id_to_int(tid);
142         if (thread < wrv->size())
143                 return &(*wrv)[thread];
144         else
145                 return NULL;
146 }
147
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
{
	/* Post-increment: IDs are handed out sequentially from INITIAL_THREAD_ID */
	return priv->next_thread_id++;
}
153
/** @return the number of user threads created during this execution */
unsigned int ModelExecution::get_num_threads() const
{
	/* next_thread_id doubles as a running count of created threads */
	return priv->next_thread_id;
}
159
/** @return a sequence number for a new ModelAction */
modelclock_t ModelExecution::get_next_seq_num()
{
	/* Pre-increment: first action gets 1, so 0 presumably means
	 * "before any action" -- confirm against clockvector usage */
	return ++priv->used_sequence_numbers;
}
165
166 /**
167  * @brief Should the current action wake up a given thread?
168  *
169  * @param curr The current action
170  * @param thread The thread that we might wake up
171  * @return True, if we should wake up the sleeping thread; false otherwise
172  */
173 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
174 {
175         const ModelAction *asleep = thread->get_pending();
176         /* Don't allow partial RMW to wake anyone up */
177         if (curr->is_rmwr())
178                 return false;
179         /* Synchronizing actions may have been backtracked */
180         if (asleep->could_synchronize_with(curr))
181                 return true;
182         /* All acquire/release fences and fence-acquire/store-release */
183         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
184                 return true;
185         /* Fence-release + store can awake load-acquire on the same location */
186         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
187                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
188                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
189                         return true;
190         }
191         return false;
192 }
193
194 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
195 {
196         for (unsigned int i = 0; i < get_num_threads(); i++) {
197                 Thread *thr = get_thread(int_to_id(i));
198                 if (scheduler->is_sleep_set(thr)) {
199                         if (should_wake_up(curr, thr))
200                                 /* Remove this thread from sleep set */
201                                 scheduler->remove_sleep(thr);
202                 }
203         }
204 }
205
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	/* Flag lives in snapshotted state; consumers read it elsewhere */
	priv->bad_synchronization = true;
}
212
213 bool ModelExecution::assert_bug(const char *msg)
214 {
215         priv->bugs.push_back(new bug_message(msg));
216
217         if (isfeasibleprefix()) {
218                 set_assert();
219                 return true;
220         }
221         return false;
222 }
223
224 /** @return True, if any bugs have been reported for this execution */
225 bool ModelExecution::have_bug_reports() const
226 {
227         return priv->bugs.size() != 0;
228 }
229
/** @return a pointer to the list of bug messages recorded so far */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
234
235 /**
236  * Check whether the current trace has triggered an assertion which should halt
237  * its execution.
238  *
239  * @return True, if the execution should be aborted; false otherwise
240  */
241 bool ModelExecution::has_asserted() const
242 {
243         return priv->asserted;
244 }
245
246 /**
247  * Trigger a trace assertion which should cause this execution to be halted.
248  * This can be due to a detected bug or due to an infeasibility that should
249  * halt ASAP.
250  */
251 void ModelExecution::set_assert()
252 {
253         priv->asserted = true;
254 }
255
256 /**
257  * Check if we are in a deadlock. Should only be called at the end of an
258  * execution, although it should not give false positives in the middle of an
259  * execution (there should be some ENABLED thread).
260  *
261  * @return True if program is in a deadlock; false otherwise
262  */
263 bool ModelExecution::is_deadlocked() const
264 {
265         bool blocking_threads = false;
266         for (unsigned int i = 0; i < get_num_threads(); i++) {
267                 thread_id_t tid = int_to_id(i);
268                 if (is_enabled(tid))
269                         return false;
270                 Thread *t = get_thread(tid);
271                 if (!t->is_model_thread() && t->get_pending())
272                         blocking_threads = true;
273         }
274         return blocking_threads;
275 }
276
277 /**
278  * Check if this is a complete execution. That is, have all thread completed
279  * execution (rather than exiting because sleep sets have forced a redundant
280  * execution).
281  *
282  * @return True if the execution is complete.
283  */
284 bool ModelExecution::is_complete_execution() const
285 {
286         for (unsigned int i = 0; i < get_num_threads(); i++)
287                 if (is_enabled(int_to_id(i)))
288                         return false;
289         return true;
290 }
291
292 /**
293  * @brief Find the last fence-related backtracking conflict for a ModelAction
294  *
295  * This function performs the search for the most recent conflicting action
296  * against which we should perform backtracking, as affected by fence
297  * operations. This includes pairs of potentially-synchronizing actions which
298  * occur due to fence-acquire or fence-release, and hence should be explored in
299  * the opposite execution order.
300  *
301  * @param act The current action
302  * @return The most recent action which conflicts with act due to fences
303  */
304 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
305 {
306         /* Only perform release/acquire fence backtracking for stores */
307         if (!act->is_write())
308                 return NULL;
309
310         /* Find a fence-release (or, act is a release) */
311         ModelAction *last_release;
312         if (act->is_release())
313                 last_release = act;
314         else
315                 last_release = get_last_fence_release(act->get_tid());
316         if (!last_release)
317                 return NULL;
318
319         /* Skip past the release */
320         action_list_t *list = action_trace;
321         action_list_t::reverse_iterator rit;
322         for (rit = list->rbegin(); rit != list->rend(); rit++)
323                 if (*rit == last_release)
324                         break;
325         ASSERT(rit != list->rend());
326
327         /* Find a prior:
328          *   load-acquire
329          * or
330          *   load --sb-> fence-acquire */
331         ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
332         ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
333         bool found_acquire_fences = false;
334         for ( ; rit != list->rend(); rit++) {
335                 ModelAction *prev = *rit;
336                 if (act->same_thread(prev))
337                         continue;
338
339                 int tid = id_to_int(prev->get_tid());
340
341                 if (prev->is_read() && act->same_var(prev)) {
342                         if (prev->is_acquire()) {
343                                 /* Found most recent load-acquire, don't need
344                                  * to search for more fences */
345                                 if (!found_acquire_fences)
346                                         return NULL;
347                         } else {
348                                 prior_loads[tid] = prev;
349                         }
350                 }
351                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
352                         found_acquire_fences = true;
353                         acquire_fences[tid] = prev;
354                 }
355         }
356
357         ModelAction *latest_backtrack = NULL;
358         for (unsigned int i = 0; i < acquire_fences.size(); i++)
359                 if (acquire_fences[i] && prior_loads[i])
360                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
361                                 latest_backtrack = acquire_fences[i];
362         return latest_backtrack;
363 }
364
365 /**
366  * @brief Find the last backtracking conflict for a ModelAction
367  *
368  * This function performs the search for the most recent conflicting action
369  * against which we should perform backtracking. This primary includes pairs of
370  * synchronizing actions which should be explored in the opposite execution
371  * order.
372  *
373  * @param act The current action
374  * @return The most recent action which conflicts with act
375  */
376 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
377 {
378         switch (act->get_type()) {
379         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
380         case ATOMIC_READ:
381         case ATOMIC_WRITE:
382         case ATOMIC_RMW: {
383                 ModelAction *ret = NULL;
384
385                 /* linear search: from most recent to oldest */
386                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
387                 action_list_t::reverse_iterator rit;
388                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
389                         ModelAction *prev = *rit;
390                         if (prev->could_synchronize_with(act)) {
391                                 ret = prev;
392                                 break;
393                         }
394                 }
395
396                 ModelAction *ret2 = get_last_fence_conflict(act);
397                 if (!ret2)
398                         return ret;
399                 if (!ret)
400                         return ret2;
401                 if (*ret < *ret2)
402                         return ret2;
403                 return ret;
404         }
405         case ATOMIC_LOCK:
406         case ATOMIC_TRYLOCK: {
407                 /* linear search: from most recent to oldest */
408                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
409                 action_list_t::reverse_iterator rit;
410                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
411                         ModelAction *prev = *rit;
412                         if (act->is_conflicting_lock(prev))
413                                 return prev;
414                 }
415                 break;
416         }
417         case ATOMIC_UNLOCK: {
418                 /* linear search: from most recent to oldest */
419                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
420                 action_list_t::reverse_iterator rit;
421                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
422                         ModelAction *prev = *rit;
423                         if (!act->same_thread(prev) && prev->is_failed_trylock())
424                                 return prev;
425                 }
426                 break;
427         }
428         case ATOMIC_WAIT: {
429                 /* linear search: from most recent to oldest */
430                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
431                 action_list_t::reverse_iterator rit;
432                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
433                         ModelAction *prev = *rit;
434                         if (!act->same_thread(prev) && prev->is_failed_trylock())
435                                 return prev;
436                         if (!act->same_thread(prev) && prev->is_notify())
437                                 return prev;
438                 }
439                 break;
440         }
441
442         case ATOMIC_NOTIFY_ALL:
443         case ATOMIC_NOTIFY_ONE: {
444                 /* linear search: from most recent to oldest */
445                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
446                 action_list_t::reverse_iterator rit;
447                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
448                         ModelAction *prev = *rit;
449                         if (!act->same_thread(prev) && prev->is_wait())
450                                 return prev;
451                 }
452                 break;
453         }
454         default:
455                 break;
456         }
457         return NULL;
458 }
459
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param the ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	/* The node at which the conflicting action was chosen */
	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was already enabled at the conflict point, only that
	 * thread needs to be considered; otherwise consider all threads */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		/* Skip tid if some enabled thread holds priority and tid does not */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
543
544 /**
545  * @brief Cache the a backtracking point as the "most recent", if eligible
546  *
547  * Note that this does not prepare the NodeStack for this backtracking
548  * operation, it only caches the action on a per-execution basis
549  *
550  * @param act The operation at which we should explore a different next action
551  * (i.e., backtracking point)
552  * @return True, if this action is now the most recent backtracking point;
553  * false otherwise
554  */
555 bool ModelExecution::set_latest_backtrack(ModelAction *act)
556 {
557         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
558                 priv->next_backtrack = act;
559                 return true;
560         }
561         return false;
562 }
563
564 /**
565  * Returns last backtracking point. The model checker will explore a different
566  * path for this point in the next execution.
567  * @return The ModelAction at which the next execution should diverge.
568  */
569 ModelAction * ModelExecution::get_next_backtrack()
570 {
571         ModelAction *next = priv->next_backtrack;
572         priv->next_backtrack = NULL;
573         return next;
574 }
575
576 /**
577  * Processes a read model action.
578  * @param curr is the read model action to process.
579  * @return True if processing this read updates the mo_graph.
580  */
581 bool ModelExecution::process_read(ModelAction *curr)
582 {
583         Node *node = curr->get_node();
584         while (true) {
585                 bool updated = false;
586                 switch (node->get_read_from_status()) {
587                 case READ_FROM_PAST: {
588                         const ModelAction *rf = node->get_read_from_past();
589                         ASSERT(rf);
590
591                         mo_graph->startChanges();
592
593                         ASSERT(!is_infeasible());
594                         if (!check_recency(curr, rf)) {
595                                 if (node->increment_read_from()) {
596                                         mo_graph->rollbackChanges();
597                                         continue;
598                                 } else {
599                                         priv->too_many_reads = true;
600                                 }
601                         }
602
603                         updated = r_modification_order(curr, rf);
604                         read_from(curr, rf);
605                         mo_graph->commitChanges();
606                         mo_check_promises(curr, true);
607                         break;
608                 }
609                 case READ_FROM_PROMISE: {
610                         Promise *promise = curr->get_node()->get_read_from_promise();
611                         if (promise->add_reader(curr))
612                                 priv->failed_promise = true;
613                         curr->set_read_from_promise(promise);
614                         mo_graph->startChanges();
615                         if (!check_recency(curr, promise))
616                                 priv->too_many_reads = true;
617                         updated = r_modification_order(curr, promise);
618                         mo_graph->commitChanges();
619                         break;
620                 }
621                 case READ_FROM_FUTURE: {
622                         /* Read from future value */
623                         struct future_value fv = node->get_future_value();
624                         Promise *promise = new Promise(this, curr, fv);
625                         curr->set_read_from_promise(promise);
626                         promises->push_back(promise);
627                         mo_graph->startChanges();
628                         updated = r_modification_order(curr, promise);
629                         mo_graph->commitChanges();
630                         break;
631                 }
632                 default:
633                         ASSERT(false);
634                 }
635                 get_thread(curr)->set_return_value(curr->get_return_value());
636                 return updated;
637         }
638 }
639
640 /**
641  * Processes a lock, trylock, or unlock model action.  @param curr is
642  * the read model action to process.
643  *
644  * The try lock operation checks whether the lock is taken.  If not,
645  * it falls to the normal lock operation case.  If so, it returns
646  * fail.
647  *
648  * The lock operation has already been checked that it is enabled, so
649  * it just grabs the lock and synchronizes with the previous unlock.
650  *
651  * The unlock operation has to re-enable all of the threads that are
652  * waiting on the lock.
653  *
654  * @return True if synchronization was updated; false otherwise
655  */
656 bool ModelExecution::process_mutex(ModelAction *curr)
657 {
658         std::mutex *mutex = curr->get_mutex();
659         struct std::mutex_state *state = NULL;
660
661         if (mutex)
662                 state = mutex->get_state();
663
664         switch (curr->get_type()) {
665         case ATOMIC_TRYLOCK: {
666                 bool success = !state->locked;
667                 curr->set_try_lock(success);
668                 if (!success) {
669                         get_thread(curr)->set_return_value(0);
670                         break;
671                 }
672                 get_thread(curr)->set_return_value(1);
673         }
674                 //otherwise fall into the lock case
675         case ATOMIC_LOCK: {
676                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
677                         assert_bug("Lock access before initialization");
678                 state->locked = get_thread(curr);
679                 ModelAction *unlock = get_last_unlock(curr);
680                 //synchronize with the previous unlock statement
681                 if (unlock != NULL) {
682                         synchronize(unlock, curr);
683                         return true;
684                 }
685                 break;
686         }
687         case ATOMIC_WAIT:
688         case ATOMIC_UNLOCK: {
689                 /* wake up the other threads */
690                 for (unsigned int i = 0; i < get_num_threads(); i++) {
691                         Thread *t = get_thread(int_to_id(i));
692                         Thread *curr_thrd = get_thread(curr);
693                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
694                                 scheduler->wake(t);
695                 }
696
697                 /* unlock the lock - after checking who was waiting on it */
698                 state->locked = NULL;
699
700                 if (!curr->is_wait())
701                         break; /* The rest is only for ATOMIC_WAIT */
702
703                 /* Should we go to sleep? (simulate spurious failures) */
704                 if (curr->get_node()->get_misc() == 0) {
705                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
706                         /* disable us */
707                         scheduler->sleep(get_thread(curr));
708                 }
709                 break;
710         }
711         case ATOMIC_NOTIFY_ALL: {
712                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
713                 //activate all the waiting threads
714                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
715                         scheduler->wake(get_thread(*rit));
716                 }
717                 waiters->clear();
718                 break;
719         }
720         case ATOMIC_NOTIFY_ONE: {
721                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
722                 int wakeupthread = curr->get_node()->get_misc();
723                 action_list_t::iterator it = waiters->begin();
724                 advance(it, wakeupthread);
725                 scheduler->wake(get_thread(*it));
726                 waiters->erase(it);
727                 break;
728         }
729
730         default:
731                 ASSERT(0);
732         }
733         return false;
734 }
735
736 /**
737  * @brief Check if the current pending promises allow a future value to be sent
738  *
739  * If one of the following is true:
740  *  (a) there are no pending promises
741  *  (b) the reader and writer do not cross any promises
742  * Then, it is safe to pass a future value back now.
743  *
744  * Otherwise, we must save the pending future value until (a) or (b) is true
745  *
746  * @param writer The operation which sends the future value. Must be a write.
747  * @param reader The operation which will observe the value. Must be a read.
748  * @return True if the future value can be sent now; false if it must wait.
749  */
750 bool ModelExecution::promises_may_allow(const ModelAction *writer,
751                 const ModelAction *reader) const
752 {
753         if (promises->empty())
754                 return true;
755         for(int i=promises->size()-1;i>=0;i--) {
756                 ModelAction *pr=(*promises)[i]->get_reader(0);
757                 //reader is after promise...doesn't cross any promise
758                 if (*reader > *pr)
759                         return true;
760                 //writer is after promise, reader before...bad...
761                 if (*writer > *pr)
762                         return false;
763         }
764         return true;
765 }
766
767 /**
768  * @brief Add a future value to a reader
769  *
770  * This function performs a few additional checks to ensure that the future
771  * value can be feasibly observed by the reader
772  *
773  * @param writer The operation whose value is sent. Must be a write.
774  * @param reader The read operation which may read the future value. Must be a read.
775  */
776 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
777 {
778         /* Do more ambitious checks now that mo is more complete */
779         if (!mo_may_allow(writer, reader))
780                 return;
781
782         Node *node = reader->get_node();
783
784         /* Find an ancestor thread which exists at the time of the reader */
785         Thread *write_thread = get_thread(writer);
786         while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
787                 write_thread = write_thread->get_parent();
788
789         struct future_value fv = {
790                 writer->get_write_value(),
791                 writer->get_seq_number() + params->maxfuturedelay,
792                 write_thread->get_id(),
793         };
794         if (node->add_future_value(fv))
795                 set_latest_backtrack(reader);
796 }
797
798 /**
799  * Process a write ModelAction
800  * @param curr The ModelAction to process
801  * @return True if the mo_graph was updated or promises were resolved
802  */
bool ModelExecution::process_write(ModelAction *curr)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* Add mo_graph constraints for this write; also collects the reads
	 * that might observe this write's value in the future */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	/* This write may resolve at most one outstanding Promise */
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the crossing promises are resolved */
				futurevalues->push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values; iterate in reverse so erase()
	 * does not disturb indices still to be visited */
	for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = (*futurevalues)[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues->erase(futurevalues->begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	/* Plain writes return no value to the user context */
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
849
850 /**
851  * Process a fence ModelAction
852  * @param curr The ModelAction to process
853  * @return True if synchronization was updated
854  */
bool ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurrence (not in this function), for
	 *   use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 *   sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = action_trace;
		action_list_t::reverse_iterator rit;
		/* Find X : is_read(X) && X --sb-> curr */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr)
				continue;
			/* Only actions in curr's own thread are sequenced-before it */
			if (act->get_tid() != curr->get_tid())
				continue;
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
				break;
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
				break;
			if (!act->is_read())
				continue;
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
				continue;

			/* Establish hypothetical release sequences */
			rel_heads_list_t release_heads;
			get_release_seq_heads(curr, act, &release_heads);
			for (unsigned int i = 0; i < release_heads.size(); i++)
				synchronize(release_heads[i], curr);
			if (release_heads.size() != 0)
				updated = true;
		}
	}
	return updated;
}
899
900 /**
901  * @brief Process the current action for thread-related activity
902  *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
904  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
905  * synchronization, etc.  This function is a no-op for non-THREAD actions
906  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
907  *
908  * @param curr The current action
909  * @return True if synchronization was updated or a thread completed
910  */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* Instantiate and register the child thread described by the
		 * creator's thread_params */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises->size(); i++) {
			Promise *promise = (*promises)[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		/* Synchronize with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises->size(); i++) {
			Promise *promise = (*promises)[i];
			if (promise->thread_is_available(th->get_id()))
				/* eliminate_thread() returning true means no
				 * candidate thread remains for this promise */
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* Re-check promises against the new thread's clock vector */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
967
968 /**
969  * @brief Process the current action for release sequence fixup activity
970  *
971  * Performs model-checker release sequence fixups for the current action,
972  * forcing a single pending release sequence to break (with a given, potential
973  * "loose" write) or to complete (i.e., synchronize). If a pending release
974  * sequence forms a complete release sequence, then we must perform the fixup
975  * synchronization, mo_graph additions, etc.
976  *
977  * @param curr The current action; must be a release sequence fixup action
978  * @param work_queue The work queue to which to add work items as they are
979  * generated
980  */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The Node records which (if any) "loose" write should break the
	 * sequence; NULL means force the sequence to complete instead */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	struct release_seq *sequence = pending_rel_seqs->back();
	pending_rel_seqs->pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check act for mo_graph edges */
		work_queue->push_back(MOEdgeWorkEntry(acquire));

		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				synchronize(acquire, propagate);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));
			}
		}
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1034
1035 /**
1036  * Initialize the current action by performing one or more of the following
1037  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1038  * in the NodeStack, manipulating backtracking sets, allocating and
1039  * initializing clock vectors, and computing the promises to fulfill.
1040  *
1041  * @param curr The current action, as passed from the user context; may be
1042  * freed/invalidated after the execution of this function, with a different
1043  * action "returned" its place (pass-by-reference)
1044  * @return True if curr is a newly-explored action; false otherwise
1045  */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second part of an RMW: merge it into the preceding RMWR
		 * action rather than exploring a new NodeStack node */
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL result means the NodeStack already holds an equivalent
	 * action from a previous exploration of this point */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			/* wait explores two misc. alternatives (presumably normal
			 * vs. spurious wakeup — confirm against Node) */
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* notify_one explores one alternative per current waiter */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1105
1106 /**
1107  * @brief Establish reads-from relation between two actions
1108  *
1109  * Perform basic operations involved with establishing a concrete rf relation,
1110  * including setting the ModelAction data and checking for release sequences.
1111  *
1112  * @param act The action that is reading (must be a read)
1113  * @param rf The action from which we are reading (must be a write)
1114  *
1115  * @return True if this read established synchronization
1116  */
1117 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1118 {
1119         ASSERT(rf);
1120         ASSERT(rf->is_write());
1121
1122         act->set_read_from(rf);
1123         if (act->is_acquire()) {
1124                 rel_heads_list_t release_heads;
1125                 get_release_seq_heads(act, act, &release_heads);
1126                 int num_heads = release_heads.size();
1127                 for (unsigned int i = 0; i < release_heads.size(); i++)
1128                         if (!synchronize(release_heads[i], act))
1129                                 num_heads--;
1130                 return num_heads > 0;
1131         }
1132         return false;
1133 }
1134
1135 /**
1136  * @brief Synchronizes two actions
1137  *
1138  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1139  * This function performs the synchronization as well as providing other hooks
1140  * for other checks along with synchronization.
1141  *
1142  * @param first The left-hand side of the synchronizes-with relation
1143  * @param second The right-hand side of the synchronizes-with relation
1144  * @return True if the synchronization was successful (i.e., was consistent
1145  * with the execution order); false otherwise
1146  */
1147 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1148 {
1149         if (*second < *first) {
1150                 set_bad_synchronization();
1151                 return false;
1152         }
1153         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1154         return second->synchronize_with(first);
1155 }
1156
1157 /**
1158  * Check promises and eliminate potentially-satisfying threads when a thread is
1159  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1160  * no longer satisfy a promise generated from that thread.
1161  *
1162  * @param blocker The thread on which a thread is waiting
1163  * @param waiting The waiting thread
1164  */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			/* Only promises read by the blocker are affected */
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}
1185
1186 /**
1187  * @brief Check whether a model action is enabled.
1188  *
1189  * Checks whether a lock or join operation would be successful (i.e., is the
1190  * lock already locked, or is the joined thread already complete). If not, put
1191  * the action in a waiter list.
1192  *
1193  * @param curr is the ModelAction to check whether it is enabled.
1194  * @return a bool that indicates whether the action is enabled.
1195  */
1196 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1197         if (curr->is_lock()) {
1198                 std::mutex *lock = curr->get_mutex();
1199                 struct std::mutex_state *state = lock->get_state();
1200                 if (state->locked)
1201                         return false;
1202         } else if (curr->is_thread_join()) {
1203                 Thread *blocking = curr->get_thread_operand();
1204                 if (!blocking->is_complete()) {
1205                         thread_blocking_check_promises(blocking, get_thread(curr));
1206                         return false;
1207                 }
1208         }
1209
1210         return true;
1211 }
1212
1213 /**
1214  * This is the heart of the model checker routine. It performs model-checking
1215  * actions corresponding to a given "current action." Among other processes, it
1216  * calculates reads-from relationships, updates synchronization clock vectors,
1217  * forms a memory_order constraints graph, and handles replay/backtrack
1218  * execution when running permutations of previously-observed executions.
1219  *
1220  * @param curr The current action to process
1221  * @return The ModelAction that is actually executed; may be different than
1222  * curr; may be NULL, if the current action is not enabled to run
1223  */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May replace 'curr' with an equivalent action from the NodeStack */
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work; processing
	 * one item may enqueue follow-up release-seq / mo-edge checks */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			/* NULL location means "re-check all locations" */
			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				/* A read observed either a concrete write or a Promise */
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1322
1323 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1324 {
1325         Node *currnode = curr->get_node();
1326         Node *parnode = currnode->get_parent();
1327
1328         if ((parnode && !parnode->backtrack_empty()) ||
1329                          !currnode->misc_empty() ||
1330                          !currnode->read_from_empty() ||
1331                          !currnode->promise_empty() ||
1332                          !currnode->relseq_break_empty()) {
1333                 set_latest_backtrack(curr);
1334         }
1335 }
1336
1337 bool ModelExecution::promises_expired() const
1338 {
1339         for (unsigned int i = 0; i < promises->size(); i++) {
1340                 Promise *promise = (*promises)[i];
1341                 if (promise->get_expiration() < priv->used_sequence_numbers)
1342                         return true;
1343         }
1344         return false;
1345 }
1346
1347 /**
1348  * This is the strongest feasibility check available.
1349  * @return whether the current trace (partial or complete) must be a prefix of
1350  * a feasible trace.
1351  */
1352 bool ModelExecution::isfeasibleprefix() const
1353 {
1354         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1355 }
1356
1357 /**
 * Print diagnostic information about an infeasible execution
1359  * @param prefix A string to prefix the output with; if NULL, then a default
1360  * message prefix will be provided
1361  */
1362 void ModelExecution::print_infeasibility(const char *prefix) const
1363 {
1364         char buf[100];
1365         char *ptr = buf;
1366         if (mo_graph->checkForCycles())
1367                 ptr += sprintf(ptr, "[mo cycle]");
1368         if (priv->failed_promise)
1369                 ptr += sprintf(ptr, "[failed promise]");
1370         if (priv->too_many_reads)
1371                 ptr += sprintf(ptr, "[too many reads]");
1372         if (priv->no_valid_reads)
1373                 ptr += sprintf(ptr, "[no valid reads-from]");
1374         if (priv->bad_synchronization)
1375                 ptr += sprintf(ptr, "[bad sw ordering]");
1376         if (promises_expired())
1377                 ptr += sprintf(ptr, "[promise expired]");
1378         if (promises->size() != 0)
1379                 ptr += sprintf(ptr, "[unresolved promise]");
1380         if (ptr != buf)
1381                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1382 }
1383
1384 /**
1385  * Returns whether the current completed trace is feasible, except for pending
1386  * release sequences.
1387  */
1388 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1389 {
1390         return !is_infeasible() && promises->size() == 0;
1391 }
1392
1393 /**
1394  * Check if the current partial trace is infeasible. Does not check any
1395  * end-of-execution flags, which might rule out the execution. Thus, this is
1396  * useful only for ruling an execution as infeasible.
1397  * @return whether the current partial trace is infeasible.
1398  */
1399 bool ModelExecution::is_infeasible() const
1400 {
1401         return mo_graph->checkForCycles() ||
1402                 priv->no_valid_reads ||
1403                 priv->failed_promise ||
1404                 priv->too_many_reads ||
1405                 priv->bad_synchronization ||
1406                 promises_expired();
1407 }
1408
1409 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
	/* Merge this RMW/RMWC into the thread's preceding RMWR action */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	if (act->is_rmw()) {
		/* An RMW's write is mo-ordered immediately after whatever it
		 * read from — either a concrete write or a Promise */
		if (lastread->get_reads_from())
			mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
		else
			mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
		mo_graph->commitChanges();
	}
	return lastread;
}
1422
1423 /**
1424  * A helper function for ModelExecution::check_recency, to check if the current
1425  * thread is able to read from a different write/promise for 'params.maxreads'
1426  * number of steps and if that write/promise should become visible (i.e., is
1427  * ordered later in the modification order). This helps model memory liveness.
1428  *
1429  * @param curr The current action. Must be a read.
1430  * @param rf The write/promise from which we plan to read
1431  * @param other_rf The write/promise from which we may read
1432  * @return True if we were able to read from other_rf for params.maxreads steps
1433  */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	/* Walk this thread's per-location action list backwards from curr */
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone?
	 * NOTE(review): rit is never checked against rend(); the caller
	 * (check_recency) appears to guarantee at least maxreads prior
	 * reads exist — confirm before reusing this helper elsewhere. */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1460
1461 /**
1462  * Checks whether a thread has read from the same write or Promise for too many
1463  * times without seeing the effects of a later write/Promise.
1464  *
1465  * Basic idea:
1466  * 1) there must a different write/promise that we could read from,
1467  * 2) we must have read from the same write/promise in excess of maxreads times,
1468  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1469  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1470  *
1471  * If so, we decide that the execution is no longer feasible.
1472  *
1473  * @param curr The current action. Must be a read.
1474  * @param rf The ModelAction/Promise from which we might read.
1475  * @return True if the read should succeed; false otherwise
1476  */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* Liveness checking is disabled when maxreads == 0 */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	/* Walk this thread's per-location action list backwards from curr */
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; bail out (read
	 * succeeds) as soon as any prior action breaks the streak */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* The streak is long enough; if any alternative write or promise
	 * could have been read instead, declare a liveness failure */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1525
1526 /**
1527  * @brief Updates the mo_graph with the constraints imposed from the current
1528  * read.
1529  *
1530  * Basic idea is the following: Go through each other thread and find
1531  * the last action that happened before our read.  Two cases:
1532  *
1533  * -# The action is a write: that write must either occur before
1534  * the write we read from or be the write we read from.
1535  * -# The action is a read: the write that that action read from
1536  * must occur before the write we read from or be the same write.
1537  *
1538  * @param curr The current action. Must be a read.
1539  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1540  * @return True if modification order edges were added; false otherwise
1541  */
template <typename rf_type>
bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	/* Per-thread action lists for the location being read */
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	ModelAction *last_sc_write = NULL;
	/* An SC read is additionally ordered against the last SC write to the
	 * same location (C++, Section 29.3 statement 3) */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Skip curr */
			if (act == curr)
				continue;
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				if (act->happens_before(curr))
					break;
				else
					continue;
			}

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/* C++, Section 29.3 statement 3 (second subpoint) */
			if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
				added = mo_graph->addEdge(act, rf) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					/* Case 1: a write hb-before us must be
					 * mo-before rf */
					added = mo_graph->addEdge(act, rf) || added;
				} else {
					/* Case 2: the write (or promise) a prior read
					 * saw must be mo-before rf, unless equal */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						/* NOTE(review): assumes a read with no
						 * reads-from write always has a reads-from
						 * promise (prevrf_promise != NULL) — verify */
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, (*promises)[i]) || added;

	return added;
}
1644
1645 /**
1646  * Updates the mo_graph with the constraints imposed from the current write.
1647  *
1648  * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
1650  *
1651  * (1) The action is a write => that write must occur before
1652  * the current write
1653  *
1654  * (2) The action is a read => the write that that action read from
1655  * must occur before the current write.
1656  *
1657  * This method also handles two other issues:
1658  *
1659  * (I) Sequential Consistency: Making sure that if the current write is
1660  * seq_cst, that it occurs after the previous seq_cst write.
1661  *
1662  * (II) Sending the write back to non-synchronizing reads.
1663  *
1664  * @param curr The current action. Must be a write.
1665  * @param send_fv A vector for stashing reads to which we may pass our future
1666  * value. If NULL, then don't record any future values.
1667  * @return True if modification order edges were added; false otherwise
1668  */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	/* Per-thread action lists for the location being written */
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * add whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
						continue;
					added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */
				if (send_fv && thin_air_constraint_may_allow(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, (*promises)[i]) || added;

	return added;
}
1783
/** Arbitrary reads from the future are not allowed.  Section 29.3
 * part 9 places some constraints.  This method checks one consequence of
 * those constraints.  Others require compiler support. */
1787 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1788 {
1789         if (!writer->is_rmw())
1790                 return true;
1791
1792         if (!reader->is_rmw())
1793                 return true;
1794
1795         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1796                 if (search == reader)
1797                         return false;
1798                 if (search->get_tid() == reader->get_tid() &&
1799                                 search->happens_before(reader))
1800                         break;
1801         }
1802
1803         return true;
1804 }
1805
1806 /**
1807  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks the following constraint (others
1809  * require compiler support):
1810  *
1811  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1812  */
1813 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1814 {
1815         SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1816         unsigned int i;
1817         /* Iterate over all threads */
1818         for (i = 0; i < thrd_lists->size(); i++) {
1819                 const ModelAction *write_after_read = NULL;
1820
1821                 /* Iterate over actions in thread, starting from most recent */
1822                 action_list_t *list = &(*thrd_lists)[i];
1823                 action_list_t::reverse_iterator rit;
1824                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1825                         ModelAction *act = *rit;
1826
1827                         /* Don't disallow due to act == reader */
1828                         if (!reader->happens_before(act) || reader == act)
1829                                 break;
1830                         else if (act->is_write())
1831                                 write_after_read = act;
1832                         else if (act->is_read() && act->get_reads_from() != NULL)
1833                                 write_after_read = act->get_reads_from();
1834                 }
1835
1836                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1837                         return false;
1838         }
1839         return true;
1840 }
1841
1842 /**
1843  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1844  * The ModelAction under consideration is expected to be taking part in
1845  * release/acquire synchronization as an object of the "reads from" relation.
1846  * Note that this can only provide release sequence support for RMW chains
1847  * which do not read from the future, as those actions cannot be traced until
1848  * their "promise" is fulfilled. Similarly, we may not even establish the
1849  * presence of a release sequence with certainty, as some modification order
1850  * constraints may be decided further in the future. Thus, this function
1851  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1852  * and a boolean representing certainty.
1853  *
1854  * @param rf The action that might be part of a release sequence. Must be a
1855  * write.
1856  * @param release_heads A pass-by-reference style return parameter. After
1857  * execution of this function, release_heads will contain the heads of all the
1858  * relevant release sequences, if any exists with certainty
1859  * @param pending A pass-by-reference style return parameter which is only used
1860  * when returning false (i.e., uncertain). Returns most information regarding
1861  * an uncertain release sequence, including any write operations that might
1862  * break the sequence.
1863  * @return true, if the ModelExecution is certain that release_heads is complete;
1864  * false otherwise
1865  */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk the RMW chain backwards from rf, collecting release writes
	 * (or their preceding fence-releases) along the way */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	/* Actions of rf's own thread at rf's location */
	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that might slot between
	 * 'release' and 'rf' in modification order, breaking the sequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* A thread whose last action is hb-after rf, or which can no
		 * longer run, cannot produce new interfering writes */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		/* Sequence established with certainty; discard suspects */
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record the endpoints so the sequence can be re-checked once
		 * more modification-order information is available */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2009
2010 /**
2011  * An interface for getting the release sequence head(s) with which a
2012  * given ModelAction must synchronize. This function only returns a non-empty
2013  * result when it can locate a release sequence head with certainty. Otherwise,
2014  * it may mark the internal state of the ModelExecution so that it will handle
2015  * the release sequence at a later time, causing @a acquire to update its
2016  * synchronization at some later point in execution.
2017  *
2018  * @param acquire The 'acquire' action that may synchronize with a release
2019  * sequence
2020  * @param read The read action that may read from a release sequence; this may
2021  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2022  * when 'acquire' is a fence-acquire)
2023  * @param release_heads A pass-by-reference return parameter. Will be filled
2024  * with the head(s) of the release sequence(s), if they exists with certainty.
2025  * @see ModelExecution::release_seq_heads
2026  */
2027 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2028                 ModelAction *read, rel_heads_list_t *release_heads)
2029 {
2030         const ModelAction *rf = read->get_reads_from();
2031         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2032         sequence->acquire = acquire;
2033         sequence->read = read;
2034
2035         if (!release_seq_heads(rf, release_heads, sequence)) {
2036                 /* add act to 'lazy checking' list */
2037                 pending_rel_seqs->push_back(sequence);
2038         } else {
2039                 snapshot_free(sequence);
2040         }
2041 }
2042
2043 /**
2044  * Attempt to resolve all stashed operations that might synchronize with a
2045  * release sequence for a given location. This implements the "lazy" portion of
2046  * determining whether or not a release sequence was contiguous, since not all
2047  * modification order information is present at the time an action occurs.
2048  *
2049  * @param location The location/object that should be checked for release
2050  * sequence resolutions. A NULL value means to check all locations.
2051  * @param work_queue The work queue to which to add work items as they are
2052  * generated
2053  * @return True if any updates occurred (new synchronization, new mo_graph
2054  * edges)
2055  */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-evaluate the release sequence with the current mo_graph */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with any newly-certain heads */
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		/* NOTE(review): 'updated' accumulates across iterations of the
		 * outer while-loop, so once any sequence adds synchronization,
		 * this re-check work is queued for every later pending sequence
		 * as well — looks conservatively intentional; verify */
		if (updated) {
			/* Re-check all pending release sequences */
			work_queue->push_back(CheckRelSeqWorkEntry(NULL));
			/* Re-check read-acquire for mo_graph edges */
			if (acquire->is_read())
				work_queue->push_back(MOEdgeWorkEntry(acquire));

			/* propagate synchronization to later actions */
			action_list_t::reverse_iterator rit = action_trace->rbegin();
			for (; (*rit) != acquire; rit++) {
				ModelAction *propagate = *rit;
				if (acquire->happens_before(propagate)) {
					synchronize(acquire, propagate);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));
				}
			}
		}
		/* Fully-resolved sequences are dropped; uncertain ones stay
		 * pending for a later pass */
		if (complete) {
			it = pending_rel_seqs->erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2111
2112 /**
2113  * Performs various bookkeeping operations for the current ModelAction. For
2114  * instance, adds action to the per-object, per-thread action vector and to the
2115  * action trace list of all thread actions.
2116  *
2117  * @param act is the ModelAction to add.
2118  */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
	if (list->empty() && act->is_atomic_var()) {
		/* First action on this atomic variable: seed the per-object
		 * list with an UNINIT placeholder so reads have a candidate
		 * store even before any real write occurs */
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	action_trace->push_back(act);
	/* The UNINIT action logically precedes everything in the trace */
	if (uninit)
		action_trace->push_front(uninit);

	/* Per-object, per-thread action lists */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Record the most recent action per thread */
	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;
	if (uninit)
		(*thrd_last_action)[uninit_id] = uninit;

	/* Track the latest release fence per thread, used when computing
	 * fence-based synchronization */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release->size() <= tid)
			thrd_last_fence_release->resize(get_num_threads());
		(*thrd_last_fence_release)[tid] = act;
	}

	/* A wait also operates on its mutex, so index it under the mutex
	 * location as well.
	 * NOTE(review): assumes the wait action's value field stores the
	 * mutex address — confirm against the wait-action construction site */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2165
2166 /**
2167  * @brief Get the last action performed by a particular Thread
2168  * @param tid The thread ID of the Thread in question
2169  * @return The last action in the thread
2170  */
2171 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2172 {
2173         int threadid = id_to_int(tid);
2174         if (threadid < (int)thrd_last_action->size())
2175                 return (*thrd_last_action)[id_to_int(tid)];
2176         else
2177                 return NULL;
2178 }
2179
2180 /**
2181  * @brief Get the last fence release performed by a particular Thread
2182  * @param tid The thread ID of the Thread in question
2183  * @return The last fence release in the thread, if one exists; NULL otherwise
2184  */
2185 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2186 {
2187         int threadid = id_to_int(tid);
2188         if (threadid < (int)thrd_last_fence_release->size())
2189                 return (*thrd_last_fence_release)[id_to_int(tid)];
2190         else
2191                 return NULL;
2192 }
2193
2194 /**
2195  * Gets the last memory_order_seq_cst write (in the total global sequence)
2196  * performed on a particular object (i.e., memory location), not including the
2197  * current action.
2198  * @param curr The current ModelAction; also denotes the object location to
2199  * check
2200  * @return The last seq_cst write
2201  */
ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
{
	void *location = curr->get_location();
	action_list_t *list = get_safe_ptr_action(obj_map, location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* First locate curr in the per-object list; this loop has no rend()
	 * guard, so it relies on curr already being present in the list */
	for (rit = list->rbegin(); (*rit) != curr; rit++)
		;
	rit++; /* Skip past curr */
	/* Walk backwards from curr for the most recent seq_cst write */
	for ( ; rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}
2216
2217 /**
2218  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2219  * performed in a particular thread, prior to a particular fence.
2220  * @param tid The ID of the thread to check
2221  * @param before_fence The fence from which to begin the search; if NULL, then
2222  * search for the most recent fence in the thread.
2223  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2224  */
2225 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2226 {
2227         /* All fences should have NULL location */
2228         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2229         action_list_t::reverse_iterator rit = list->rbegin();
2230
2231         if (before_fence) {
2232                 for (; rit != list->rend(); rit++)
2233                         if (*rit == before_fence)
2234                                 break;
2235
2236                 ASSERT(*rit == before_fence);
2237                 rit++;
2238         }
2239
2240         for (; rit != list->rend(); rit++)
2241                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2242                         return *rit;
2243         return NULL;
2244 }
2245
2246 /**
2247  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2248  * location). This function identifies the mutex according to the current
2249  * action, which is presumed to perform on the same mutex.
2250  * @param curr The current ModelAction; also denotes the object location to
2251  * check
2252  * @return The last unlock operation
2253  */
2254 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2255 {
2256         void *location = curr->get_location();
2257         action_list_t *list = get_safe_ptr_action(obj_map, location);
2258         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2259         action_list_t::reverse_iterator rit;
2260         for (rit = list->rbegin(); rit != list->rend(); rit++)
2261                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2262                         return *rit;
2263         return NULL;
2264 }
2265
2266 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2267 {
2268         ModelAction *parent = get_last_action(tid);
2269         if (!parent)
2270                 parent = get_thread(tid)->get_creation();
2271         return parent;
2272 }
2273
2274 /**
2275  * Returns the clock vector for a given thread.
2276  * @param tid The thread whose clock vector we want
2277  * @return Desired clock vector
2278  */
2279 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2280 {
2281         return get_parent_action(tid)->get_cv();
2282 }
2283
2284 /**
2285  * @brief Find the promise (if any) to resolve for the current action and
2286  * remove it from the pending promise vector
2287  * @param curr The current ModelAction. Should be a write.
2288  * @return The Promise to resolve, if any; otherwise NULL
2289  */
2290 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2291 {
2292         for (unsigned int i = 0; i < promises->size(); i++)
2293                 if (curr->get_node()->get_promise(i)) {
2294                         Promise *ret = (*promises)[i];
2295                         promises->erase(promises->begin() + i);
2296                         return ret;
2297                 }
2298         return NULL;
2299 }
2300
2301 /**
2302  * Resolve a Promise with a current write.
2303  * @param write The ModelAction that is fulfilling Promises
2304  * @param promise The Promise to resolve
2305  * @return True if the Promise was successfully resolved; false otherwise
2306  */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Point every reader of this promise at the fulfilling write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		read_from(read, write);
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	/* Resolution may fail in the mo_graph; record it as a failed promise */
	if (!mo_graph->resolvePromise(promise, write))
		priv->failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	return true;
}
2341
2342 /**
2343  * Compute the set of promises that could potentially be satisfied by this
2344  * action. Note that the set computation actually appears in the Node, not in
2345  * ModelExecution.
2346  * @param curr The ModelAction that may satisfy promises
2347  */
2348 void ModelExecution::compute_promises(ModelAction *curr)
2349 {
2350         for (unsigned int i = 0; i < promises->size(); i++) {
2351                 Promise *promise = (*promises)[i];
2352                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2353                         continue;
2354
2355                 bool satisfy = true;
2356                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2357                         const ModelAction *act = promise->get_reader(j);
2358                         if (act->happens_before(curr) ||
2359                                         act->could_synchronize_with(curr)) {
2360                                 satisfy = false;
2361                                 break;
2362                         }
2363                 }
2364                 if (satisfy)
2365                         curr->get_node()->set_promise(i);
2366         }
2367 }
2368
2369 /** Checks promises in response to change in ClockVector Threads. */
2370 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2371 {
2372         for (unsigned int i = 0; i < promises->size(); i++) {
2373                 Promise *promise = (*promises)[i];
2374                 if (!promise->thread_is_available(tid))
2375                         continue;
2376                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2377                         const ModelAction *act = promise->get_reader(j);
2378                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2379                                         merge_cv->synchronized_since(act)) {
2380                                 if (promise->eliminate_thread(tid)) {
2381                                         /* Promise has failed */
2382                                         priv->failed_promise = true;
2383                                         return;
2384                                 }
2385                         }
2386                 }
2387         }
2388 }
2389
2390 void ModelExecution::check_promises_thread_disabled()
2391 {
2392         for (unsigned int i = 0; i < promises->size(); i++) {
2393                 Promise *promise = (*promises)[i];
2394                 if (promise->has_failed()) {
2395                         priv->failed_promise = true;
2396                         return;
2397                 }
2398         }
2399 }
2400
2401 /**
2402  * @brief Checks promises in response to addition to modification order for
2403  * threads.
2404  *
2405  * We test whether threads are still available for satisfying promises after an
2406  * addition to our modification order constraints. Those that are unavailable
2407  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2408  * that promise has failed.
2409  *
2410  * @param act The ModelAction which updated the modification order
2411  * @param is_read_check Should be true if act is a read and we must check for
2412  * updates to the store from which it read (there is a distinction here for
2413  * RMW's, which are both a load and a store)
2414  */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read check, test against the store that 'act' read from;
	 * otherwise 'act' itself is the write of interest */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* If any reader of the promise happens before 'act', ask the
		 * mo_graph whether this write now invalidates the promise */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
				continue;
			if (mo_graph->checkPromise(write, promise)) {
				priv->failed_promise = true;
				return;
			}
			/* One happens-before reader suffices for this check */
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* If the promise is mo-reachable from this write, re-check
		 * whether the promise can still be satisfied */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->failed_promise = true;
				return;
			}
		}
	}
}
2449
2450 /**
2451  * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
2453  * calculations.
2454  *
2455  * @param curr The current ModelAction. Must be a release sequence fixup
2456  * action.
2457  */
2458 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2459 {
2460         if (pending_rel_seqs->empty())
2461                 return;
2462
2463         struct release_seq *pending = pending_rel_seqs->back();
2464         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2465                 const ModelAction *write = pending->writes[i];
2466                 curr->get_node()->add_relseq_break(write);
2467         }
2468
2469         /* NULL means don't break the sequence; just synchronize */
2470         curr->get_node()->add_relseq_break(NULL);
2471 }
2472
2473 /**
2474  * Build up an initial set of all past writes that this 'read' action may read
2475  * from, as well as any previously-observed future values that must still be valid.
2476  *
2477  * @param curr is the current ModelAction that we are exploring; it must be a
2478  * 'read' operation.
2479  */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* A seq_cst read is bounded by the last seq_cst write to this location */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: trial-run the mo_graph
				 * update and roll it back afterwards */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises->size(); i++) {
		const Promise *promise = (*promises)[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2554
2555 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2556 {
2557         for ( ; write != NULL; write = write->get_reads_from()) {
2558                 /* UNINIT actions don't have a Node, and they never sleep */
2559                 if (write->is_uninitialized())
2560                         return true;
2561                 Node *prevnode = write->get_node()->get_parent();
2562
2563                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2564                 if (write->is_release() && thread_sleep)
2565                         return true;
2566                 if (!write->is_rmw())
2567                         return false;
2568         }
2569         return true;
2570 }
2571
2572 /**
2573  * @brief Get an action representing an uninitialized atomic
2574  *
2575  * This function may create a new one or try to retrieve one from the NodeStack
2576  *
2577  * @param curr The current action, which prompts the creation of an UNINIT action
2578  * @return A pointer to the UNINIT ModelAction
2579  */
2580 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2581 {
2582         Node *node = curr->get_node();
2583         ModelAction *act = node->get_uninit_action();
2584         if (!act) {
2585                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2586                 node->set_uninit_action(act);
2587         }
2588         act->create_cv(NULL);
2589         return act;
2590 }
2591
2592 static void print_list(action_list_t *list)
2593 {
2594         action_list_t::iterator it;
2595
2596         model_print("---------------------------------------------------------------------\n");
2597
2598         unsigned int hash = 0;
2599
2600         for (it = list->begin(); it != list->end(); it++) {
2601                 const ModelAction *act = *it;
2602                 if (act->get_seq_number() > 0)
2603                         act->print();
2604                 hash = hash^(hash<<3)^((*it)->hash());
2605         }
2606         model_print("HASH %u\n", hash);
2607         model_print("---------------------------------------------------------------------\n");
2608 }
2609
2610 #if SUPPORT_MOD_ORDER_DUMP
2611 void ModelExecution::dumpGraph(char *filename) const
2612 {
2613         char buffer[200];
2614         sprintf(buffer, "%s.dot", filename);
2615         FILE *file = fopen(buffer, "w");
2616         fprintf(file, "digraph %s {\n", filename);
2617         mo_graph->dumpNodes(file);
2618         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2619
2620         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2621                 ModelAction *act = *it;
2622                 if (act->is_read()) {
2623                         mo_graph->dot_print_node(file, act);
2624                         if (act->get_reads_from())
2625                                 mo_graph->dot_print_edge(file,
2626                                                 act->get_reads_from(),
2627                                                 act,
2628                                                 "label=\"rf\", color=red, weight=2");
2629                         else
2630                                 mo_graph->dot_print_edge(file,
2631                                                 act->get_reads_from_promise(),
2632                                                 act,
2633                                                 "label=\"rf\", color=red");
2634                 }
2635                 if (thread_array[act->get_tid()]) {
2636                         mo_graph->dot_print_edge(file,
2637                                         thread_array[id_to_int(act->get_tid())],
2638                                         act,
2639                                         "label=\"sb\", color=blue, weight=400");
2640                 }
2641
2642                 thread_array[act->get_tid()] = act;
2643         }
2644         fprintf(file, "}\n");
2645         model_free(thread_array);
2646         fclose(file);
2647 }
2648 #endif
2649
2650 /** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the modification-order graph for this execution to .dot files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	model_print("Execution %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* All threads asleep means this execution is redundant under
		 * the sleep-set reduction */
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		model_print("\n");
	} else
		print_infeasibility(" INFEASIBLE");
	print_list(action_trace);
	model_print("\n");
	if (!promises->empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises->size(); i++) {
			model_print(" [P%u] ", i);
			(*promises)[i]->print();
		}
		model_print("\n");
	}
}
2679
2680 /**
2681  * Add a Thread to the system for the first time. Should only be called once
2682  * per thread.
2683  * @param t The Thread to add
2684  */
2685 void ModelExecution::add_thread(Thread *t)
2686 {
2687         thread_map->put(id_to_int(t->get_id()), t);
2688         if (!t->is_model_thread())
2689                 scheduler->add_thread(t);
2690 }
2691
2692 /**
2693  * @brief Get a Thread reference by its ID
2694  * @param tid The Thread's ID
2695  * @return A Thread reference
2696  */
2697 Thread * ModelExecution::get_thread(thread_id_t tid) const
2698 {
2699         return thread_map->get(id_to_int(tid));
2700 }
2701
2702 /**
2703  * @brief Get a reference to the Thread in which a ModelAction was executed
2704  * @param act The ModelAction
2705  * @return A Thread reference
2706  */
2707 Thread * ModelExecution::get_thread(const ModelAction *act) const
2708 {
2709         return get_thread(act->get_tid());
2710 }
2711
2712 /**
2713  * @brief Get a Promise's "promise number"
2714  *
2715  * A "promise number" is an index number that is unique to a promise, valid
2716  * only for a specific snapshot of an execution trace. Promises may come and go
 * as they are generated and resolved, so an index only retains meaning for the
2718  * current snapshot.
2719  *
2720  * @param promise The Promise to check
2721  * @return The promise index, if the promise still is valid; otherwise -1
2722  */
2723 int ModelExecution::get_promise_number(const Promise *promise) const
2724 {
2725         for (unsigned int i = 0; i < promises->size(); i++)
2726                 if ((*promises)[i] == promise)
2727                         return i;
2728         /* Not found */
2729         return -1;
2730 }
2731
2732 /**
2733  * @brief Check if a Thread is currently enabled
2734  * @param t The Thread to check
2735  * @return True if the Thread is currently enabled
2736  */
2737 bool ModelExecution::is_enabled(Thread *t) const
2738 {
2739         return scheduler->is_enabled(t);
2740 }
2741
2742 /**
2743  * @brief Check if a Thread is currently enabled
2744  * @param tid The ID of the Thread to check
2745  * @return True if the Thread is currently enabled
2746  */
2747 bool ModelExecution::is_enabled(thread_id_t tid) const
2748 {
2749         return scheduler->is_enabled(tid);
2750 }
2751
2752 /**
 * @brief Select the next thread to execute based on the current action
2754  *
2755  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2756  * actions should be followed by the execution of their child thread. In either
2757  * case, the current action should determine the next thread schedule.
2758  *
2759  * @param curr The current action
2760  * @return The next thread to run, if the current action will determine this
2761  * selection; otherwise NULL
2762  */
2763 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2764 {
2765         /* Do not split atomic RMW */
2766         if (curr->is_rmwr())
2767                 return get_thread(curr);
2768         /* Follow CREATE with the created thread */
2769         if (curr->get_type() == THREAD_CREATE)
2770                 return curr->get_thread_operand();
2771         return NULL;
2772 }
2773
2774 /** @return True if the execution has taken too many steps */
2775 bool ModelExecution::too_many_steps() const
2776 {
2777         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2778 }
2779
2780 /**
2781  * Takes the next step in the execution, if possible.
2782  * @param curr The current step to take
2783  * @return Returns the next Thread to run, if any; NULL if this execution
2784  * should terminate
2785  */
Thread * ModelExecution::take_step(ModelAction *curr)
{
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* Process the action; note that check_current_action() may return a
	 * different ModelAction object than the one passed in */
	curr = check_current_action(curr);
	ASSERT(curr);

	/* Threads that became blocked or finished no longer participate in
	 * scheduling */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	return action_select_next_thread(curr);
}
2800
2801 /**
2802  * Launch end-of-execution release sequence fixups only when
2803  * the execution is otherwise feasible AND there are:
2804  *
2805  * (1) pending release sequences
2806  * (2) pending assertions that could be invalidated by a change
2807  * in clock vectors (i.e., data races)
2808  * (3) no pending promises
2809  */
2810 void ModelExecution::fixup_release_sequences()
2811 {
2812         while (!pending_rel_seqs->empty() &&
2813                         is_feasible_prefix_ignore_relseq() &&
2814                         !unrealizedraces.empty()) {
2815                 model_print("*** WARNING: release sequence fixup action "
2816                                 "(%zu pending release seuqence(s)) ***\n",
2817                                 pending_rel_seqs->size());
2818                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2819                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2820                                 model_thread);
2821                 take_step(fixup);
2822         };
2823 }