Bug fixes from running Coverity
[model-checker.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 *
 * All fields here are rolled back/restored together with the snapshot, so
 * per-execution state that must survive a rollback does not belong here.
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* bug_message objects are owned by this struct; free them */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to hand out to the next created Thread */
	unsigned int next_thread_id;
	/** @brief Highest sequence number assigned so far */
	modelclock_t used_sequence_numbers;
	/** @brief Cached most-recent backtracking point (may be NULL) */
	ModelAction *next_backtrack;
	/** @brief Bug reports recorded during this execution (owned) */
	SnapVector<bug_message *> bugs;
	/** @brief A promise failed to resolve */
	bool failed_promise;
	bool hard_failed_promise;
	/** @brief A read exhausted its may-read-from alternatives (recency) */
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief A trace assertion was triggered; halt this execution */
	bool asserted;

	SNAPSHOTALLOC
};
60
/**
 * @brief Constructor
 *
 * @param m The ModelChecker that owns this execution
 * @param params The model-checking parameters (stored by pointer, not copied)
 * @param scheduler The Scheduler used for thread selection
 * @param node_stack The NodeStack recording the exploration tree
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	add_thread(model_thread);
	/* Let the scheduler and node stack call back into this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
89
90 /** @brief Destructor */
91 ModelExecution::~ModelExecution()
92 {
93         for (unsigned int i = 0; i < get_num_threads(); i++)
94                 delete get_thread(int_to_id(i));
95
96         for (unsigned int i = 0; i < promises.size(); i++)
97                 delete promises[i];
98
99         delete mo_graph;
100         delete priv;
101 }
102
/** @return the current execution number, as tracked by the owning ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
107
108 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
109 {
110         action_list_t *tmp = hash->get(ptr);
111         if (tmp == NULL) {
112                 tmp = new action_list_t();
113                 hash->put(ptr, tmp);
114         }
115         return tmp;
116 }
117
118 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
119 {
120         SnapVector<action_list_t> *tmp = hash->get(ptr);
121         if (tmp == NULL) {
122                 tmp = new SnapVector<action_list_t>();
123                 hash->put(ptr, tmp);
124         }
125         return tmp;
126 }
127
128 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
129 {
130         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
131         if (wrv==NULL)
132                 return NULL;
133         unsigned int thread=id_to_int(tid);
134         if (thread < wrv->size())
135                 return &(*wrv)[thread];
136         else
137                 return NULL;
138 }
139
140 /** @return a thread ID for a new Thread */
141 thread_id_t ModelExecution::get_next_id()
142 {
143         return priv->next_thread_id++;
144 }
145
/** @return the number of user threads created during this execution
 *  NOTE(review): this counts every ID handed out by get_next_id(), which
 *  includes the special model-checker thread created in the constructor —
 *  confirm callers expect that. */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}
151
/** @return a sequence number for a new ModelAction (pre-increment: the
 *  first number handed out is 1, so 0 can serve as "no sequence") */
modelclock_t ModelExecution::get_next_seq_num()
{
	return ++priv->used_sequence_numbers;
}
157
158 /**
159  * @brief Should the current action wake up a given thread?
160  *
161  * @param curr The current action
162  * @param thread The thread that we might wake up
163  * @return True, if we should wake up the sleeping thread; false otherwise
164  */
165 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
166 {
167         const ModelAction *asleep = thread->get_pending();
168         /* Don't allow partial RMW to wake anyone up */
169         if (curr->is_rmwr())
170                 return false;
171         /* Synchronizing actions may have been backtracked */
172         if (asleep->could_synchronize_with(curr))
173                 return true;
174         /* All acquire/release fences and fence-acquire/store-release */
175         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
176                 return true;
177         /* Fence-release + store can awake load-acquire on the same location */
178         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
179                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
180                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
181                         return true;
182         }
183         return false;
184 }
185
186 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
187 {
188         for (unsigned int i = 0; i < get_num_threads(); i++) {
189                 Thread *thr = get_thread(int_to_id(i));
190                 if (scheduler->is_sleep_set(thr)) {
191                         if (should_wake_up(curr, thr))
192                                 /* Remove this thread from sleep set */
193                                 scheduler->remove_sleep(thr);
194                 }
195         }
196 }
197
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made (sticky flag; snapshotted with priv) */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
204
205 bool ModelExecution::assert_bug(const char *msg)
206 {
207         priv->bugs.push_back(new bug_message(msg));
208
209         if (isfeasibleprefix()) {
210                 set_assert();
211                 return true;
212         }
213         return false;
214 }
215
216 /** @return True, if any bugs have been reported for this execution */
217 bool ModelExecution::have_bug_reports() const
218 {
219         return priv->bugs.size() != 0;
220 }
221
/** @return the list of bug reports recorded so far (owned by priv; do not free) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
226
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
237
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
247
248 /**
249  * Check if we are in a deadlock. Should only be called at the end of an
250  * execution, although it should not give false positives in the middle of an
251  * execution (there should be some ENABLED thread).
252  *
253  * @return True if program is in a deadlock; false otherwise
254  */
255 bool ModelExecution::is_deadlocked() const
256 {
257         bool blocking_threads = false;
258         for (unsigned int i = 0; i < get_num_threads(); i++) {
259                 thread_id_t tid = int_to_id(i);
260                 if (is_enabled(tid))
261                         return false;
262                 Thread *t = get_thread(tid);
263                 if (!t->is_model_thread() && t->get_pending())
264                         blocking_threads = true;
265         }
266         return blocking_threads;
267 }
268
269 /**
270  * @brief Check if we are yield-blocked
271  *
272  * A program can be "yield-blocked" if all threads are ready to execute a
273  * yield.
274  *
275  * @return True if the program is yield-blocked; false otherwise
276  */
277 bool ModelExecution::is_yieldblocked() const
278 {
279         if (!params->yieldblock)
280                 return false;
281
282         for (unsigned int i = 0; i < get_num_threads(); i++) {
283                 thread_id_t tid = int_to_id(i);
284                 Thread *t = get_thread(tid);
285                 if (t->get_pending() && t->get_pending()->is_yield())
286                         return true;
287         }
288         return false;
289 }
290
291 /**
292  * Check if this is a complete execution. That is, have all thread completed
293  * execution (rather than exiting because sleep sets have forced a redundant
294  * execution).
295  *
296  * @return True if the execution is complete.
297  */
298 bool ModelExecution::is_complete_execution() const
299 {
300         if (is_yieldblocked())
301                 return false;
302         for (unsigned int i = 0; i < get_num_threads(); i++)
303                 if (is_enabled(int_to_id(i)))
304                         return false;
305         return true;
306 }
307
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	/* last_release must appear in the trace */
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread records, indexed by thread ID, initialized to NULL */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	/* Continue walking backwards from the release found above */
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Only consider actions from other threads */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				/* Plain load: may pair with a later fence-acquire */
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Among threads with both a fence-acquire and a prior load on this
	 * variable, pick the latest fence as the backtracking point */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
380
381 /**
382  * @brief Find the last backtracking conflict for a ModelAction
383  *
384  * This function performs the search for the most recent conflicting action
385  * against which we should perform backtracking. This primary includes pairs of
386  * synchronizing actions which should be explored in the opposite execution
387  * order.
388  *
389  * @param act The current action
390  * @return The most recent action which conflicts with act
391  */
392 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
393 {
394         switch (act->get_type()) {
395         case ATOMIC_FENCE:
396                 /* Only seq-cst fences can (directly) cause backtracking */
397                 if (!act->is_seqcst())
398                         break;
399         case ATOMIC_READ:
400         case ATOMIC_WRITE:
401         case ATOMIC_RMW: {
402                 ModelAction *ret = NULL;
403
404                 /* linear search: from most recent to oldest */
405                 action_list_t *list = obj_map.get(act->get_location());
406                 action_list_t::reverse_iterator rit;
407                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
408                         ModelAction *prev = *rit;
409                         if (prev == act)
410                                 continue;
411                         if (prev->could_synchronize_with(act)) {
412                                 ret = prev;
413                                 break;
414                         }
415                 }
416
417                 ModelAction *ret2 = get_last_fence_conflict(act);
418                 if (!ret2)
419                         return ret;
420                 if (!ret)
421                         return ret2;
422                 if (*ret < *ret2)
423                         return ret2;
424                 return ret;
425         }
426         case ATOMIC_LOCK:
427         case ATOMIC_TRYLOCK: {
428                 /* linear search: from most recent to oldest */
429                 action_list_t *list = obj_map.get(act->get_location());
430                 action_list_t::reverse_iterator rit;
431                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
432                         ModelAction *prev = *rit;
433                         if (act->is_conflicting_lock(prev))
434                                 return prev;
435                 }
436                 break;
437         }
438         case ATOMIC_UNLOCK: {
439                 /* linear search: from most recent to oldest */
440                 action_list_t *list = obj_map.get(act->get_location());
441                 action_list_t::reverse_iterator rit;
442                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
443                         ModelAction *prev = *rit;
444                         if (!act->same_thread(prev) && prev->is_failed_trylock())
445                                 return prev;
446                 }
447                 break;
448         }
449         case ATOMIC_WAIT: {
450                 /* linear search: from most recent to oldest */
451                 action_list_t *list = obj_map.get(act->get_location());
452                 action_list_t::reverse_iterator rit;
453                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
454                         ModelAction *prev = *rit;
455                         if (!act->same_thread(prev) && prev->is_failed_trylock())
456                                 return prev;
457                         if (!act->same_thread(prev) && prev->is_notify())
458                                 return prev;
459                 }
460                 break;
461         }
462
463         case ATOMIC_NOTIFY_ALL:
464         case ATOMIC_NOTIFY_ONE: {
465                 /* linear search: from most recent to oldest */
466                 action_list_t *list = obj_map.get(act->get_location());
467                 action_list_t::reverse_iterator rit;
468                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
469                         ModelAction *prev = *rit;
470                         if (!act->same_thread(prev) && prev->is_wait())
471                                 return prev;
472                 }
473                 break;
474         }
475         default:
476                 break;
477         }
478         return NULL;
479 }
480
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * See Dynamic Partial Order Reduction (Flanagan & Godefroid, POPL '05)
 * and its addendum for the backtracking-set construction used here.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	/* No conflicting prior action -> nothing to backtrack against */
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	/* Backtracking choices are recorded at the node *before* prev */
	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at that node, only it needs to be tried;
	 * otherwise conservatively consider every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			/* NOTE: loop variable 't' shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			/* NOTE: loop variable 't' shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
564
565 /**
566  * @brief Cache the a backtracking point as the "most recent", if eligible
567  *
568  * Note that this does not prepare the NodeStack for this backtracking
569  * operation, it only caches the action on a per-execution basis
570  *
571  * @param act The operation at which we should explore a different next action
572  * (i.e., backtracking point)
573  * @return True, if this action is now the most recent backtracking point;
574  * false otherwise
575  */
576 bool ModelExecution::set_latest_backtrack(ModelAction *act)
577 {
578         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
579                 priv->next_backtrack = act;
580                 return true;
581         }
582         return false;
583 }
584
585 /**
586  * Returns last backtracking point. The model checker will explore a different
587  * path for this point in the next execution.
588  * @return The ModelAction at which the next execution should diverge.
589  */
590 ModelAction * ModelExecution::get_next_backtrack()
591 {
592         ModelAction *next = priv->next_backtrack;
593         priv->next_backtrack = NULL;
594         return next;
595 }
596
/**
 * Processes a read model action.
 *
 * Dispatches on the Node's read-from status: a read may take its value from
 * a past write, from an existing promise, or from a speculative future value
 * (which creates a new Promise). The loop retries with the next read-from
 * candidate when the recency (too-many-reads) check fails.
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			/* Read from a write that already occurred in the trace */
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				/* Recency check failed: try the next candidate
				 * write, rolling back speculative mo edges */
				if (node->increment_read_from()) {
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			/* Read from a not-yet-resolved promised write */
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a new Promise obligating a future write of fv */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Publish the read value to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
660
661 /**
662  * Processes a lock, trylock, or unlock model action.  @param curr is
663  * the read model action to process.
664  *
665  * The try lock operation checks whether the lock is taken.  If not,
666  * it falls to the normal lock operation case.  If so, it returns
667  * fail.
668  *
669  * The lock operation has already been checked that it is enabled, so
670  * it just grabs the lock and synchronizes with the previous unlock.
671  *
672  * The unlock operation has to re-enable all of the threads that are
673  * waiting on the lock.
674  *
675  * @return True if synchronization was updated; false otherwise
676  */
677 bool ModelExecution::process_mutex(ModelAction *curr)
678 {
679         std::mutex *mutex = curr->get_mutex();
680         struct std::mutex_state *state = NULL;
681
682         if (mutex)
683                 state = mutex->get_state();
684
685         switch (curr->get_type()) {
686         case ATOMIC_TRYLOCK: {
687                 bool success = !state->locked;
688                 curr->set_try_lock(success);
689                 if (!success) {
690                         get_thread(curr)->set_return_value(0);
691                         break;
692                 }
693                 get_thread(curr)->set_return_value(1);
694         }
695                 //otherwise fall into the lock case
696         case ATOMIC_LOCK: {
697                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
698                         assert_bug("Lock access before initialization");
699                 state->locked = get_thread(curr);
700                 ModelAction *unlock = get_last_unlock(curr);
701                 //synchronize with the previous unlock statement
702                 if (unlock != NULL) {
703                         synchronize(unlock, curr);
704                         return true;
705                 }
706                 break;
707         }
708         case ATOMIC_WAIT:
709         case ATOMIC_UNLOCK: {
710                 /* wake up the other threads */
711                 for (unsigned int i = 0; i < get_num_threads(); i++) {
712                         Thread *t = get_thread(int_to_id(i));
713                         Thread *curr_thrd = get_thread(curr);
714                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
715                                 scheduler->wake(t);
716                 }
717
718                 /* unlock the lock - after checking who was waiting on it */
719                 state->locked = NULL;
720
721                 if (!curr->is_wait())
722                         break; /* The rest is only for ATOMIC_WAIT */
723
724                 /* Should we go to sleep? (simulate spurious failures) */
725                 if (curr->get_node()->get_misc() == 0) {
726                         get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
727                         /* disable us */
728                         scheduler->sleep(get_thread(curr));
729                 }
730                 break;
731         }
732         case ATOMIC_NOTIFY_ALL: {
733                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
734                 //activate all the waiting threads
735                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
736                         scheduler->wake(get_thread(*rit));
737                 }
738                 waiters->clear();
739                 break;
740         }
741         case ATOMIC_NOTIFY_ONE: {
742                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
743                 int wakeupthread = curr->get_node()->get_misc();
744                 action_list_t::iterator it = waiters->begin();
745                 advance(it, wakeupthread);
746                 scheduler->wake(get_thread(*it));
747                 waiters->erase(it);
748                 break;
749         }
750
751         default:
752                 ASSERT(0);
753         }
754         return false;
755 }
756
757 /**
758  * @brief Check if the current pending promises allow a future value to be sent
759  *
760  * It is unsafe to pass a future value back if there exists a pending promise Pr
761  * such that:
762  *
763  *    reader --exec-> Pr --exec-> writer
764  *
765  * If such Pr exists, we must save the pending future value until Pr is
766  * resolved.
767  *
768  * @param writer The operation which sends the future value. Must be a write.
769  * @param reader The operation which will observe the value. Must be a read.
770  * @return True if the future value can be sent now; false if it must wait.
771  */
772 bool ModelExecution::promises_may_allow(const ModelAction *writer,
773                 const ModelAction *reader) const
774 {
775         for (int i = promises.size() - 1; i >= 0; i--) {
776                 ModelAction *pr = promises[i]->get_reader(0);
777                 //reader is after promise...doesn't cross any promise
778                 if (*reader > *pr)
779                         return true;
780                 //writer is after promise, reader before...bad...
781                 if (*writer > *pr)
782                         return false;
783         }
784         return true;
785 }
786
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	/* (the writer's thread may have been created after the reader's node,
	 * so walk up the parent chain until the thread ID is in range) */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Expiration is the writer's sequence number plus the configured delay */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* If this is a new future value for the node, it becomes a candidate
	 * backtracking point */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
817
818 /**
819  * Process a write ModelAction
820  * @param curr The ModelAction to process
821  * @param work The work queue, for adding fixup work
822  * @return True if the mo_graph was updated or promises were resolved
823  */
bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* Update the mo_graph and collect potential future-value readers */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	/* This write may resolve at most one outstanding promise */
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise, work);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the crossing promise resolves */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values; iterate backwards so that an
	 * erase() does not disturb the indices still to be visited */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	/* Writes return no meaningful value to the user program */
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
870
871 /**
872  * Process a fence ModelAction
873  * @param curr The ModelAction to process
874  * @return True if synchronization was updated
875  */
bool ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurence (not in this function), for
	 *   use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 *   sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = &action_trace;
		action_list_t::reverse_iterator rit;
		/* Find X : is_read(X) && X --sb-> curr */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr)
				continue;
			/* Only same-thread actions are sequenced-before curr */
			if (act->get_tid() != curr->get_tid())
				continue;
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
				break;
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
				break;
			if (!act->is_read())
				continue;
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
				continue;

			/* Establish hypothetical release sequences */
			rel_heads_list_t release_heads;
			get_release_seq_heads(curr, act, &release_heads);
			for (unsigned int i = 0; i < release_heads.size(); i++)
				synchronize(release_heads[i], curr);
			if (release_heads.size() != 0)
				updated = true;
		}
	}
	return updated;
}
920
921 /**
922  * @brief Process the current action for thread-related activity
923  *
924  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
925  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
926  * synchronization, etc.  This function is a no-op for non-THREAD actions
927  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
928  *
929  * @param curr The current action
930  * @return True if synchronization was updated or a thread completed
931  */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* Instantiate the new Thread from the parameters packed into
		 * the action's location/value fields */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		/* Join synchronizes with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* Re-check promises against the started thread's clock vector */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
988
989 /**
990  * @brief Process the current action for release sequence fixup activity
991  *
992  * Performs model-checker release sequence fixups for the current action,
993  * forcing a single pending release sequence to break (with a given, potential
994  * "loose" write) or to complete (i.e., synchronize). If a pending release
995  * sequence forms a complete release sequence, then we must perform the fixup
996  * synchronization, mo_graph additions, etc.
997  *
998  * @param curr The current action; must be a release sequence fixup action
999  * @param work_queue The work queue to which to add work items as they are
1000  * generated
1001  */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The Node records which write (if any) should break the sequence;
	 * NULL means "force the sequence to complete" */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;

		/* Propagate the changed clock vector */
		propagate_clockvector(acquire, work_queue);
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1043
1044 /**
1045  * Initialize the current action by performing one or more of the following
1046  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1047  * in the NodeStack, manipulating backtracking sets, allocating and
1048  * initializing clock vectors, and computing the promises to fulfill.
1049  *
1050  * @param curr The current action, as passed from the user context; may be
1051  * freed/invalidated after the execution of this function, with a different
1052  * action "returned" its place (pass-by-reference)
1053  * @return True if curr is a newly-explored action; false otherwise
1054  */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second part of a RMW: merge into the prior RMWR action
		 * instead of exploring a new NodeStack node */
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL return means this action was seen on a prior execution */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* notify_one may wake any one of the current waiters */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1114
1115 /**
1116  * @brief Establish reads-from relation between two actions
1117  *
1118  * Perform basic operations involved with establishing a concrete rf relation,
1119  * including setting the ModelAction data and checking for release sequences.
1120  *
1121  * @param act The action that is reading (must be a read)
1122  * @param rf The action from which we are reading (must be a write)
1123  *
1124  * @return True if this read established synchronization
1125  */
1126 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1127 {
1128         ASSERT(rf);
1129         ASSERT(rf->is_write());
1130
1131         act->set_read_from(rf);
1132         if (act->is_acquire()) {
1133                 rel_heads_list_t release_heads;
1134                 get_release_seq_heads(act, act, &release_heads);
1135                 int num_heads = release_heads.size();
1136                 for (unsigned int i = 0; i < release_heads.size(); i++)
1137                         if (!synchronize(release_heads[i], act))
1138                                 num_heads--;
1139                 return num_heads > 0;
1140         }
1141         return false;
1142 }
1143
1144 /**
1145  * @brief Synchronizes two actions
1146  *
1147  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1148  * This function performs the synchronization as well as providing other hooks
1149  * for other checks along with synchronization.
1150  *
1151  * @param first The left-hand side of the synchronizes-with relation
1152  * @param second The right-hand side of the synchronizes-with relation
1153  * @return True if the synchronization was successful (i.e., was consistent
1154  * with the execution order); false otherwise
1155  */
1156 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1157 {
1158         if (*second < *first) {
1159                 set_bad_synchronization();
1160                 return false;
1161         }
1162         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1163         return second->synchronize_with(first);
1164 }
1165
1166 /**
1167  * Check promises and eliminate potentially-satisfying threads when a thread is
1168  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1169  * no longer satisfy a promise generated from that thread.
1170  *
1171  * @param blocker The thread on which a thread is waiting
1172  * @param waiting The waiting thread
1173  */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		/* If any reader of this promise runs in the blocking thread,
		 * then 'waiting' can never satisfy the promise */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				return;
			}
		}
	}
}
1194
1195 /**
1196  * @brief Check whether a model action is enabled.
1197  *
1198  * Checks whether an operation would be successful (i.e., is a lock already
1199  * locked, or is the joined thread already complete).
1200  *
1201  * For yield-blocking, yields are never enabled.
1202  *
1203  * @param curr is the ModelAction to check whether it is enabled.
1204  * @return a bool that indicates whether the action is enabled.
1205  */
1206 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1207         if (curr->is_lock()) {
1208                 std::mutex *lock = curr->get_mutex();
1209                 struct std::mutex_state *state = lock->get_state();
1210                 if (state->locked)
1211                         return false;
1212         } else if (curr->is_thread_join()) {
1213                 Thread *blocking = curr->get_thread_operand();
1214                 if (!blocking->is_complete()) {
1215                         thread_blocking_check_promises(blocking, get_thread(curr));
1216                         return false;
1217                 }
1218         } else if (params->yieldblock && curr->is_yield()) {
1219                 return false;
1220         }
1221
1222         return true;
1223 }
1224
1225 /**
1226  * This is the heart of the model checker routine. It performs model-checking
1227  * actions corresponding to a given "current action." Among other processes, it
1228  * calculates reads-from relationships, updates synchronization clock vectors,
1229  * forms a memory_order constraints graph, and handles replay/backtrack
1230  * execution when running permutations of previously-observed executions.
1231  *
1232  * @param curr The current action to process
1233  * @return The ModelAction that is actually executed; may be different than
1234  * curr
1235  */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May replace 'curr' with an equivalent action from the NodeStack */
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the work queue; each item may itself generate more work */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			/* The read half of a merged RMW was already processed */
			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act, &work_queue))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				/* A read sources either a concrete write or a Promise */
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			/* New mo edges may complete pending release sequences */
			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1334
1335 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1336 {
1337         Node *currnode = curr->get_node();
1338         Node *parnode = currnode->get_parent();
1339
1340         if ((parnode && !parnode->backtrack_empty()) ||
1341                          !currnode->misc_empty() ||
1342                          !currnode->read_from_empty() ||
1343                          !currnode->promise_empty() ||
1344                          !currnode->relseq_break_empty()) {
1345                 set_latest_backtrack(curr);
1346         }
1347 }
1348
1349 bool ModelExecution::promises_expired() const
1350 {
1351         for (unsigned int i = 0; i < promises.size(); i++) {
1352                 Promise *promise = promises[i];
1353                 if (promise->get_expiration() < priv->used_sequence_numbers)
1354                         return true;
1355         }
1356         return false;
1357 }
1358
1359 /**
1360  * This is the strongest feasibility check available.
1361  * @return whether the current trace (partial or complete) must be a prefix of
1362  * a feasible trace.
1363  */
1364 bool ModelExecution::isfeasibleprefix() const
1365 {
1366         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1367 }
1368
1369 /**
1370  * Print disagnostic information about an infeasible execution
1371  * @param prefix A string to prefix the output with; if NULL, then a default
1372  * message prefix will be provided
1373  */
1374 void ModelExecution::print_infeasibility(const char *prefix) const
1375 {
1376         char buf[100];
1377         char *ptr = buf;
1378         if (mo_graph->checkForCycles())
1379                 ptr += sprintf(ptr, "[mo cycle]");
1380         if (priv->failed_promise || priv->hard_failed_promise)
1381                 ptr += sprintf(ptr, "[failed promise]");
1382         if (priv->too_many_reads)
1383                 ptr += sprintf(ptr, "[too many reads]");
1384         if (priv->no_valid_reads)
1385                 ptr += sprintf(ptr, "[no valid reads-from]");
1386         if (priv->bad_synchronization)
1387                 ptr += sprintf(ptr, "[bad sw ordering]");
1388         if (promises_expired())
1389                 ptr += sprintf(ptr, "[promise expired]");
1390         if (promises.size() != 0)
1391                 ptr += sprintf(ptr, "[unresolved promise]");
1392         if (ptr != buf)
1393                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1394 }
1395
1396 /**
1397  * Returns whether the current completed trace is feasible, except for pending
1398  * release sequences.
1399  */
1400 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1401 {
1402         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1403
1404 }
1405
1406 /**
1407  * Check if the current partial trace is infeasible. Does not check any
1408  * end-of-execution flags, which might rule out the execution. Thus, this is
1409  * useful only for ruling an execution as infeasible.
1410  * @return whether the current partial trace is infeasible.
1411  */
1412 bool ModelExecution::is_infeasible() const
1413 {
1414         return mo_graph->checkForCycles() ||
1415                 priv->no_valid_reads ||
1416                 priv->too_many_reads ||
1417                 priv->bad_synchronization ||
1418                 priv->hard_failed_promise ||
1419                 promises_expired();
1420 }
1421
1422 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1423 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1424         ModelAction *lastread = get_last_action(act->get_tid());
1425         lastread->process_rmw(act);
1426         if (act->is_rmw()) {
1427                 if (lastread->get_reads_from())
1428                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1429                 else
1430                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1431                 mo_graph->commitChanges();
1432         }
1433         return lastread;
1434 }
1435
1436 /**
1437  * A helper function for ModelExecution::check_recency, to check if the current
1438  * thread is able to read from a different write/promise for 'params.maxreads'
1439  * number of steps and if that write/promise should become visible (i.e., is
1440  * ordered later in the modification order). This helps model memory liveness.
1441  *
1442  * @param curr The current action. Must be a read.
1443  * @param rf The write/promise from which we plan to read
1444  * @param other_rf The write/promise from which we may read
1445  * @return True if we were able to read from other_rf for params.maxreads steps
1446  */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone?
	 * NOTE(review): this loop advances 'rit' maxreads times without
	 * checking list->rend(); it appears to rely on check_recency()
	 * having already verified that at least maxreads prior actions
	 * exist — confirm before adding other callers */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1473
1474 /**
1475  * Checks whether a thread has read from the same write or Promise for too many
1476  * times without seeing the effects of a later write/Promise.
1477  *
1478  * Basic idea:
1479  * 1) there must a different write/promise that we could read from,
1480  * 2) we must have read from the same write/promise in excess of maxreads times,
1481  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1482  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1483  *
1484  * If so, we decide that the execution is no longer feasible.
1485  *
1486  * @param curr The current action. Must be a read.
1487  * @param rf The ModelAction/Promise from which we might read.
1488  * @return True if the read should succeed; false otherwise
1489  */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* Liveness checking disabled */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; the read
	 * trivially succeeds if any of the last maxreads actions is not a
	 * read of the same write/promise, or has no alternatives */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* Check whether an mo-later write or promise was continuously
	 * available to read from instead; if so, fail for liveness */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1538
1539 /**
1540  * @brief Updates the mo_graph with the constraints imposed from the current
1541  * read.
1542  *
1543  * Basic idea is the following: Go through each other thread and find
1544  * the last action that happened before our read.  Two cases:
1545  *
1546  * -# The action is a write: that write must either occur before
1547  * the write we read from or be the write we read from.
1548  * -# The action is a read: the write that that action read from
1549  * must occur before the write we read from or be the same write.
1550  *
1551  * @param curr The current action. Must be a read.
1552  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1553  * @return True if modification order edges were added; false otherwise
1554  */
1555 template <typename rf_type>
1556 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1557 {
1558         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1559         unsigned int i;
1560         bool added = false;
1561         ASSERT(curr->is_read());
1562
1563         /* Last SC fence in the current thread */
1564         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1565         ModelAction *last_sc_write = NULL;
1566         if (curr->is_seqcst())
1567                 last_sc_write = get_last_seq_cst_write(curr);
1568
1569         /* Iterate over all threads */
1570         for (i = 0; i < thrd_lists->size(); i++) {
1571                 /* Last SC fence in thread i */
1572                 ModelAction *last_sc_fence_thread_local = NULL;
1573                 if (int_to_id((int)i) != curr->get_tid())
1574                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1575
1576                 /* Last SC fence in thread i, before last SC fence in current thread */
1577                 ModelAction *last_sc_fence_thread_before = NULL;
1578                 if (last_sc_fence_local)
1579                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1580
1581                 /* Iterate over actions in thread, starting from most recent */
1582                 action_list_t *list = &(*thrd_lists)[i];
1583                 action_list_t::reverse_iterator rit;
1584                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1585                         ModelAction *act = *rit;
1586
1587                         /* Skip curr */
1588                         if (act == curr)
1589                                 continue;
1590                         /* Don't want to add reflexive edges on 'rf' */
1591                         if (act->equals(rf)) {
1592                                 if (act->happens_before(curr))
1593                                         break;
1594                                 else
1595                                         continue;
1596                         }
1597
1598                         if (act->is_write()) {
1599                                 /* C++, Section 29.3 statement 5 */
1600                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1601                                                 *act < *last_sc_fence_thread_local) {
1602                                         added = mo_graph->addEdge(act, rf) || added;
1603                                         break;
1604                                 }
1605                                 /* C++, Section 29.3 statement 4 */
1606                                 else if (act->is_seqcst() && last_sc_fence_local &&
1607                                                 *act < *last_sc_fence_local) {
1608                                         added = mo_graph->addEdge(act, rf) || added;
1609                                         break;
1610                                 }
1611                                 /* C++, Section 29.3 statement 6 */
1612                                 else if (last_sc_fence_thread_before &&
1613                                                 *act < *last_sc_fence_thread_before) {
1614                                         added = mo_graph->addEdge(act, rf) || added;
1615                                         break;
1616                                 }
1617                         }
1618
1619                         /* C++, Section 29.3 statement 3 (second subpoint) */
1620                         if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1621                                 added = mo_graph->addEdge(act, rf) || added;
1622                                 break;
1623                         }
1624
1625                         /*
1626                          * Include at most one act per-thread that "happens
1627                          * before" curr
1628                          */
1629                         if (act->happens_before(curr)) {
1630                                 if (act->is_write()) {
1631                                         added = mo_graph->addEdge(act, rf) || added;
1632                                 } else {
1633                                         const ModelAction *prevrf = act->get_reads_from();
1634                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1635                                         if (prevrf) {
1636                                                 if (!prevrf->equals(rf))
1637                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1638                                         } else if (!prevrf_promise->equals(rf)) {
1639                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1640                                         }
1641                                 }
1642                                 break;
1643                         }
1644                 }
1645         }
1646
1647         /*
1648          * All compatible, thread-exclusive promises must be ordered after any
1649          * concrete loads from the same thread
1650          */
1651         for (unsigned int i = 0; i < promises.size(); i++)
1652                 if (promises[i]->is_compatible_exclusive(curr))
1653                         added = mo_graph->addEdge(rf, promises[i]) || added;
1654
1655         return added;
1656 }
1657
1658 /**
1659  * Updates the mo_graph with the constraints imposed from the current write.
1660  *
1661  * Basic idea is the following: Go through each other thread and find
1662  * the lastest action that happened before our write.  Two cases:
1663  *
1664  * (1) The action is a write => that write must occur before
1665  * the current write
1666  *
1667  * (2) The action is a read => the write that that action read from
1668  * must occur before the current write.
1669  *
1670  * This method also handles two other issues:
1671  *
1672  * (I) Sequential Consistency: Making sure that if the current write is
1673  * seq_cst, that it occurs after the previous seq_cst write.
1674  *
1675  * (II) Sending the write back to non-synchronizing reads.
1676  *
1677  * @param curr The current action. Must be a write.
1678  * @param send_fv A vector for stashing reads to which we may pass our future
1679  * value. If NULL, then don't record any future values.
1680  * @return True if modification order edges were added; false otherwise
1681  */
1682 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1683 {
1684         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1685         unsigned int i;
1686         bool added = false;
1687         ASSERT(curr->is_write());
1688
1689         if (curr->is_seqcst()) {
1690                 /* We have to at least see the last sequentially consistent write,
1691                          so we are initialized. */
1692                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1693                 if (last_seq_cst != NULL) {
1694                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1695                 }
1696         }
1697
1698         /* Last SC fence in the current thread */
1699         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1700
1701         /* Iterate over all threads */
1702         for (i = 0; i < thrd_lists->size(); i++) {
1703                 /* Last SC fence in thread i, before last SC fence in current thread */
1704                 ModelAction *last_sc_fence_thread_before = NULL;
1705                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1706                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1707
1708                 /* Iterate over actions in thread, starting from most recent */
1709                 action_list_t *list = &(*thrd_lists)[i];
1710                 action_list_t::reverse_iterator rit;
1711                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1712                         ModelAction *act = *rit;
1713                         if (act == curr) {
1714                                 /*
1715                                  * 1) If RMW and it actually read from something, then we
1716                                  * already have all relevant edges, so just skip to next
1717                                  * thread.
1718                                  *
1719                                  * 2) If RMW and it didn't read from anything, we should
1720                                  * whatever edge we can get to speed up convergence.
1721                                  *
1722                                  * 3) If normal write, we need to look at earlier actions, so
1723                                  * continue processing list.
1724                                  */
1725                                 if (curr->is_rmw()) {
1726                                         if (curr->get_reads_from() != NULL)
1727                                                 break;
1728                                         else
1729                                                 continue;
1730                                 } else
1731                                         continue;
1732                         }
1733
1734                         /* C++, Section 29.3 statement 7 */
1735                         if (last_sc_fence_thread_before && act->is_write() &&
1736                                         *act < *last_sc_fence_thread_before) {
1737                                 added = mo_graph->addEdge(act, curr) || added;
1738                                 break;
1739                         }
1740
1741                         /*
1742                          * Include at most one act per-thread that "happens
1743                          * before" curr
1744                          */
1745                         if (act->happens_before(curr)) {
1746                                 /*
1747                                  * Note: if act is RMW, just add edge:
1748                                  *   act --mo--> curr
1749                                  * The following edge should be handled elsewhere:
1750                                  *   readfrom(act) --mo--> act
1751                                  */
1752                                 if (act->is_write())
1753                                         added = mo_graph->addEdge(act, curr) || added;
1754                                 else if (act->is_read()) {
1755                                         //if previous read accessed a null, just keep going
1756                                         if (act->get_reads_from() == NULL) {
1757                                                 added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
1758                                         } else
1759                                                 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1760                                 }
1761                                 break;
1762                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1763                                                      !act->same_thread(curr)) {
1764                                 /* We have an action that:
1765                                    (1) did not happen before us
1766                                    (2) is a read and we are a write
1767                                    (3) cannot synchronize with us
1768                                    (4) is in a different thread
1769                                    =>
1770                                    that read could potentially read from our write.  Note that
1771                                    these checks are overly conservative at this point, we'll
1772                                    do more checks before actually removing the
1773                                    pendingfuturevalue.
1774
1775                                  */
1776
1777                                 if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
1778                                         if (!is_infeasible())
1779                                                 send_fv->push_back(act);
1780                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1781                                                 add_future_value(curr, act);
1782                                 }
1783                         }
1784                 }
1785         }
1786
1787         /*
1788          * All compatible, thread-exclusive promises must be ordered after any
1789          * concrete stores to the same thread, or else they can be merged with
1790          * this store later
1791          */
1792         for (unsigned int i = 0; i < promises.size(); i++)
1793                 if (promises[i]->is_compatible_exclusive(curr))
1794                         added = mo_graph->addEdge(curr, promises[i]) || added;
1795
1796         return added;
1797 }
1798
//This procedure uses cohere to prune future values that are
//guaranteed to generate a coherence violation.
//
//need to see if there is (1) a promise for thread write, (2)
//the promise is sb before write, (3) the promise can only be
//resolved by the thread read, and (4) the promise has same
//location as read/write
//
//Returns false when sending 'write' back to 'read' would certainly
//violate coherence; true when the future value may still be allowed.

bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises from newest to oldest */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* (4) the promise must be on the same location as read/write */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						bool okay=false;
						/* Walk backwards through node-stack parents from
						 * 'read' toward 'prreader'.
						 * NOTE(review): assumes prreader is an ancestor
						 * of read in the node stack; otherwise this walk
						 * would run past the root -- confirm */
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						/* An intervening same-thread read means this
						 * promise does not forbid the future value */
						if (okay)
							continue;
					}
					return false;
				}
			}
		}
	}
	return true;
}
1841
1842
1843 /** Arbitrary reads from the future are not allowed.  Section 29.3
1844  * part 9 places some constraints.  This method checks one result of constraint
1845  * constraint.  Others require compiler support. */
1846 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1847 {
1848         if (!writer->is_rmw())
1849                 return true;
1850
1851         if (!reader->is_rmw())
1852                 return true;
1853
1854         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1855                 if (search == reader)
1856                         return false;
1857                 if (search->get_tid() == reader->get_tid() &&
1858                                 search->happens_before(reader))
1859                         break;
1860         }
1861
1862         return true;
1863 }
1864
1865 /**
1866  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1867  * some constraints. This method checks one the following constraint (others
1868  * require compiler support):
1869  *
1870  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1871  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1872  */
1873 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1874 {
1875         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1876         unsigned int i;
1877         /* Iterate over all threads */
1878         for (i = 0; i < thrd_lists->size(); i++) {
1879                 const ModelAction *write_after_read = NULL;
1880
1881                 /* Iterate over actions in thread, starting from most recent */
1882                 action_list_t *list = &(*thrd_lists)[i];
1883                 action_list_t::reverse_iterator rit;
1884                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1885                         ModelAction *act = *rit;
1886
1887                         /* Don't disallow due to act == reader */
1888                         if (!reader->happens_before(act) || reader == act)
1889                                 break;
1890                         else if (act->is_write())
1891                                 write_after_read = act;
1892                         else if (act->is_read() && act->get_reads_from() != NULL)
1893                                 write_after_read = act->get_reads_from();
1894                 }
1895
1896                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1897                         return false;
1898         }
1899         return true;
1900 }
1901
1902 /**
1903  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1904  * The ModelAction under consideration is expected to be taking part in
1905  * release/acquire synchronization as an object of the "reads from" relation.
1906  * Note that this can only provide release sequence support for RMW chains
1907  * which do not read from the future, as those actions cannot be traced until
1908  * their "promise" is fulfilled. Similarly, we may not even establish the
1909  * presence of a release sequence with certainty, as some modification order
1910  * constraints may be decided further in the future. Thus, this function
1911  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1912  * and a boolean representing certainty.
1913  *
1914  * @param rf The action that might be part of a release sequence. Must be a
1915  * write.
1916  * @param release_heads A pass-by-reference style return parameter. After
1917  * execution of this function, release_heads will contain the heads of all the
1918  * relevant release sequences, if any exists with certainty
1919  * @param pending A pass-by-reference style return parameter which is only used
1920  * when returning false (i.e., uncertain). Returns most information regarding
1921  * an uncertain release sequence, including any write operations that might
1922  * break the sequence.
1923  * @return true, if the ModelExecution is certain that release_heads is complete;
1924  * false otherwise
1925  */
1926 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1927                 rel_heads_list_t *release_heads,
1928                 struct release_seq *pending) const
1929 {
1930         /* Only check for release sequences if there are no cycles */
1931         if (mo_graph->checkForCycles())
1932                 return false;
1933
1934         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1935                 ASSERT(rf->is_write());
1936
1937                 if (rf->is_release())
1938                         release_heads->push_back(rf);
1939                 else if (rf->get_last_fence_release())
1940                         release_heads->push_back(rf->get_last_fence_release());
1941                 if (!rf->is_rmw())
1942                         break; /* End of RMW chain */
1943
1944                 /** @todo Need to be smarter here...  In the linux lock
1945                  * example, this will run to the beginning of the program for
1946                  * every acquire. */
1947                 /** @todo The way to be smarter here is to keep going until 1
1948                  * thread has a release preceded by an acquire and you've seen
1949                  *       both. */
1950
1951                 /* acq_rel RMW is a sufficient stopping condition */
1952                 if (rf->is_acquire() && rf->is_release())
1953                         return true; /* complete */
1954         };
1955         if (!rf) {
1956                 /* read from future: need to settle this later */
1957                 pending->rf = NULL;
1958                 return false; /* incomplete */
1959         }
1960
1961         if (rf->is_release())
1962                 return true; /* complete */
1963
1964         /* else relaxed write
1965          * - check for fence-release in the same thread (29.8, stmt. 3)
1966          * - check modification order for contiguous subsequence
1967          *   -> rf must be same thread as release */
1968
1969         const ModelAction *fence_release = rf->get_last_fence_release();
1970         /* Synchronize with a fence-release unconditionally; we don't need to
1971          * find any more "contiguous subsequence..." for it */
1972         if (fence_release)
1973                 release_heads->push_back(fence_release);
1974
1975         int tid = id_to_int(rf->get_tid());
1976         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1977         action_list_t *list = &(*thrd_lists)[tid];
1978         action_list_t::const_reverse_iterator rit;
1979
1980         /* Find rf in the thread list */
1981         rit = std::find(list->rbegin(), list->rend(), rf);
1982         ASSERT(rit != list->rend());
1983
1984         /* Find the last {write,fence}-release */
1985         for (; rit != list->rend(); rit++) {
1986                 if (fence_release && *(*rit) < *fence_release)
1987                         break;
1988                 if ((*rit)->is_release())
1989                         break;
1990         }
1991         if (rit == list->rend()) {
1992                 /* No write-release in this thread */
1993                 return true; /* complete */
1994         } else if (fence_release && *(*rit) < *fence_release) {
1995                 /* The fence-release is more recent (and so, "stronger") than
1996                  * the most recent write-release */
1997                 return true; /* complete */
1998         } /* else, need to establish contiguous release sequence */
1999         ModelAction *release = *rit;
2000
2001         ASSERT(rf->same_thread(release));
2002
2003         pending->writes.clear();
2004
2005         bool certain = true;
2006         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2007                 if (id_to_int(rf->get_tid()) == (int)i)
2008                         continue;
2009                 list = &(*thrd_lists)[i];
2010
2011                 /* Can we ensure no future writes from this thread may break
2012                  * the release seq? */
2013                 bool future_ordered = false;
2014
2015                 ModelAction *last = get_last_action(int_to_id(i));
2016                 Thread *th = get_thread(int_to_id(i));
2017                 if ((last && rf->happens_before(last)) ||
2018                                 !is_enabled(th) ||
2019                                 th->is_complete())
2020                         future_ordered = true;
2021
2022                 ASSERT(!th->is_model_thread() || future_ordered);
2023
2024                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2025                         const ModelAction *act = *rit;
2026                         /* Reach synchronization -> this thread is complete */
2027                         if (act->happens_before(release))
2028                                 break;
2029                         if (rf->happens_before(act)) {
2030                                 future_ordered = true;
2031                                 continue;
2032                         }
2033
2034                         /* Only non-RMW writes can break release sequences */
2035                         if (!act->is_write() || act->is_rmw())
2036                                 continue;
2037
2038                         /* Check modification order */
2039                         if (mo_graph->checkReachable(rf, act)) {
2040                                 /* rf --mo--> act */
2041                                 future_ordered = true;
2042                                 continue;
2043                         }
2044                         if (mo_graph->checkReachable(act, release))
2045                                 /* act --mo--> release */
2046                                 break;
2047                         if (mo_graph->checkReachable(release, act) &&
2048                                       mo_graph->checkReachable(act, rf)) {
2049                                 /* release --mo-> act --mo--> rf */
2050                                 return true; /* complete */
2051                         }
2052                         /* act may break release sequence */
2053                         pending->writes.push_back(act);
2054                         certain = false;
2055                 }
2056                 if (!future_ordered)
2057                         certain = false; /* This thread is uncertain */
2058         }
2059
2060         if (certain) {
2061                 release_heads->push_back(release);
2062                 pending->writes.clear();
2063         } else {
2064                 pending->release = release;
2065                 pending->rf = rf;
2066         }
2067         return certain;
2068 }
2069
2070 /**
2071  * An interface for getting the release sequence head(s) with which a
2072  * given ModelAction must synchronize. This function only returns a non-empty
2073  * result when it can locate a release sequence head with certainty. Otherwise,
2074  * it may mark the internal state of the ModelExecution so that it will handle
2075  * the release sequence at a later time, causing @a acquire to update its
2076  * synchronization at some later point in execution.
2077  *
2078  * @param acquire The 'acquire' action that may synchronize with a release
2079  * sequence
2080  * @param read The read action that may read from a release sequence; this may
2081  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2082  * when 'acquire' is a fence-acquire)
2083  * @param release_heads A pass-by-reference return parameter. Will be filled
2084  * with the head(s) of the release sequence(s), if they exists with certainty.
2085  * @see ModelExecution::release_seq_heads
2086  */
2087 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2088                 ModelAction *read, rel_heads_list_t *release_heads)
2089 {
2090         const ModelAction *rf = read->get_reads_from();
2091         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2092         sequence->acquire = acquire;
2093         sequence->read = read;
2094
2095         if (!release_seq_heads(rf, release_heads, sequence)) {
2096                 /* add act to 'lazy checking' list */
2097                 pending_rel_seqs.push_back(sequence);
2098         } else {
2099                 snapshot_free(sequence);
2100         }
2101 }
2102
2103 /**
2104  * @brief Propagate a modified clock vector to actions later in the execution
2105  * order
2106  *
2107  * After an acquire operation lazily completes a release-sequence
2108  * synchronization, we must update all clock vectors for operations later than
2109  * the acquire in the execution order.
2110  *
2111  * @param acquire The ModelAction whose clock vector must be propagated
2112  * @param work The work queue to which we can add work items, if this
2113  * propagation triggers more updates (e.g., to the modification order)
2114  */
2115 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2116 {
2117         /* Re-check all pending release sequences */
2118         work->push_back(CheckRelSeqWorkEntry(NULL));
2119         /* Re-check read-acquire for mo_graph edges */
2120         work->push_back(MOEdgeWorkEntry(acquire));
2121
2122         /* propagate synchronization to later actions */
2123         action_list_t::reverse_iterator rit = action_trace.rbegin();
2124         for (; (*rit) != acquire; rit++) {
2125                 ModelAction *propagate = *rit;
2126                 if (acquire->happens_before(propagate)) {
2127                         synchronize(acquire, propagate);
2128                         /* Re-check 'propagate' for mo_graph edges */
2129                         work->push_back(MOEdgeWorkEntry(propagate));
2130                 }
2131         }
2132 }
2133
2134 /**
2135  * Attempt to resolve all stashed operations that might synchronize with a
2136  * release sequence for a given location. This implements the "lazy" portion of
2137  * determining whether or not a release sequence was contiguous, since not all
2138  * modification order information is present at the time an action occurs.
2139  *
2140  * @param location The location/object that should be checked for release
2141  * sequence resolutions. A NULL value means to check all locations.
2142  * @param work_queue The work queue to which to add work items as they are
2143  * generated
2144  * @return True if any updates occurred (new synchronization, new mo_graph
2145  * edges)
2146  */
2147 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2148 {
2149         bool updated = false;
2150         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2151         while (it != pending_rel_seqs.end()) {
2152                 struct release_seq *pending = *it;
2153                 ModelAction *acquire = pending->acquire;
2154                 const ModelAction *read = pending->read;
2155
2156                 /* Only resolve sequences on the given location, if provided */
2157                 if (location && read->get_location() != location) {
2158                         it++;
2159                         continue;
2160                 }
2161
2162                 const ModelAction *rf = read->get_reads_from();
2163                 rel_heads_list_t release_heads;
2164                 bool complete;
2165                 complete = release_seq_heads(rf, &release_heads, pending);
2166                 for (unsigned int i = 0; i < release_heads.size(); i++)
2167                         if (!acquire->has_synchronized_with(release_heads[i]))
2168                                 if (synchronize(release_heads[i], acquire))
2169                                         updated = true;
2170
2171                 if (updated) {
2172                         /* Propagate the changed clock vector */
2173                         propagate_clockvector(acquire, work_queue);
2174                 }
2175                 if (complete) {
2176                         it = pending_rel_seqs.erase(it);
2177                         snapshot_free(pending);
2178                 } else {
2179                         it++;
2180                 }
2181         }
2182
2183         // If we resolved promises or data races, see if we have realized a data race.
2184         checkDataRaces();
2185
2186         return updated;
2187 }
2188
2189 /**
2190  * Performs various bookkeeping operations for the current ModelAction. For
2191  * instance, adds action to the per-object, per-thread action vector and to the
2192  * action trace list of all thread actions.
2193  *
2194  * @param act is the ModelAction to add.
2195  */
2196 void ModelExecution::add_action_to_lists(ModelAction *act)
2197 {
2198         int tid = id_to_int(act->get_tid());
2199         ModelAction *uninit = NULL;
2200         int uninit_id = -1;
2201         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2202         if (list->empty() && act->is_atomic_var()) {
2203                 uninit = get_uninitialized_action(act);
2204                 uninit_id = id_to_int(uninit->get_tid());
2205                 list->push_front(uninit);
2206         }
2207         list->push_back(act);
2208
2209         action_trace.push_back(act);
2210         if (uninit)
2211                 action_trace.push_front(uninit);
2212
2213         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2214         if (tid >= (int)vec->size())
2215                 vec->resize(priv->next_thread_id);
2216         (*vec)[tid].push_back(act);
2217         if (uninit)
2218                 (*vec)[uninit_id].push_front(uninit);
2219
2220         if ((int)thrd_last_action.size() <= tid)
2221                 thrd_last_action.resize(get_num_threads());
2222         thrd_last_action[tid] = act;
2223         if (uninit)
2224                 thrd_last_action[uninit_id] = uninit;
2225
2226         if (act->is_fence() && act->is_release()) {
2227                 if ((int)thrd_last_fence_release.size() <= tid)
2228                         thrd_last_fence_release.resize(get_num_threads());
2229                 thrd_last_fence_release[tid] = act;
2230         }
2231
2232         if (act->is_wait()) {
2233                 void *mutex_loc = (void *) act->get_value();
2234                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2235
2236                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2237                 if (tid >= (int)vec->size())
2238                         vec->resize(priv->next_thread_id);
2239                 (*vec)[tid].push_back(act);
2240         }
2241 }
2242
2243 /**
2244  * @brief Get the last action performed by a particular Thread
2245  * @param tid The thread ID of the Thread in question
2246  * @return The last action in the thread
2247  */
2248 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2249 {
2250         int threadid = id_to_int(tid);
2251         if (threadid < (int)thrd_last_action.size())
2252                 return thrd_last_action[id_to_int(tid)];
2253         else
2254                 return NULL;
2255 }
2256
2257 /**
2258  * @brief Get the last fence release performed by a particular Thread
2259  * @param tid The thread ID of the Thread in question
2260  * @return The last fence release in the thread, if one exists; NULL otherwise
2261  */
2262 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2263 {
2264         int threadid = id_to_int(tid);
2265         if (threadid < (int)thrd_last_fence_release.size())
2266                 return thrd_last_fence_release[id_to_int(tid)];
2267         else
2268                 return NULL;
2269 }
2270
2271 /**
2272  * Gets the last memory_order_seq_cst write (in the total global sequence)
2273  * performed on a particular object (i.e., memory location), not including the
2274  * current action.
2275  * @param curr The current ModelAction; also denotes the object location to
2276  * check
2277  * @return The last seq_cst write
2278  */
2279 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2280 {
2281         void *location = curr->get_location();
2282         action_list_t *list = obj_map.get(location);
2283         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2284         action_list_t::reverse_iterator rit;
2285         for (rit = list->rbegin(); (*rit) != curr; rit++)
2286                 ;
2287         rit++; /* Skip past curr */
2288         for ( ; rit != list->rend(); rit++)
2289                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2290                         return *rit;
2291         return NULL;
2292 }
2293
2294 /**
2295  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2296  * performed in a particular thread, prior to a particular fence.
2297  * @param tid The ID of the thread to check
2298  * @param before_fence The fence from which to begin the search; if NULL, then
2299  * search for the most recent fence in the thread.
2300  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2301  */
2302 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2303 {
2304         /* All fences should have location FENCE_LOCATION */
2305         action_list_t *list = obj_map.get(FENCE_LOCATION);
2306
2307         if (!list)
2308                 return NULL;
2309
2310         action_list_t::reverse_iterator rit = list->rbegin();
2311
2312         if (before_fence) {
2313                 for (; rit != list->rend(); rit++)
2314                         if (*rit == before_fence)
2315                                 break;
2316
2317                 ASSERT(*rit == before_fence);
2318                 rit++;
2319         }
2320
2321         for (; rit != list->rend(); rit++)
2322                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2323                         return *rit;
2324         return NULL;
2325 }
2326
2327 /**
2328  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2329  * location). This function identifies the mutex according to the current
2330  * action, which is presumed to perform on the same mutex.
2331  * @param curr The current ModelAction; also denotes the object location to
2332  * check
2333  * @return The last unlock operation
2334  */
2335 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2336 {
2337         void *location = curr->get_location();
2338         action_list_t *list = obj_map.get(location);
2339         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2340         action_list_t::reverse_iterator rit;
2341         for (rit = list->rbegin(); rit != list->rend(); rit++)
2342                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2343                         return *rit;
2344         return NULL;
2345 }
2346
2347 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2348 {
2349         ModelAction *parent = get_last_action(tid);
2350         if (!parent)
2351                 parent = get_thread(tid)->get_creation();
2352         return parent;
2353 }
2354
2355 /**
2356  * Returns the clock vector for a given thread.
2357  * @param tid The thread whose clock vector we want
2358  * @return Desired clock vector
2359  */
2360 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2361 {
2362         return get_parent_action(tid)->get_cv();
2363 }
2364
2365 /**
2366  * @brief Find the promise (if any) to resolve for the current action and
2367  * remove it from the pending promise vector
2368  * @param curr The current ModelAction. Should be a write.
2369  * @return The Promise to resolve, if any; otherwise NULL
2370  */
2371 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2372 {
2373         for (unsigned int i = 0; i < promises.size(); i++)
2374                 if (curr->get_node()->get_promise(i)) {
2375                         Promise *ret = promises[i];
2376                         promises.erase(promises.begin() + i);
2377                         return ret;
2378                 }
2379         return NULL;
2380 }
2381
2382 /**
2383  * Resolve a Promise with a current write.
2384  * @param write The ModelAction that is fulfilling Promises
2385  * @param promise The Promise to resolve
2386  * @param work The work queue, for adding new fixup work
2387  * @return True if the Promise was successfully resolved; false otherwise
2388  */
2389 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
2390                 work_queue_t *work)
2391 {
2392         ModelVector<ModelAction *> actions_to_check;
2393
2394         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2395                 ModelAction *read = promise->get_reader(i);
2396                 if (read_from(read, write)) {
2397                         /* Propagate the changed clock vector */
2398                         propagate_clockvector(read, work);
2399                 }
2400                 actions_to_check.push_back(read);
2401         }
2402         /* Make sure the promise's value matches the write's value */
2403         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2404         if (!mo_graph->resolvePromise(promise, write))
2405                 priv->hard_failed_promise = true;
2406
2407         /**
2408          * @todo  It is possible to end up in an inconsistent state, where a
2409          * "resolved" promise may still be referenced if
2410          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2411          *
2412          * Note that the inconsistency only matters when dumping mo_graph to
2413          * file.
2414          *
2415          * delete promise;
2416          */
2417
2418         //Check whether reading these writes has made threads unable to
2419         //resolve promises
2420         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2421                 ModelAction *read = actions_to_check[i];
2422                 mo_check_promises(read, true);
2423         }
2424
2425         return true;
2426 }
2427
2428 /**
2429  * Compute the set of promises that could potentially be satisfied by this
2430  * action. Note that the set computation actually appears in the Node, not in
2431  * ModelExecution.
2432  * @param curr The ModelAction that may satisfy promises
2433  */
2434 void ModelExecution::compute_promises(ModelAction *curr)
2435 {
2436         for (unsigned int i = 0; i < promises.size(); i++) {
2437                 Promise *promise = promises[i];
2438                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2439                         continue;
2440
2441                 bool satisfy = true;
2442                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2443                         const ModelAction *act = promise->get_reader(j);
2444                         if (act->happens_before(curr) ||
2445                                         act->could_synchronize_with(curr)) {
2446                                 satisfy = false;
2447                                 break;
2448                         }
2449                 }
2450                 if (satisfy)
2451                         curr->get_node()->set_promise(i);
2452         }
2453 }
2454
2455 /** Checks promises in response to change in ClockVector Threads. */
2456 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2457 {
2458         for (unsigned int i = 0; i < promises.size(); i++) {
2459                 Promise *promise = promises[i];
2460                 if (!promise->thread_is_available(tid))
2461                         continue;
2462                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2463                         const ModelAction *act = promise->get_reader(j);
2464                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2465                                         merge_cv->synchronized_since(act)) {
2466                                 if (promise->eliminate_thread(tid)) {
2467                                         /* Promise has failed */
2468                                         priv->failed_promise = true;
2469                                         return;
2470                                 }
2471                         }
2472                 }
2473         }
2474 }
2475
2476 void ModelExecution::check_promises_thread_disabled()
2477 {
2478         for (unsigned int i = 0; i < promises.size(); i++) {
2479                 Promise *promise = promises[i];
2480                 if (promise->has_failed()) {
2481                         priv->failed_promise = true;
2482                         return;
2483                 }
2484         }
2485 }
2486
2487 /**
2488  * @brief Checks promises in response to addition to modification order for
2489  * threads.
2490  *
2491  * We test whether threads are still available for satisfying promises after an
2492  * addition to our modification order constraints. Those that are unavailable
2493  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2494  * that promise has failed.
2495  *
2496  * @param act The ModelAction which updated the modification order
2497  * @param is_read_check Should be true if act is a read and we must check for
2498  * updates to the store from which it read (there is a distinction here for
2499  * RMW's, which are both a load and a store)
2500  */
2501 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2502 {
2503         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2504
2505         for (unsigned int i = 0; i < promises.size(); i++) {
2506                 Promise *promise = promises[i];
2507
2508                 // Is this promise on the same location?
2509                 if (!promise->same_location(write))
2510                         continue;
2511
2512                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2513                         const ModelAction *pread = promise->get_reader(j);
2514                         if (!pread->happens_before(act))
2515                                continue;
2516                         if (mo_graph->checkPromise(write, promise)) {
2517                                 priv->hard_failed_promise = true;
2518                                 return;
2519                         }
2520                         break;
2521                 }
2522
2523                 // Don't do any lookups twice for the same thread
2524                 if (!promise->thread_is_available(act->get_tid()))
2525                         continue;
2526
2527                 if (mo_graph->checkReachable(promise, write)) {
2528                         if (mo_graph->checkPromise(write, promise)) {
2529                                 priv->hard_failed_promise = true;
2530                                 return;
2531                         }
2532                 }
2533         }
2534 }
2535
2536 /**
2537  * Compute the set of writes that may break the current pending release
2538  * sequence. This information is extracted from previou release sequence
2539  * calculations.
2540  *
2541  * @param curr The current ModelAction. Must be a release sequence fixup
2542  * action.
2543  */
2544 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2545 {
2546         if (pending_rel_seqs.empty())
2547                 return;
2548
2549         struct release_seq *pending = pending_rel_seqs.back();
2550         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2551                 const ModelAction *write = pending->writes[i];
2552                 curr->get_node()->add_relseq_break(write);
2553         }
2554
2555         /* NULL means don't break the sequence; just synchronize */
2556         curr->get_node()->add_relseq_break(NULL);
2557 }
2558
2559 /**
2560  * Build up an initial set of all past writes that this 'read' action may read
2561  * from, as well as any previously-observed future values that must still be valid.
2562  *
2563  * @param curr is the current ModelAction that we are exploring; it must be a
2564  * 'read' operation.
2565  */
2566 void ModelExecution::build_may_read_from(ModelAction *curr)
2567 {
2568         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2569         unsigned int i;
2570         ASSERT(curr->is_read());
2571
2572         ModelAction *last_sc_write = NULL;
2573
2574         if (curr->is_seqcst())
2575                 last_sc_write = get_last_seq_cst_write(curr);
2576
2577         /* Iterate over all threads */
2578         for (i = 0; i < thrd_lists->size(); i++) {
2579                 /* Iterate over actions in thread, starting from most recent */
2580                 action_list_t *list = &(*thrd_lists)[i];
2581                 action_list_t::reverse_iterator rit;
2582                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2583                         ModelAction *act = *rit;
2584
2585                         /* Only consider 'write' actions */
2586                         if (!act->is_write() || act == curr)
2587                                 continue;
2588
2589                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2590                         bool allow_read = true;
2591
2592                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2593                                 allow_read = false;
2594                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2595                                 allow_read = false;
2596
2597                         if (allow_read) {
2598                                 /* Only add feasible reads */
2599                                 mo_graph->startChanges();
2600                                 r_modification_order(curr, act);
2601                                 if (!is_infeasible())
2602                                         curr->get_node()->add_read_from_past(act);
2603                                 mo_graph->rollbackChanges();
2604                         }
2605
2606                         /* Include at most one act per-thread that "happens before" curr */
2607                         if (act->happens_before(curr))
2608                                 break;
2609                 }
2610         }
2611
2612         /* Inherit existing, promised future values */
2613         for (i = 0; i < promises.size(); i++) {
2614                 const Promise *promise = promises[i];
2615                 const ModelAction *promise_read = promise->get_reader(0);
2616                 if (promise_read->same_var(curr)) {
2617                         /* Only add feasible future-values */
2618                         mo_graph->startChanges();
2619                         r_modification_order(curr, promise);
2620                         if (!is_infeasible())
2621                                 curr->get_node()->add_read_from_promise(promise_read);
2622                         mo_graph->rollbackChanges();
2623                 }
2624         }
2625
2626         /* We may find no valid may-read-from only if the execution is doomed */
2627         if (!curr->get_node()->read_from_size()) {
2628                 priv->no_valid_reads = true;
2629                 set_assert();
2630         }
2631
2632         if (DBG_ENABLED()) {
2633                 model_print("Reached read action:\n");
2634                 curr->print();
2635                 model_print("Printing read_from_past\n");
2636                 curr->get_node()->print_read_from_past();
2637                 model_print("End printing read_from_past\n");
2638         }
2639 }
2640
2641 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2642 {
2643         for ( ; write != NULL; write = write->get_reads_from()) {
2644                 /* UNINIT actions don't have a Node, and they never sleep */
2645                 if (write->is_uninitialized())
2646                         return true;
2647                 Node *prevnode = write->get_node()->get_parent();
2648
2649                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2650                 if (write->is_release() && thread_sleep)
2651                         return true;
2652                 if (!write->is_rmw())
2653                         return false;
2654         }
2655         return true;
2656 }
2657
2658 /**
2659  * @brief Get an action representing an uninitialized atomic
2660  *
2661  * This function may create a new one or try to retrieve one from the NodeStack
2662  *
2663  * @param curr The current action, which prompts the creation of an UNINIT action
2664  * @return A pointer to the UNINIT ModelAction
2665  */
2666 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2667 {
2668         Node *node = curr->get_node();
2669         ModelAction *act = node->get_uninit_action();
2670         if (!act) {
2671                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2672                 node->set_uninit_action(act);
2673         }
2674         act->create_cv(NULL);
2675         return act;
2676 }
2677
2678 static void print_list(const action_list_t *list)
2679 {
2680         action_list_t::const_iterator it;
2681
2682         model_print("------------------------------------------------------------------------------------\n");
2683         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2684         model_print("------------------------------------------------------------------------------------\n");
2685
2686         unsigned int hash = 0;
2687
2688         for (it = list->begin(); it != list->end(); it++) {
2689                 const ModelAction *act = *it;
2690                 if (act->get_seq_number() > 0)
2691                         act->print();
2692                 hash = hash^(hash<<3)^((*it)->hash());
2693         }
2694         model_print("HASH %u\n", hash);
2695         model_print("------------------------------------------------------------------------------------\n");
2696 }
2697
2698 #if SUPPORT_MOD_ORDER_DUMP
2699 void ModelExecution::dumpGraph(char *filename) const
2700 {
2701         char buffer[200];
2702         sprintf(buffer, "%s.dot", filename);
2703         FILE *file = fopen(buffer, "w");
2704         fprintf(file, "digraph %s {\n", filename);
2705         mo_graph->dumpNodes(file);
2706         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2707
2708         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2709                 ModelAction *act = *it;
2710                 if (act->is_read()) {
2711                         mo_graph->dot_print_node(file, act);
2712                         if (act->get_reads_from())
2713                                 mo_graph->dot_print_edge(file,
2714                                                 act->get_reads_from(),
2715                                                 act,
2716                                                 "label=\"rf\", color=red, weight=2");
2717                         else
2718                                 mo_graph->dot_print_edge(file,
2719                                                 act->get_reads_from_promise(),
2720                                                 act,
2721                                                 "label=\"rf\", color=red");
2722                 }
2723                 if (thread_array[act->get_tid()]) {
2724                         mo_graph->dot_print_edge(file,
2725                                         thread_array[id_to_int(act->get_tid())],
2726                                         act,
2727                                         "label=\"sb\", color=blue, weight=400");
2728                 }
2729
2730                 thread_array[act->get_tid()] = act;
2731         }
2732         fprintf(file, "}\n");
2733         model_free(thread_array);
2734         fclose(file);
2735 }
2736 #endif
2737
2738 /** @brief Prints an execution trace summary. */
2739 void ModelExecution::print_summary() const
2740 {
2741 #if SUPPORT_MOD_ORDER_DUMP
2742         char buffername[100];
2743         sprintf(buffername, "exec%04u", get_execution_number());
2744         mo_graph->dumpGraphToFile(buffername);
2745         sprintf(buffername, "graph%04u", get_execution_number());
2746         dumpGraph(buffername);
2747 #endif
2748
2749         model_print("Execution trace %d:", get_execution_number());
2750         if (isfeasibleprefix()) {
2751                 if (is_yieldblocked())
2752                         model_print(" YIELD BLOCKED");
2753                 if (scheduler->all_threads_sleeping())
2754                         model_print(" SLEEP-SET REDUNDANT");
2755                 if (have_bug_reports())
2756                         model_print(" DETECTED BUG(S)");
2757         } else
2758                 print_infeasibility(" INFEASIBLE");
2759         model_print("\n");
2760
2761         print_list(&action_trace);
2762         model_print("\n");
2763
2764         if (!promises.empty()) {
2765                 model_print("Pending promises:\n");
2766                 for (unsigned int i = 0; i < promises.size(); i++) {
2767                         model_print(" [P%u] ", i);
2768                         promises[i]->print();
2769                 }
2770                 model_print("\n");
2771         }
2772 }
2773
2774 /**
2775  * Add a Thread to the system for the first time. Should only be called once
2776  * per thread.
2777  * @param t The Thread to add
2778  */
2779 void ModelExecution::add_thread(Thread *t)
2780 {
2781         unsigned int i = id_to_int(t->get_id());
2782         if (i >= thread_map.size())
2783                 thread_map.resize(i + 1);
2784         thread_map[i] = t;
2785         if (!t->is_model_thread())
2786                 scheduler->add_thread(t);
2787 }
2788
2789 /**
2790  * @brief Get a Thread reference by its ID
2791  * @param tid The Thread's ID
2792  * @return A Thread reference
2793  */
2794 Thread * ModelExecution::get_thread(thread_id_t tid) const
2795 {
2796         unsigned int i = id_to_int(tid);
2797         if (i < thread_map.size())
2798                 return thread_map[i];
2799         return NULL;
2800 }
2801
2802 /**
2803  * @brief Get a reference to the Thread in which a ModelAction was executed
2804  * @param act The ModelAction
2805  * @return A Thread reference
2806  */
2807 Thread * ModelExecution::get_thread(const ModelAction *act) const
2808 {
2809         return get_thread(act->get_tid());
2810 }
2811
2812 /**
2813  * @brief Get a Promise's "promise number"
2814  *
2815  * A "promise number" is an index number that is unique to a promise, valid
2816  * only for a specific snapshot of an execution trace. Promises may come and go
2817  * as they are generated an resolved, so an index only retains meaning for the
2818  * current snapshot.
2819  *
2820  * @param promise The Promise to check
2821  * @return The promise index, if the promise still is valid; otherwise -1
2822  */
2823 int ModelExecution::get_promise_number(const Promise *promise) const
2824 {
2825         for (unsigned int i = 0; i < promises.size(); i++)
2826                 if (promises[i] == promise)
2827                         return i;
2828         /* Not found */
2829         return -1;
2830 }
2831
2832 /**
2833  * @brief Check if a Thread is currently enabled
2834  * @param t The Thread to check
2835  * @return True if the Thread is currently enabled
2836  */
2837 bool ModelExecution::is_enabled(Thread *t) const
2838 {
2839         return scheduler->is_enabled(t);
2840 }
2841
2842 /**
2843  * @brief Check if a Thread is currently enabled
2844  * @param tid The ID of the Thread to check
2845  * @return True if the Thread is currently enabled
2846  */
2847 bool ModelExecution::is_enabled(thread_id_t tid) const
2848 {
2849         return scheduler->is_enabled(tid);
2850 }
2851
2852 /**
2853  * @brief Select the next thread to execute based on the curren action
2854  *
2855  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2856  * actions should be followed by the execution of their child thread. In either
2857  * case, the current action should determine the next thread schedule.
2858  *
2859  * @param curr The current action
2860  * @return The next thread to run, if the current action will determine this
2861  * selection; otherwise NULL
2862  */
2863 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2864 {
2865         /* Do not split atomic RMW */
2866         if (curr->is_rmwr())
2867                 return get_thread(curr);
2868         /* Follow CREATE with the created thread */
2869         if (curr->get_type() == THREAD_CREATE)
2870                 return curr->get_thread_operand();
2871         return NULL;
2872 }
2873
2874 /** @return True if the execution has taken too many steps */
2875 bool ModelExecution::too_many_steps() const
2876 {
2877         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2878 }
2879
2880 /**
2881  * Takes the next step in the execution, if possible.
2882  * @param curr The current step to take
2883  * @return Returns the next Thread to run, if any; NULL if this execution
2884  * should terminate
2885  */
2886 Thread * ModelExecution::take_step(ModelAction *curr)
2887 {
2888         Thread *curr_thrd = get_thread(curr);
2889         ASSERT(curr_thrd->get_state() == THREAD_READY);
2890
2891         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2892         curr = check_current_action(curr);
2893         ASSERT(curr);
2894
2895         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2896                 scheduler->remove_thread(curr_thrd);
2897
2898         return action_select_next_thread(curr);
2899 }
2900
2901 /**
2902  * Launch end-of-execution release sequence fixups only when
2903  * the execution is otherwise feasible AND there are:
2904  *
2905  * (1) pending release sequences
2906  * (2) pending assertions that could be invalidated by a change
2907  * in clock vectors (i.e., data races)
2908  * (3) no pending promises
2909  */
2910 void ModelExecution::fixup_release_sequences()
2911 {
2912         while (!pending_rel_seqs.empty() &&
2913                         is_feasible_prefix_ignore_relseq() &&
2914                         haveUnrealizedRaces()) {
2915                 model_print("*** WARNING: release sequence fixup action "
2916                                 "(%zu pending release seuqence(s)) ***\n",
2917                                 pending_rel_seqs.size());
2918                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2919                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2920                                 model_thread);
2921                 take_step(fixup);
2922         };
2923 }