87e5425286e20253bce18d11f219631351b60819
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #include <pthread.h>
21
22 #define INITIAL_THREAD_ID       0
23
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 * (saved and restored across executions by the snapshotting system).
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* This struct owns the bug_message objects it collects */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID that will be handed to the next created Thread */
	unsigned int next_thread_id;
	/** @brief Most recent sequence number handed out (see get_next_seq_num) */
	modelclock_t used_sequence_numbers;
	/** @brief Latest cached backtracking point for this execution */
	ModelAction *next_backtrack;
	/** @brief Bug reports accumulated during this execution */
	SnapVector<bug_message *> bugs;
	bool failed_promise;
	bool hard_failed_promise;
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/* NOTE(review): presumably set when a seq-cst read observes an
	 * incoherent value — confirm against set_bad_sc_read() callers */
	bool bad_sc_read;
	bool asserted;

	SNAPSHOTALLOC
};
64
/**
 * @brief Constructor
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (not owned)
 * @param scheduler The scheduler driving this execution (not owned)
 * @param node_stack The NodeStack used for backtracking (not owned)
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2),	/* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),	/* snapshotted state */
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());	// L: Create model thread
	add_thread(model_thread);			// L: Add model thread to scheduler
	/* Hand these components a back-pointer to this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
93
94 /** @brief Destructor */
95 ModelExecution::~ModelExecution()
96 {
97         for (unsigned int i = 0; i < get_num_threads(); i++)
98                 delete get_thread(int_to_id(i));
99
100         for (unsigned int i = 0; i < promises.size(); i++)
101                 delete promises[i];
102
103         delete mo_graph;
104         delete priv;
105 }
106
/** @return The execution number, as tracked by the owning ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
111
112 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
113 {
114         action_list_t *tmp = hash->get(ptr);
115         if (tmp == NULL) {
116                 tmp = new action_list_t();
117                 hash->put(ptr, tmp);
118         }
119         return tmp;
120 }
121
122 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
123 {
124         SnapVector<action_list_t> *tmp = hash->get(ptr);
125         if (tmp == NULL) {
126                 tmp = new SnapVector<action_list_t>();
127                 hash->put(ptr, tmp);
128         }
129         return tmp;
130 }
131
132 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
133 {
134         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
135         if (wrv==NULL)
136                 return NULL;
137         unsigned int thread=id_to_int(tid);
138         if (thread < wrv->size())
139                 return &(*wrv)[thread];
140         else
141                 return NULL;
142 }
143
/** @return a thread ID for a new Thread (monotonically increasing; part
 * of the snapshotted state, so it rewinds with the execution) */
thread_id_t ModelExecution::get_next_id()
{
	return priv->next_thread_id++;
}
149
/** @return the number of user threads created during this execution
 * (equals the next unassigned thread ID) */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}
155
/** @return a sequence number for a new ModelAction (pre-increment: the
 * first number handed out is 1, and used_sequence_numbers always holds
 * the most recent one issued) */
modelclock_t ModelExecution::get_next_seq_num()
{
	return ++priv->used_sequence_numbers;
}
161
162 /**
163  * @brief Should the current action wake up a given thread?
164  *
165  * @param curr The current action
166  * @param thread The thread that we might wake up
167  * @return True, if we should wake up the sleeping thread; false otherwise
168  */
169 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
170 {
171         const ModelAction *asleep = thread->get_pending();
172         /* Don't allow partial RMW to wake anyone up */
173         if (curr->is_rmwr())
174                 return false;
175         /* Synchronizing actions may have been backtracked */
176         if (asleep->could_synchronize_with(curr))
177                 return true;
178         /* All acquire/release fences and fence-acquire/store-release */
179         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
180                 return true;
181         /* Fence-release + store can awake load-acquire on the same location */
182         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
183                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
184                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
185                         return true;
186         }
187         return false;
188 }
189
190 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
191 {
192         for (unsigned int i = 0; i < get_num_threads(); i++) {
193                 Thread *thr = get_thread(int_to_id(i));
194                 if (scheduler->is_sleep_set(thr)) {
195                         if (should_wake_up(curr, thr))
196                                 /* Remove this thread from sleep set */
197                                 scheduler->remove_sleep(thr);
198                 }
199         }
200 }
201
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
208
/** @brief Record that a problematic seq-cst read was observed.
 * (Comment corrected: the previous doxygen was a copy-paste of
 * set_bad_synchronization's. NOTE(review): presumed to mean an SC read
 * that violates seq-cst coherence — confirm against callers.) */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
215
216 bool ModelExecution::assert_bug(const char *msg)
217 {
218         priv->bugs.push_back(new bug_message(msg));
219
220         if (isfeasibleprefix()) {
221                 set_assert();
222                 return true;
223         }
224         return false;
225 }
226
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}
232
/** @return The list of bug reports for this execution (owned by this
 * execution's snapshotted state; callers must not delete the entries) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
237
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
248
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
258
259 /**
260  * Check if we are in a deadlock. Should only be called at the end of an
261  * execution, although it should not give false positives in the middle of an
262  * execution (there should be some ENABLED thread).
263  *
264  * @return True if program is in a deadlock; false otherwise
265  */
266 bool ModelExecution::is_deadlocked() const
267 {
268         bool blocking_threads = false;
269         for (unsigned int i = 0; i < get_num_threads(); i++) {
270                 thread_id_t tid = int_to_id(i);
271                 if (is_enabled(tid))
272                         return false;
273                 Thread *t = get_thread(tid);
274                 if (!t->is_model_thread() && t->get_pending())
275                         blocking_threads = true;
276         }
277         return blocking_threads;
278 }
279
280 /**
281  * @brief Check if we are yield-blocked
282  *
283  * A program can be "yield-blocked" if all threads are ready to execute a
284  * yield.
285  *
286  * @return True if the program is yield-blocked; false otherwise
287  */
288 bool ModelExecution::is_yieldblocked() const
289 {
290         if (!params->yieldblock)
291                 return false;
292
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *t = get_thread(tid);
296                 if (t->get_pending() && t->get_pending()->is_yield())
297                         return true;
298         }
299         return false;
300 }
301
302 /**
303  * Check if this is a complete execution. That is, have all thread completed
304  * execution (rather than exiting because sleep sets have forced a redundant
305  * execution).
306  *
307  * @return True if the execution is complete.
308  */
309 bool ModelExecution::is_complete_execution() const
310 {
311         if (is_yieldblocked())
312                 return false;
313         for (unsigned int i = 0; i < get_num_threads(); i++)
314                 if (is_enabled(int_to_id(i)))
315                         return false;
316         return true;
317 }
318
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: walk the global trace backwards until we
	 * reach last_release, then continue the scan from there */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire
	 * Both vectors are indexed by thread ID; a slot stays NULL until
	 * the corresponding action is seen for that thread. */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions can't form a fence conflict with act */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire-fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Among threads that have both an acquire-fence and a prior load to
	 * the same location, pick the latest such fence as the backtrack */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
391
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act, or NULL if none
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* seq-cst fence: intentional fall-through to the access cases */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return whichever
		 * conflict is more recent */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
491
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	/* No conflict means no backtracking point */
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point, only that one
	 * thread needs a backtracking point; otherwise consider them all */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			/* NB: loop variable `t` shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			/* NB: loop variable `t` shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
575
576 /**
577  * @brief Cache the a backtracking point as the "most recent", if eligible
578  *
579  * Note that this does not prepare the NodeStack for this backtracking
580  * operation, it only caches the action on a per-execution basis
581  *
582  * @param act The operation at which we should explore a different next action
583  * (i.e., backtracking point)
584  * @return True, if this action is now the most recent backtracking point;
585  * false otherwise
586  */
587 bool ModelExecution::set_latest_backtrack(ModelAction *act)
588 {
589         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
590                 priv->next_backtrack = act;
591                 return true;
592         }
593         return false;
594 }
595
/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 *
 * Note: this consumes the cached point — the cache is reset to NULL.
 *
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelExecution::get_next_backtrack()
{
	ModelAction *next = priv->next_backtrack;
	priv->next_backtrack = NULL;
	return next;
}
607
/**
 * Processes a read model action.
 *
 * Resolves the read's source according to the Node's read-from status:
 * a past write, a promised (not-yet-written) value, or a new future value.
 * The loop exists so that a read rejected by the recency check can retry
 * with the next read-from candidate after rolling back mo_graph changes;
 * every other path returns on the first iteration.
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			/* Changes are speculative until committed below */
			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Try the next candidate write */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left: flag the execution */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			/* Read from a value another thread has promised to write */
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value: create a fresh Promise that a
			 * later write must resolve */
			struct future_value fv = node->get_future_value();
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Deliver the read value to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
671
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	/* NOTE(review): state stays NULL when curr has no mutex; the cases
	 * below dereference it unconditionally — presumably mutex is always
	 * non-NULL for these action types. Confirm with callers. */
	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			/* Lock held: trylock fails, return 0 to the caller */
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Detect use of the mutex before its initialization */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter on this location */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The Node's misc counter selects which waiter to wake, so all
		 * choices get explored across executions */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
767
768 /**
769  * @brief Check if the current pending promises allow a future value to be sent
770  *
771  * It is unsafe to pass a future value back if there exists a pending promise Pr
772  * such that:
773  *
774  *    reader --exec-> Pr --exec-> writer
775  *
776  * If such Pr exists, we must save the pending future value until Pr is
777  * resolved.
778  *
779  * @param writer The operation which sends the future value. Must be a write.
780  * @param reader The operation which will observe the value. Must be a read.
781  * @return True if the future value can be sent now; false if it must wait.
782  */
783 bool ModelExecution::promises_may_allow(const ModelAction *writer,
784                 const ModelAction *reader) const
785 {
786         for (int i = promises.size() - 1; i >= 0; i--) {
787                 ModelAction *pr = promises[i]->get_reader(0);
788                 //reader is after promise...doesn't cross any promise
789                 if (*reader > *pr)
790                         return true;
791                 //writer is after promise, reader before...bad...
792                 if (*writer > *pr)
793                         return false;
794         }
795         return true;
796 }
797
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader
	 * (the writer's thread may not have been created yet at that point) */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	struct future_value fv = {
		writer->get_write_value(),
		/* expiration: the write's clock plus the configured delay */
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* A newly-added future value creates a backtracking point */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
828
/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @param work The work queue, for adding fixup work
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* w_modification_order also collects the candidate future-value
	 * readers into send_fv */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise, work);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the blocking promise resolves */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values; iterate backwards so that erase()
	 * does not shift the indices still to be visited */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	/* A plain write produces no value for the user program */
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
881
882 /**
883  * Process a fence ModelAction
884  * @param curr The ModelAction to process
885  * @return True if synchronization was updated
886  */
887 bool ModelExecution::process_fence(ModelAction *curr)
888 {
889         /*
890          * fence-relaxed: no-op
891          * fence-release: only log the occurence (not in this function), for
892          *   use in later synchronization
893          * fence-acquire (this function): search for hypothetical release
894          *   sequences
895          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
896          */
897         bool updated = false;
898         if (curr->is_acquire()) {
899                 action_list_t *list = &action_trace;
900                 action_list_t::reverse_iterator rit;
901                 /* Find X : is_read(X) && X --sb-> curr */
902                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
903                         ModelAction *act = *rit;
904                         if (act == curr)
905                                 continue;
906                         if (act->get_tid() != curr->get_tid())
907                                 continue;
908                         /* Stop at the beginning of the thread */
909                         if (act->is_thread_start())
910                                 break;
911                         /* Stop once we reach a prior fence-acquire */
912                         if (act->is_fence() && act->is_acquire())
913                                 break;
914                         if (!act->is_read())
915                                 continue;
916                         /* read-acquire will find its own release sequences */
917                         if (act->is_acquire())
918                                 continue;
919
920                         /* Establish hypothetical release sequences */
921                         rel_heads_list_t release_heads;
922                         get_release_seq_heads(curr, act, &release_heads);
923                         for (unsigned int i = 0; i < release_heads.size(); i++)
924                                 synchronize(release_heads[i], curr);
925                         if (release_heads.size() != 0)
926                                 updated = true;
927                 }
928         }
929         return updated;
930 }
931
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Processes
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc.  This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* The action's location/value carry the thrd_t handle and the
		 * start-routine parameters */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case PTHREAD_CREATE: {
		/* Mirrors THREAD_CREATE for the pthread API shim */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct pthread_params *params = (struct pthread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case THREAD_JOIN: {
		/* Joiner synchronizes with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case PTHREAD_JOIN: {
		/* NOTE: pthread_join handling is unfinished; the immediate
		 * break below makes the rest of this case unreachable */
		break; // WL: to be add (modified)
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
	}
	/* (no fall-through in practice: the case above always breaks) */

	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* A newly-started thread may affect promise satisfiability */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
1021
/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* NULL 'write' means force-complete the sequence; a non-NULL write
	 * is the "loose" write chosen to break it */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* Consume the most recent pending release sequence */
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;

		/* Propagate the changed clock vector */
		propagate_clockvector(acquire, work_queue);
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1076
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second half of an RMW: fold it into the prior RMWR action
		 * and discard the incoming object */
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL result means this action was seen on a prior replay */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			/* two explored outcomes for a wait — presumably normal
			 * vs. spurious wakeup; confirm in Node::set_misc_max */
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* notify_one may wake any one of the current waiters */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1147
1148 /**
1149  * @brief Establish reads-from relation between two actions
1150  *
1151  * Perform basic operations involved with establishing a concrete rf relation,
1152  * including setting the ModelAction data and checking for release sequences.
1153  *
1154  * @param act The action that is reading (must be a read)
1155  * @param rf The action from which we are reading (must be a write)
1156  *
1157  * @return True if this read established synchronization
1158  */
1159
1160 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1161 {
1162         ASSERT(rf);
1163         ASSERT(rf->is_write());
1164
1165         act->set_read_from(rf);
1166         if (act->is_acquire()) {
1167                 rel_heads_list_t release_heads;
1168                 get_release_seq_heads(act, act, &release_heads);
1169                 int num_heads = release_heads.size();
1170                 for (unsigned int i = 0; i < release_heads.size(); i++)
1171                         if (!synchronize(release_heads[i], act))
1172                                 num_heads--;
1173                 return num_heads > 0;
1174         }
1175         return false;
1176 }
1177
1178 /**
1179  * @brief Synchronizes two actions
1180  *
1181  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1182  * This function performs the synchronization as well as providing other hooks
1183  * for other checks along with synchronization.
1184  *
1185  * @param first The left-hand side of the synchronizes-with relation
1186  * @param second The right-hand side of the synchronizes-with relation
1187  * @return True if the synchronization was successful (i.e., was consistent
1188  * with the execution order); false otherwise
1189  */
1190 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1191 {
1192         if (*second < *first) {
1193                 set_bad_synchronization();
1194                 return false;
1195         }
1196         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1197         return second->synchronize_with(first);
1198 }
1199
/**
 * Check promises and eliminate potentially-satisfying threads when a thread is
 * blocked (e.g., join, lock). A thread which is waiting on another thread can
 * no longer satisfy a promise generated from that thread.
 *
 * @param blocker The thread on which a thread is waiting
 * @param waiting The waiting thread
 */
void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Skip promises 'waiting' could not satisfy anyway */
		if (!promise->thread_is_available(waiting->get_id()))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			/* Only promises read in the blocker's thread matter */
			if (reader->get_tid() != blocker->get_id())
				continue;
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			} else {
				/* Only eliminate the 'waiting' thread once */
				/* NOTE(review): this returns from the whole
				 * function, so later promises are never
				 * examined — confirm this is intended rather
				 * than a 'break' to the next promise */
				return;
			}
		}
	}
}
1228
1229 /**
1230  * @brief Check whether a model action is enabled.
1231  *
1232  * Checks whether an operation would be successful (i.e., is a lock already
1233  * locked, or is the joined thread already complete).
1234  *
1235  * For yield-blocking, yields are never enabled.
1236  *
1237  * @param curr is the ModelAction to check whether it is enabled.
1238  * @return a bool that indicates whether the action is enabled.
1239  */
1240 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1241         if (curr->is_lock()) {
1242                 std::mutex *lock = curr->get_mutex();
1243                 struct std::mutex_state *state = lock->get_state();
1244                 if (state->locked)
1245                         return false;
1246         } else if (curr->is_thread_join()) {
1247                 Thread *blocking = curr->get_thread_operand();
1248                 if (!blocking->is_complete()) {
1249                         thread_blocking_check_promises(blocking, get_thread(curr));
1250                         return false;
1251                 }
1252         } else if (params->yieldblock && curr->is_yield()) {
1253                 return false;
1254         }
1255
1256         return true;
1257 }
1258
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May replace 'curr' with an equivalent action from the NodeStack */
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			/* NOTE(review): thread processing uses 'curr', not
			 * 'act' — presumably they coincide for this work
			 * type; confirm if CheckCurrWorkEntry is ever queued
			 * with a different action */
			if (process_thread_action(curr))
				update_all = true;

			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act, &work_queue))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
					if (act->is_seqcst()) {
						/* An sc read must not read from a
						 * write that happens-before the most
						 * recent sc write to this location */
						ModelAction *last_sc_write = get_last_seq_cst_write(act);
						if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
							set_bad_sc_read();
						}
					}
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1374
1375 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1376 {
1377         Node *currnode = curr->get_node();
1378         Node *parnode = currnode->get_parent();
1379
1380         if ((parnode && !parnode->backtrack_empty()) ||
1381                          !currnode->misc_empty() ||
1382                          !currnode->read_from_empty() ||
1383                          !currnode->promise_empty() ||
1384                          !currnode->relseq_break_empty()) {
1385                 set_latest_backtrack(curr);
1386         }
1387 }
1388
1389 bool ModelExecution::promises_expired() const
1390 {
1391         for (unsigned int i = 0; i < promises.size(); i++) {
1392                 Promise *promise = promises[i];
1393                 if (promise->get_expiration() < priv->used_sequence_numbers)
1394                         return true;
1395         }
1396         return false;
1397 }
1398
1399 /**
1400  * This is the strongest feasibility check available.
1401  * @return whether the current trace (partial or complete) must be a prefix of
1402  * a feasible trace.
1403  */
1404 bool ModelExecution::isfeasibleprefix() const
1405 {
1406         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1407 }
1408
1409 /**
1410  * Print disagnostic information about an infeasible execution
1411  * @param prefix A string to prefix the output with; if NULL, then a default
1412  * message prefix will be provided
1413  */
1414 void ModelExecution::print_infeasibility(const char *prefix) const
1415 {
1416         char buf[100];
1417         char *ptr = buf;
1418         if (mo_graph->checkForCycles())
1419                 ptr += sprintf(ptr, "[mo cycle]");
1420         if (priv->failed_promise || priv->hard_failed_promise)
1421                 ptr += sprintf(ptr, "[failed promise]");
1422         if (priv->too_many_reads)
1423                 ptr += sprintf(ptr, "[too many reads]");
1424         if (priv->no_valid_reads)
1425                 ptr += sprintf(ptr, "[no valid reads-from]");
1426         if (priv->bad_synchronization)
1427                 ptr += sprintf(ptr, "[bad sw ordering]");
1428         if (priv->bad_sc_read)
1429                 ptr += sprintf(ptr, "[bad sc read]");
1430         if (promises_expired())
1431                 ptr += sprintf(ptr, "[promise expired]");
1432         if (promises.size() != 0)
1433                 ptr += sprintf(ptr, "[unresolved promise]");
1434         if (ptr != buf)
1435                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1436 }
1437
1438 /**
1439  * Returns whether the current completed trace is feasible, except for pending
1440  * release sequences.
1441  */
1442 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1443 {
1444         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1445
1446 }
1447
1448 /**
1449  * Check if the current partial trace is infeasible. Does not check any
1450  * end-of-execution flags, which might rule out the execution. Thus, this is
1451  * useful only for ruling an execution as infeasible.
1452  * @return whether the current partial trace is infeasible.
1453  */
1454 bool ModelExecution::is_infeasible() const
1455 {
1456         return mo_graph->checkForCycles() ||
1457                 priv->no_valid_reads ||
1458                 priv->too_many_reads ||
1459                 priv->bad_synchronization ||
1460                 priv->bad_sc_read ||
1461                 priv->hard_failed_promise ||
1462                 promises_expired();
1463 }
1464
1465 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1466 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1467         ModelAction *lastread = get_last_action(act->get_tid());
1468         lastread->process_rmw(act);
1469         if (act->is_rmw()) {
1470                 if (lastread->get_reads_from())
1471                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1472                 else
1473                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1474                 mo_graph->commitChanges();
1475         }
1476         return lastread;
1477 }
1478
/**
 * A helper function for ModelExecution::check_recency, to check if the current
 * thread is able to read from a different write/promise for 'params.maxreads'
 * number of steps and if that write/promise should become visible (i.e., is
 * ordered later in the modification order). This helps model memory liveness.
 *
 * @param curr The current action. Must be a read.
 * @param rf The write/promise from which we plan to read
 * @param other_rf The write/promise from which we may read
 * @return True if we were able to read from other_rf for params.maxreads steps
 */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	/* Walk this thread's per-location action list, newest first */
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone?
	 * NOTE(review): assumes at least 'maxreads' prior actions exist in
	 * this list (no rend() check) — presumably guaranteed by the caller
	 * check_recency; confirm before calling from elsewhere */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1516
/**
 * Checks whether a thread has read from the same write or Promise for too many
 * times without seeing the effects of a later write/Promise.
 *
 * Basic idea:
 * 1) there must be a different write/promise that we could read from,
 * 2) we must have read from the same write/promise in excess of maxreads times,
 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
 *
 * If so, we decide that the execution is no longer feasible.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction/Promise from which we might read.
 * @return True if the read should succeed; false otherwise
 */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* maxreads == 0 disables the liveness check entirely */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	/* With at most one read-from candidate there is no alternative anyway */
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; any non-read, any
	 * read from a different source, or any read with no alternative
	 * candidates means the liveness rule does not apply yet */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* Could any alternative write/promise have satisfied all of the last
	 * maxreads reads? If so, reading rf yet again is a liveness failure.
	 * (The loop above guarantees should_read_instead can safely walk back
	 * maxreads actions.) */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1581
/**
 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read.  Two cases:
 *
 * -# The action is a write: that write must either occur before
 * the write we read from or be the write we read from.
 * -# The action is a read: the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
template <typename rf_type>
bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	/* NOTE(review): last_sc_write is computed but never used below —
	 * candidate for removal; confirm get_last_seq_cst_write() has no
	 * needed side effects before deleting */
	ModelAction *last_sc_write = NULL;
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent.
		 * Note: addEdge() is kept on the left of '||' so it always
		 * executes, even once 'added' is true */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Skip curr */
			if (act == curr)
				continue;
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				if (act->happens_before(curr))
					break;
				else
					continue;
			}

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					added = mo_graph->addEdge(act, rf) || added;
				} else {
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					/* NOTE(review): assumes a read lacking a
					 * reads-from write always has a reads-from
					 * promise — prevrf_promise is dereferenced
					 * unchecked; confirm the invariant holds */
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, promises[i]) || added;

	return added;
}
1694
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @param send_fv A vector for stashing reads to which we may pass our future
 * value. If NULL, then don't record any future values.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent
		 * write, so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent.
		 * Note: addEdge() is kept on the left of '||' so it always
		 * executes, even once 'added' is true */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should add
				 * whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					/* Read without a concrete source: order its promise */
					if (act->get_reads_from() == NULL) {
						added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
					} else
						added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
						     !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */

				/* Only propose the future value when it survives the
				 * thin-air and coherence-promise pruning checks */
				if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1835
//This procedure uses coherence to prune future values that are
//guaranteed to generate a coherence violation.
//
//We need to see if there is (1) a promise for the writing thread, (2)
//the promise is sb before the write, (3) the promise can only be
//resolved by the reading thread, and (4) the promise has the same
//location as the read/write.
//
//Returns false if passing the future value would certainly create a
//coherence violation; true otherwise.

bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises newest-first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* (4) promise must be at the same location */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						/* Walk up the node-stack parent chain from 'read'
						 * toward 'prreader'.  NOTE(review): assumes
						 * prreader lies on this chain; otherwise the walk
						 * would run past it — confirm this invariant */
						bool okay=false;
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					return false;
				}
			}
		}
	}
	return true;
}
1878
1879
1880 /** Arbitrary reads from the future are not allowed.  Section 29.3
1881  * part 9 places some constraints.  This method checks one result of constraint
1882  * constraint.  Others require compiler support. */
1883 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1884 {
1885         if (!writer->is_rmw())
1886                 return true;
1887
1888         if (!reader->is_rmw())
1889                 return true;
1890
1891         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1892                 if (search == reader)
1893                         return false;
1894                 if (search->get_tid() == reader->get_tid() &&
1895                                 search->happens_before(reader))
1896                         break;
1897         }
1898
1899         return true;
1900 }
1901
1902 /**
1903  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1904  * some constraints. This method checks one the following constraint (others
1905  * require compiler support):
1906  *
1907  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1908  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1909  */
1910 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1911 {
1912         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1913         unsigned int i;
1914         /* Iterate over all threads */
1915         for (i = 0; i < thrd_lists->size(); i++) {
1916                 const ModelAction *write_after_read = NULL;
1917
1918                 /* Iterate over actions in thread, starting from most recent */
1919                 action_list_t *list = &(*thrd_lists)[i];
1920                 action_list_t::reverse_iterator rit;
1921                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1922                         ModelAction *act = *rit;
1923
1924                         /* Don't disallow due to act == reader */
1925                         if (!reader->happens_before(act) || reader == act)
1926                                 break;
1927                         else if (act->is_write())
1928                                 write_after_read = act;
1929                         else if (act->is_read() && act->get_reads_from() != NULL)
1930                                 write_after_read = act->get_reads_from();
1931                 }
1932
1933                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1934                         return false;
1935         }
1936         return true;
1937 }
1938
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelExecution is certain that release_heads is complete;
 * false otherwise
 */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Phase 1: walk backwards through the RMW reads-from chain,
	 * collecting release (or fence-release) heads along the way */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *       both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Phase 2: find the last {write,fence}-release in rf's thread */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Phase 3: check every other thread for writes that might break the
	 * contiguous modification-order subsequence from release to rf */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* A thread that is disabled, complete, or whose last action
		 * already happens-after rf cannot produce new breaking writes */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		/* Release head established; discard provisional data */
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record where to resume this check later */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2106
2107 /**
2108  * An interface for getting the release sequence head(s) with which a
2109  * given ModelAction must synchronize. This function only returns a non-empty
2110  * result when it can locate a release sequence head with certainty. Otherwise,
2111  * it may mark the internal state of the ModelExecution so that it will handle
2112  * the release sequence at a later time, causing @a acquire to update its
2113  * synchronization at some later point in execution.
2114  *
2115  * @param acquire The 'acquire' action that may synchronize with a release
2116  * sequence
2117  * @param read The read action that may read from a release sequence; this may
2118  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2119  * when 'acquire' is a fence-acquire)
2120  * @param release_heads A pass-by-reference return parameter. Will be filled
2121  * with the head(s) of the release sequence(s), if they exists with certainty.
2122  * @see ModelExecution::release_seq_heads
2123  */
2124 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2125                 ModelAction *read, rel_heads_list_t *release_heads)
2126 {
2127         const ModelAction *rf = read->get_reads_from();
2128         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2129         sequence->acquire = acquire;
2130         sequence->read = read;
2131
2132         if (!release_seq_heads(rf, release_heads, sequence)) {
2133                 /* add act to 'lazy checking' list */
2134                 pending_rel_seqs.push_back(sequence);
2135         } else {
2136                 snapshot_free(sequence);
2137         }
2138 }
2139
2140 /**
2141  * @brief Propagate a modified clock vector to actions later in the execution
2142  * order
2143  *
2144  * After an acquire operation lazily completes a release-sequence
2145  * synchronization, we must update all clock vectors for operations later than
2146  * the acquire in the execution order.
2147  *
2148  * @param acquire The ModelAction whose clock vector must be propagated
2149  * @param work The work queue to which we can add work items, if this
2150  * propagation triggers more updates (e.g., to the modification order)
2151  */
2152 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2153 {
2154         /* Re-check all pending release sequences */
2155         work->push_back(CheckRelSeqWorkEntry(NULL));
2156         /* Re-check read-acquire for mo_graph edges */
2157         work->push_back(MOEdgeWorkEntry(acquire));
2158
2159         /* propagate synchronization to later actions */
2160         action_list_t::reverse_iterator rit = action_trace.rbegin();
2161         for (; (*rit) != acquire; rit++) {
2162                 ModelAction *propagate = *rit;
2163                 if (acquire->happens_before(propagate)) {
2164                         synchronize(acquire, propagate);
2165                         /* Re-check 'propagate' for mo_graph edges */
2166                         work->push_back(MOEdgeWorkEntry(propagate));
2167                 }
2168         }
2169 }
2170
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-run the release-sequence search with the (now more
		 * complete) modification-order information */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		/* NOTE(review): 'updated' accumulates across loop iterations,
		 * so once any sequence adds synchronization, every subsequent
		 * pending acquire is also propagated — conservative, but
		 * possibly more work than strictly necessary; confirm intent */
		if (updated) {
			/* Propagate the changed clock vector */
			propagate_clockvector(acquire, work_queue);
		}
		if (complete) {
			/* Fully resolved; remove from the pending list */
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2225
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First action on an atomic location: seed the lists with an
	 * uninitialized-store placeholder so later reads have a candidate */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	/* Global trace; the placeholder goes at the very front */
	action_trace.push_back(act);
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-location, per-thread lists */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Track each thread's most recent action */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Track each thread's most recent release fence */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	/* A wait also operates on its mutex: index it under the mutex
	 * location too (the action's value holds the mutex address) */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2279
2280 /**
2281  * @brief Get the last action performed by a particular Thread
2282  * @param tid The thread ID of the Thread in question
2283  * @return The last action in the thread
2284  */
2285 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2286 {
2287         int threadid = id_to_int(tid);
2288         if (threadid < (int)thrd_last_action.size())
2289                 return thrd_last_action[id_to_int(tid)];
2290         else
2291                 return NULL;
2292 }
2293
2294 /**
2295  * @brief Get the last fence release performed by a particular Thread
2296  * @param tid The thread ID of the Thread in question
2297  * @return The last fence release in the thread, if one exists; NULL otherwise
2298  */
2299 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2300 {
2301         int threadid = id_to_int(tid);
2302         if (threadid < (int)thrd_last_fence_release.size())
2303                 return thrd_last_fence_release[id_to_int(tid)];
2304         else
2305                 return NULL;
2306 }
2307
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last seq_cst write
 */
ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
{
	void *location = curr->get_location();
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	/* NOTE(review): assumes 'curr' is already present in the per-location
	 * list (see add_action_to_lists); if not, this scan walks off the end
	 * of the list — confirm the call-site ordering guarantees this */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); (*rit) != curr; rit++)
		;
	rit++; /* Skip past curr */
	/* First seq_cst write strictly before curr in the list order */
	for ( ; rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}
2330
/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
 */
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);

	/* No fences have been recorded at all */
	if (!list)
		return NULL;

	action_list_t::reverse_iterator rit = list->rbegin();

	if (before_fence) {
		/* Position the iterator at 'before_fence', then step past it so
		 * the search below only considers strictly-earlier actions */
		for (; rit != list->rend(); rit++)
			if (*rit == before_fence)
				break;

		ASSERT(*rit == before_fence);
		rit++;
	}

	/* Most recent seq_cst fence belonging to this thread */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}
2363
2364 /**
2365  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2366  * location). This function identifies the mutex according to the current
2367  * action, which is presumed to perform on the same mutex.
2368  * @param curr The current ModelAction; also denotes the object location to
2369  * check
2370  * @return The last unlock operation
2371  */
2372 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2373 {
2374         void *location = curr->get_location();
2375         action_list_t *list = obj_map.get(location);
2376         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2377         action_list_t::reverse_iterator rit;
2378         for (rit = list->rbegin(); rit != list->rend(); rit++)
2379                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2380                         return *rit;
2381         return NULL;
2382 }
2383
2384 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2385 {
2386         ModelAction *parent = get_last_action(tid);
2387         if (!parent)
2388                 parent = get_thread(tid)->get_creation();
2389         return parent;
2390 }
2391
/**
 * Returns the clock vector for a given thread.
 * @param tid The thread whose clock vector we want
 * @return Desired clock vector
 */
ClockVector * ModelExecution::get_cv(thread_id_t tid) const
{
	/* The clock vector lives on the thread's most recent action (or its
	 * creation action, via get_parent_action's fallback) */
	return get_parent_action(tid)->get_cv();
}
2401
2402 /**
2403  * @brief Find the promise (if any) to resolve for the current action and
2404  * remove it from the pending promise vector
2405  * @param curr The current ModelAction. Should be a write.
2406  * @return The Promise to resolve, if any; otherwise NULL
2407  */
2408 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2409 {
2410         for (unsigned int i = 0; i < promises.size(); i++)
2411                 if (curr->get_node()->get_promise(i)) {
2412                         Promise *ret = promises[i];
2413                         promises.erase(promises.begin() + i);
2414                         return ret;
2415                 }
2416         return NULL;
2417 }
2418
/**
 * Resolve a Promise with a current write.
 * @param write The ModelAction that is fulfilling Promises
 * @param promise The Promise to resolve
 * @param work The work queue, for adding new fixup work
 * @return True if the Promise was successfully resolved; false otherwise
 */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
		work_queue_t *work)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Point every reader of this promise at the fulfilling write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		}
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	/* Fold the promise's mo_graph placeholder into the real write; a
	 * failure here is an unrecoverable (hard) promise failure */
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	/* NOTE(review): always reports success, even when resolvePromise()
	 * failed above — the failure is surfaced via hard_failed_promise */
	return true;
}
2464
2465 /**
2466  * Compute the set of promises that could potentially be satisfied by this
2467  * action. Note that the set computation actually appears in the Node, not in
2468  * ModelExecution.
2469  * @param curr The ModelAction that may satisfy promises
2470  */
2471 void ModelExecution::compute_promises(ModelAction *curr)
2472 {
2473         for (unsigned int i = 0; i < promises.size(); i++) {
2474                 Promise *promise = promises[i];
2475                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2476                         continue;
2477
2478                 bool satisfy = true;
2479                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2480                         const ModelAction *act = promise->get_reader(j);
2481                         if (act->happens_before(curr) ||
2482                                         act->could_synchronize_with(curr)) {
2483                                 satisfy = false;
2484                                 break;
2485                         }
2486                 }
2487                 if (satisfy)
2488                         curr->get_node()->set_promise(i);
2489         }
2490 }
2491
/** Checks promises in response to change in ClockVector Threads. */
void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Skip promises this thread can no longer satisfy anyway */
		if (!promise->thread_is_available(tid))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* Thread newly synchronized past this reader: it can no
			 * longer provide the promised write */
			if ((!old_cv || !old_cv->synchronized_since(act)) &&
					merge_cv->synchronized_since(act)) {
				if (promise->eliminate_thread(tid)) {
					/* Promise has failed; stop scanning — one
					 * failure dooms the execution */
					priv->failed_promise = true;
					return;
				}
			}
		}
	}
}
2512
2513 void ModelExecution::check_promises_thread_disabled()
2514 {
2515         for (unsigned int i = 0; i < promises.size(); i++) {
2516                 Promise *promise = promises[i];
2517                 if (promise->has_failed()) {
2518                         priv->failed_promise = true;
2519                         return;
2520                 }
2521         }
2522 }
2523
/**
 * @brief Checks promises in response to addition to modification order for
 * threads.
 *
 * We test whether threads are still available for satisfying promises after an
 * addition to our modification order constraints. Those that are unavailable
 * are "eliminated". Once all threads are eliminated from satisfying a promise,
 * that promise has failed.
 *
 * @param act The ModelAction which updated the modification order
 * @param is_read_check Should be true if act is a read and we must check for
 * updates to the store from which it read (there is a distinction here for
 * RMW's, which are both a load and a store)
 */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read check, the relevant store is the one 'act' read from */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* If any reader of the promise happens-before 'act', check the
		 * promise against this write; only the first such reader
		 * matters (hence the break) */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
			       continue;
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* If this write is mo-reachable from the promise, the promise
		 * must be re-checked against it */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
		}
	}
}
2572
2573 /**
2574  * Compute the set of writes that may break the current pending release
2575  * sequence. This information is extracted from previou release sequence
2576  * calculations.
2577  *
2578  * @param curr The current ModelAction. Must be a release sequence fixup
2579  * action.
2580  */
2581 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2582 {
2583         if (pending_rel_seqs.empty())
2584                 return;
2585
2586         struct release_seq *pending = pending_rel_seqs.back();
2587         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2588                 const ModelAction *write = pending->writes[i];
2589                 curr->get_node()->add_relseq_break(write);
2590         }
2591
2592         /* NULL means don't break the sequence; just synchronize */
2593         curr->get_node()->add_relseq_break(NULL);
2594 }
2595
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* Needed for the seq_cst read restriction below */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			/* A seq_cst read may not read a seq_cst write other than
			 * the most recent one, nor anything that happens-before
			 * that write */
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			/* Sleeping threads are further restricted in what they
			 * may read (see sleep_can_read_from) */
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: tentatively apply the
				 * mo constraints, test feasibility, then roll back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2677
2678 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2679 {
2680         for ( ; write != NULL; write = write->get_reads_from()) {
2681                 /* UNINIT actions don't have a Node, and they never sleep */
2682                 if (write->is_uninitialized())
2683                         return true;
2684                 Node *prevnode = write->get_node()->get_parent();
2685
2686                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2687                 if (write->is_release() && thread_sleep)
2688                         return true;
2689                 if (!write->is_rmw())
2690                         return false;
2691         }
2692         return true;
2693 }
2694
2695 /**
2696  * @brief Get an action representing an uninitialized atomic
2697  *
2698  * This function may create a new one or try to retrieve one from the NodeStack
2699  *
2700  * @param curr The current action, which prompts the creation of an UNINIT action
2701  * @return A pointer to the UNINIT ModelAction
2702  */
2703 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2704 {
2705         Node *node = curr->get_node();
2706         ModelAction *act = node->get_uninit_action();
2707         if (!act) {
2708                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2709                 node->set_uninit_action(act);
2710         }
2711         act->create_cv(NULL);
2712         return act;
2713 }
2714
2715 static void print_list(const action_list_t *list)
2716 {
2717         action_list_t::const_iterator it;
2718
2719         model_print("------------------------------------------------------------------------------------\n");
2720         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2721         model_print("------------------------------------------------------------------------------------\n");
2722
2723         unsigned int hash = 0;
2724
2725         for (it = list->begin(); it != list->end(); it++) {
2726                 const ModelAction *act = *it;
2727                 if (act->get_seq_number() > 0)
2728                         act->print();
2729                 hash = hash^(hash<<3)^((*it)->hash());
2730         }
2731         model_print("HASH %u\n", hash);
2732         model_print("------------------------------------------------------------------------------------\n");
2733 }
2734
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph, rf edges, and sb edges to a
 * Graphviz .dot file named "<filename>.dot"
 * @param filename Base name for the output file (also used as the graph name)
 */
void ModelExecution::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf instead of sprintf: a long filename must not overflow */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return; /* cannot open the dump file; skip the dump */
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Per-thread: most recently seen action, for sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		/* Use id_to_int consistently for thread indexing (the original
		 * mixed raw thread_id_t and id_to_int) */
		int tid = id_to_int(act->get_tid());
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* rf edge comes either from a real store or a promise */
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
			else
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						act,
						"label=\"rf\", color=red");
		}
		/* sb edge from the thread's previous action */
		if (thread_array[tid]) {
			mo_graph->dot_print_edge(file,
					thread_array[tid],
					act,
					"label=\"sb\", color=blue, weight=400");
		}

		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2774
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump per-execution mo_graph and full-graph .dot files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	/* Header line: execution number plus status flags */
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	/* Full action table */
	print_list(&action_trace);
	model_print("\n");

	/* Any promises still outstanding at the end of the trace */
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2810
2811 /**
2812  * Add a Thread to the system for the first time. Should only be called once
2813  * per thread.
2814  * @param t The Thread to add
2815  */
2816 void ModelExecution::add_thread(Thread *t)
2817 {
2818         unsigned int i = id_to_int(t->get_id());
2819         if (i >= thread_map.size())
2820                 thread_map.resize(i + 1);
2821         thread_map[i] = t;
2822         if (!t->is_model_thread())
2823                 scheduler->add_thread(t);
2824 }
2825
2826 /**
2827  * @brief Get a Thread reference by its ID
2828  * @param tid The Thread's ID
2829  * @return A Thread reference
2830  */
2831 Thread * ModelExecution::get_thread(thread_id_t tid) const
2832 {
2833         unsigned int i = id_to_int(tid);
2834         if (i < thread_map.size())
2835                 return thread_map[i];
2836         return NULL;
2837 }
2838
/**
 * @brief Get a reference to the Thread in which a ModelAction was executed
 * @param act The ModelAction
 * @return A Thread reference
 */
Thread * ModelExecution::get_thread(const ModelAction *act) const
{
	/* Delegates to the thread-ID overload */
	return get_thread(act->get_tid());
}
2848
2849 /**
2850  * @brief Get a Promise's "promise number"
2851  *
2852  * A "promise number" is an index number that is unique to a promise, valid
2853  * only for a specific snapshot of an execution trace. Promises may come and go
2854  * as they are generated an resolved, so an index only retains meaning for the
2855  * current snapshot.
2856  *
2857  * @param promise The Promise to check
2858  * @return The promise index, if the promise still is valid; otherwise -1
2859  */
2860 int ModelExecution::get_promise_number(const Promise *promise) const
2861 {
2862         for (unsigned int i = 0; i < promises.size(); i++)
2863                 if (promises[i] == promise)
2864                         return i;
2865         /* Not found */
2866         return -1;
2867 }
2868
/**
 * @brief Check if a Thread is currently enabled
 *
 * Thin forwarding wrapper; enabled-ness is tracked by the Scheduler.
 *
 * @param t The Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelExecution::is_enabled(Thread *t) const
{
	return scheduler->is_enabled(t);
}
2878
/**
 * @brief Check if a Thread is currently enabled
 *
 * Thin forwarding wrapper; enabled-ness is tracked by the Scheduler.
 *
 * @param tid The ID of the Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelExecution::is_enabled(thread_id_t tid) const
{
	return scheduler->is_enabled(tid);
}
2888
2889 /**
2890  * @brief Select the next thread to execute based on the curren action
2891  *
2892  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2893  * actions should be followed by the execution of their child thread. In either
2894  * case, the current action should determine the next thread schedule.
2895  *
2896  * @param curr The current action
2897  * @return The next thread to run, if the current action will determine this
2898  * selection; otherwise NULL
2899  */
2900 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2901 {
2902         /* Do not split atomic RMW */
2903         if (curr->is_rmwr())
2904                 return get_thread(curr);
2905         if (curr->is_write()) {
2906 //              std::memory_order order = curr->get_mo(); 
2907 //              switch(order) {
2908 //                      case std::memory_order_relaxed: 
2909 //                              return get_thread(curr);
2910 //                      case std::memory_order_release:
2911 //                              return get_thread(curr);
2912 //                      defalut:
2913 //                              return NULL;
2914 //              }       
2915                 return NULL;
2916         }
2917
2918         /* Follow CREATE with the created thread */
2919         /* which is not needed, because model.cc takes care of this */
2920         if (curr->get_type() == THREAD_CREATE)
2921                 return curr->get_thread_operand(); 
2922         if (curr->get_type() == PTHREAD_CREATE) {
2923                 return curr->get_thread_operand();
2924         }
2925         return NULL;
2926 }
2927
2928 /** @return True if the execution has taken too many steps */
2929 bool ModelExecution::too_many_steps() const
2930 {
2931         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2932 }
2933
2934 /**
2935  * Takes the next step in the execution, if possible.
2936  * @param curr The current step to take
2937  * @return Returns the next Thread to run, if any; NULL if this execution
2938  * should terminate
2939  */
2940 Thread * ModelExecution::take_step(ModelAction *curr)
2941 {
2942         Thread *curr_thrd = get_thread(curr);
2943         ASSERT(curr_thrd->get_state() == THREAD_READY);
2944
2945         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2946         curr = check_current_action(curr);
2947         ASSERT(curr);
2948
2949         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2950                 scheduler->remove_thread(curr_thrd);
2951
2952         return action_select_next_thread(curr);
2953 }
2954
2955 /**
2956  * Launch end-of-execution release sequence fixups only when
2957  * the execution is otherwise feasible AND there are:
2958  *
2959  * (1) pending release sequences
2960  * (2) pending assertions that could be invalidated by a change
2961  * in clock vectors (i.e., data races)
2962  * (3) no pending promises
2963  */
2964 void ModelExecution::fixup_release_sequences()
2965 {
2966         while (!pending_rel_seqs.empty() &&
2967                         is_feasible_prefix_ignore_relseq() &&
2968                         haveUnrealizedRaces()) {
2969                 model_print("*** WARNING: release sequence fixup action "
2970                                 "(%zu pending release seuqence(s)) ***\n",
2971                                 pending_rel_seqs.size());
2972                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2973                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2974                                 model_thread);
2975                 take_step(fixup);
2976         };
2977 }