Fix the case where no waiter is waiting on the condition variable.
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "bugmessage.h"
18
19 #define INITIAL_THREAD_ID       0
20
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 *
 * All per-execution bookkeeping that must roll back when the checker restores
 * a snapshot lives here (hence SNAPSHOTALLOC below).
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* bug_message objects are owned by this struct */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to hand out to the next Thread created */
	unsigned int next_thread_id;
	/** @brief Highest sequence number assigned to a ModelAction so far */
	modelclock_t used_sequence_numbers;
	/** @brief Most recent eligible backtracking point (may be NULL) */
	ModelAction *next_backtrack;
	/** @brief Bugs reported during this execution (owned) */
	SnapVector<bug_message *> bugs;
	bool failed_promise;
	bool hard_failed_promise;
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief A seq-cst read had to violate SC to find a writer */
	bool bad_sc_read;
	/** @brief An assertion (bug or infeasibility) halted this execution */
	bool asserted;

	SNAPSHOTALLOC
};
61
/** @brief Constructor
 *
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (not owned)
 * @param scheduler The scheduler used to pick the next Thread (not owned)
 * @param node_stack The NodeStack recording exploration state (not owned)
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	pthread_map(0),
	pthread_counter(0),
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	mutex_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());	// L: Create model thread
	add_thread(model_thread);			// L: Add model thread to scheduler
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
93
94 /** @brief Destructor */
95 ModelExecution::~ModelExecution()
96 {
97         for (unsigned int i = 0; i < get_num_threads(); i++)
98                 delete get_thread(int_to_id(i));
99
100         for (unsigned int i = 0; i < promises.size(); i++)
101                 delete promises[i];
102
103         delete mo_graph;
104         delete priv;
105 }
106
107 int ModelExecution::get_execution_number() const
108 {
109         return model->get_execution_number();
110 }
111
112 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
113 {
114         action_list_t *tmp = hash->get(ptr);
115         if (tmp == NULL) {
116                 tmp = new action_list_t();
117                 hash->put(ptr, tmp);
118         }
119         return tmp;
120 }
121
122 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
123 {
124         SnapVector<action_list_t> *tmp = hash->get(ptr);
125         if (tmp == NULL) {
126                 tmp = new SnapVector<action_list_t>();
127                 hash->put(ptr, tmp);
128         }
129         return tmp;
130 }
131
132 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
133 {
134         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
135         if (wrv==NULL)
136                 return NULL;
137         unsigned int thread=id_to_int(tid);
138         if (thread < wrv->size())
139                 return &(*wrv)[thread];
140         else
141                 return NULL;
142 }
143
144 /** @return a thread ID for a new Thread */
145 thread_id_t ModelExecution::get_next_id()
146 {
147         return priv->next_thread_id++;
148 }
149
150 /** @return the number of user threads created during this execution */
151 unsigned int ModelExecution::get_num_threads() const
152 {
153         return priv->next_thread_id;
154 }
155
156 /** @return a sequence number for a new ModelAction */
157 modelclock_t ModelExecution::get_next_seq_num()
158 {
159         return ++priv->used_sequence_numbers;
160 }
161
162 /**
163  * @brief Should the current action wake up a given thread?
164  *
165  * @param curr The current action
166  * @param thread The thread that we might wake up
167  * @return True, if we should wake up the sleeping thread; false otherwise
168  */
169 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
170 {
171         const ModelAction *asleep = thread->get_pending();
172         /* Don't allow partial RMW to wake anyone up */
173         if (curr->is_rmwr())
174                 return false;
175         /* Synchronizing actions may have been backtracked */
176         if (asleep->could_synchronize_with(curr))
177                 return true;
178         /* All acquire/release fences and fence-acquire/store-release */
179         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
180                 return true;
181         /* Fence-release + store can awake load-acquire on the same location */
182         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
183                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
184                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
185                         return true;
186         }
187         return false;
188 }
189
190 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
191 {
192         for (unsigned int i = 0; i < get_num_threads(); i++) {
193                 Thread *thr = get_thread(int_to_id(i));
194                 if (scheduler->is_sleep_set(thr)) {
195                         if (should_wake_up(curr, thr))
196                                 /* Remove this thread from sleep set */
197                                 scheduler->remove_sleep(thr);
198                 }
199         }
200 }
201
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
208
/** @brief Alert the model-checker that a bad seq-cst read was made
 *
 * (The previous comment here was a copy-paste of set_bad_synchronization's;
 * this function sets the bad_sc_read flag, not bad_synchronization.)
 */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
215
216 bool ModelExecution::assert_bug(const char *msg)
217 {
218         priv->bugs.push_back(new bug_message(msg));
219
220         if (isfeasibleprefix()) {
221                 set_assert();
222                 return true;
223         }
224         return false;
225 }
226
227 /** @return True, if any bugs have been reported for this execution */
228 bool ModelExecution::have_bug_reports() const
229 {
230         return priv->bugs.size() != 0;
231 }
232
/** @return the list of bug reports recorded for this execution (owned by
 *  the snapshotted members; do not free) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
237
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
248
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
258
259 /**
260  * Check if we are in a deadlock. Should only be called at the end of an
261  * execution, although it should not give false positives in the middle of an
262  * execution (there should be some ENABLED thread).
263  *
264  * @return True if program is in a deadlock; false otherwise
265  */
266 bool ModelExecution::is_deadlocked() const
267 {
268         bool blocking_threads = false;
269         for (unsigned int i = 0; i < get_num_threads(); i++) {
270                 thread_id_t tid = int_to_id(i);
271                 if (is_enabled(tid))
272                         return false;
273                 Thread *t = get_thread(tid);
274                 if (!t->is_model_thread() && t->get_pending())
275                         blocking_threads = true;
276         }
277         return blocking_threads;
278 }
279
280 /**
281  * @brief Check if we are yield-blocked
282  *
283  * A program can be "yield-blocked" if all threads are ready to execute a
284  * yield.
285  *
286  * @return True if the program is yield-blocked; false otherwise
287  */
288 bool ModelExecution::is_yieldblocked() const
289 {
290         if (!params->yieldblock)
291                 return false;
292
293         for (unsigned int i = 0; i < get_num_threads(); i++) {
294                 thread_id_t tid = int_to_id(i);
295                 Thread *t = get_thread(tid);
296                 if (t->get_pending() && t->get_pending()->is_yield())
297                         return true;
298         }
299         return false;
300 }
301
302 /**
303  * Check if this is a complete execution. That is, have all thread completed
304  * execution (rather than exiting because sleep sets have forced a redundant
305  * execution).
306  *
307  * @return True if the execution is complete.
308  */
309 bool ModelExecution::is_complete_execution() const
310 {
311         if (is_yieldblocked())
312                 return false;
313         for (unsigned int i = 0; i < get_num_threads(); i++)
314                 if (is_enabled(int_to_id(i)))
315                         return false;
316         return true;
317 }
318
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: walk the trace backwards to the release point */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread slots: latest acquire-fence and latest plain load on
	 * act's location, both indexed by thread ID */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	/* Continue scanning backwards from the release */
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Same-thread actions can't be backtracking conflicts here */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire-fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Choose the latest fence that also has a prior load in its thread */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
391
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* seq-cst fence: intentional fall-through to the read/write case */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; take the later of the two */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* Wait conflicts with failed trylocks and notifies from
			 * other threads */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
491
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * Implements the backtracking-point computation of Dynamic Partial Order
 * Reduction: when act conflicts with an earlier action prev, mark threads
 * at prev's parent node so a different interleaving is explored later.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point, only it needs to
	 * be considered; otherwise consider every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			/* Skip if some enabled thread holds priority over us */
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
575
576 /**
577  * @brief Cache the a backtracking point as the "most recent", if eligible
578  *
579  * Note that this does not prepare the NodeStack for this backtracking
580  * operation, it only caches the action on a per-execution basis
581  *
582  * @param act The operation at which we should explore a different next action
583  * (i.e., backtracking point)
584  * @return True, if this action is now the most recent backtracking point;
585  * false otherwise
586  */
587 bool ModelExecution::set_latest_backtrack(ModelAction *act)
588 {
589         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
590                 priv->next_backtrack = act;
591                 return true;
592         }
593         return false;
594 }
595
/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 *
 * The cached point is consumed: subsequent calls return NULL until a new
 * point is set via set_latest_backtrack().
 *
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelExecution::get_next_backtrack()
{
	ModelAction *next = priv->next_backtrack;
	priv->next_backtrack = NULL;
	return next;
}
607
/**
 * Processes a read model action.
 *
 * Resolves where the read gets its value from, according to the read's Node
 * status: a past write, an unresolved Promise, or a future value (which
 * creates a new Promise). mo_graph changes are staged and either committed
 * or rolled back when another read-from candidate is retried.
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loop only repeats (via `continue`) when a read-from-past candidate
	 * fails the recency check and another candidate is available */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Try the next read-from candidate */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left: flag and proceed anyway */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			/* add_reader failing means this promise can't accept us */
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a fresh Promise to be resolved by a later write */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
671
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	cdsc::mutex *mutex = curr->get_mutex();
	struct cdsc::mutex_state *state = NULL;

	/* NOTE(review): state stays NULL when curr has no mutex; the cases
	 * below dereference state unconditionally — presumably mutex is
	 * always non-NULL for these action types. Confirm against callers. */
	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds iff the mutex is currently unheld */
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Accessing the mutex before its allocating thread initialized
		 * it (per clock vectors) is a bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads; wait releases the mutex first,
		 * just like unlock */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter and block this thread */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* misc encodes which waiter to wake (explores all choices) */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();

		// WL: notify_one with no waiter waiting is a no-op
		if (it == waiters->end())
			break;

		/* NOTE(review): advance() past end() is UB; presumably
		 * wakeupthread is always < waiters->size() because misc is
		 * bounded by the waiter count — confirm in NodeStack. */
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
772
773 /**
774  * @brief Check if the current pending promises allow a future value to be sent
775  *
776  * It is unsafe to pass a future value back if there exists a pending promise Pr
777  * such that:
778  *
779  *    reader --exec-> Pr --exec-> writer
780  *
781  * If such Pr exists, we must save the pending future value until Pr is
782  * resolved.
783  *
784  * @param writer The operation which sends the future value. Must be a write.
785  * @param reader The operation which will observe the value. Must be a read.
786  * @return True if the future value can be sent now; false if it must wait.
787  */
788 bool ModelExecution::promises_may_allow(const ModelAction *writer,
789                 const ModelAction *reader) const
790 {
791         for (int i = promises.size() - 1; i >= 0; i--) {
792                 ModelAction *pr = promises[i]->get_reader(0);
793                 //reader is after promise...doesn't cross any promise
794                 if (*reader > *pr)
795                         return true;
796                 //writer is after promise, reader before...bad...
797                 if (*writer > *pr)
798                         return false;
799         }
800         return true;
801 }
802
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	/* (The writer's thread may have been created after the reader's node;
	 * walk up parents until we reach a thread the node knows about) */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Expire the value maxfuturedelay sequence numbers after the write */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* Only backtrack if this is a genuinely new future value */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
833
/**
 * Process a write ModelAction
 *
 * Computes the write's modification-order constraints, resolves at most one
 * pending promise satisfied by this write, and distributes the write's value
 * as (possibly deferred) future values to candidate readers.
 *
 * @param curr The ModelAction to process
 * @param work The work queue, for adding fixup work
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
{
	/* Readers to which we may send our future value */
	ModelVector<ModelAction *> send_fv;

	const ModelAction *earliest_promise_reader;
	bool updated_promises = false;

	/* w_modification_order also fills send_fv with candidate readers */
	bool updated_mod_order = w_modification_order(curr, &send_fv);
	/* This write may have been chosen to satisfy a pending promise */
	Promise *promise = pop_promise_to_resolve(curr);

	if (promise) {
		earliest_promise_reader = promise->get_reader(0);
		updated_promises = resolve_promise(curr, promise, work);
	} else
		earliest_promise_reader = NULL;

	for (unsigned int i = 0; i < send_fv.size(); i++) {
		ModelAction *read = send_fv[i];

		/* Don't send future values to reads after the Promise we resolve */
		if (!earliest_promise_reader || *read < *earliest_promise_reader) {
			/* Check if future value can be sent immediately */
			if (promises_may_allow(curr, read)) {
				add_future_value(curr, read);
			} else {
				/* Defer until the blocking promise(s) resolve */
				futurevalues.push_back(PendingFutureValue(curr, read));
			}
		}
	}

	/* Check the pending future values; iterate backwards so that erase()
	 * does not disturb the indices yet to be visited */
	for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
		struct PendingFutureValue pfv = futurevalues[i];
		if (promises_may_allow(pfv.writer, pfv.reader)) {
			add_future_value(pfv.writer, pfv.reader);
			futurevalues.erase(futurevalues.begin() + i);
		}
	}

	mo_graph->commitChanges();
	mo_check_promises(curr, false);

	/* Writes always "succeed" from the caller's perspective */
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
}
886
887 /**
888  * Process a fence ModelAction
889  * @param curr The ModelAction to process
890  * @return True if synchronization was updated
891  */
892 bool ModelExecution::process_fence(ModelAction *curr)
893 {
894         /*
895          * fence-relaxed: no-op
896          * fence-release: only log the occurence (not in this function), for
897          *   use in later synchronization
898          * fence-acquire (this function): search for hypothetical release
899          *   sequences
900          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
901          */
902         bool updated = false;
903         if (curr->is_acquire()) {
904                 action_list_t *list = &action_trace;
905                 action_list_t::reverse_iterator rit;
906                 /* Find X : is_read(X) && X --sb-> curr */
907                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
908                         ModelAction *act = *rit;
909                         if (act == curr)
910                                 continue;
911                         if (act->get_tid() != curr->get_tid())
912                                 continue;
913                         /* Stop at the beginning of the thread */
914                         if (act->is_thread_start())
915                                 break;
916                         /* Stop once we reach a prior fence-acquire */
917                         if (act->is_fence() && act->is_acquire())
918                                 break;
919                         if (!act->is_read())
920                                 continue;
921                         /* read-acquire will find its own release sequences */
922                         if (act->is_acquire())
923                                 continue;
924
925                         /* Establish hypothetical release sequences */
926                         rel_heads_list_t release_heads;
927                         get_release_seq_heads(curr, act, &release_heads);
928                         for (unsigned int i = 0; i < release_heads.size(); i++)
929                                 synchronize(release_heads[i], curr);
930                         if (release_heads.size() != 0)
931                                 updated = true;
932                 }
933         }
934         return updated;
935 }
936
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc.  This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelExecution::process_thread_action(ModelAction *curr)
{
	bool updated = false;

	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* Spawn a new Thread from the (thrd_t, thread_params) pair
		 * carried by the action */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		add_thread(th);
		th->set_creation(curr);
		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}
		break;
	}
	case PTHREAD_CREATE: {
		/* Hand the caller a fresh pthread id (a monotonic counter) */
		(*(pthread_t *)curr->get_location()) = pthread_counter++;

		struct pthread_params *params = (struct pthread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		add_thread(th);
		th->set_creation(curr);

		/* Keep the pthread-id -> Thread mapping in step with the counter */
		if ( pthread_map.size() < pthread_counter )
			pthread_map.resize( pthread_counter );
		pthread_map[ pthread_counter-1 ] = th;

		/* Promises can be satisfied by children */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(curr->get_tid()))
				promise->add_thread(th->get_id());
		}

		break;
	}
	case THREAD_JOIN: {
		/* Join synchronizes with the last action of the joined thread */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case PTHREAD_JOIN: {
		/* Same handling as THREAD_JOIN */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		updated = true; /* trigger rel-seq checks */
		break; // WL: to be add (modified)
	}

	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* Wake up any joining threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		}
		th->complete();
		/* Completed thread can't satisfy promises */
		for (unsigned int i = 0; i < promises.size(); i++) {
			Promise *promise = promises[i];
			if (promise->thread_is_available(th->get_id()))
				if (promise->eliminate_thread(th->get_id()))
					priv->failed_promise = true;
		}
		updated = true; /* trigger rel-seq checks */
		break;
	}
	case THREAD_START: {
		/* Verify promises against the starting thread's clock vector */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
		break;
	}
	default:
		break;
	}

	return updated;
}
1035
/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
{
	/* The chosen "break" write; NULL means force the sequence to complete */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* This fixup targets the most recent pending release sequence */
	struct release_seq *sequence = pending_rel_seqs.back();
	pending_rel_seqs.pop_back();
	ASSERT(sequence);
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(acquire);
	ASSERT(release);
	ASSERT(rf);
	ASSERT(release->same_thread(rf));

	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 * 'rf'
		 */

		/* Must synchronize */
		if (!synchronize(release, acquire))
			return;

		/* Propagate the changed clock vector */
		propagate_clockvector(acquire, work_queue);
	} else {
		/* Break release sequence with new edges:
		 *   release --mo--> write --mo--> rf */
		mo_graph->addEdge(release, write);
		mo_graph->addEdge(write, rf);
	}

	/* See if we have realized a data race */
	checkDataRaces();
}
1090
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
{
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second part of an RMW: merge into the prior RMWR and discard
		 * the caller's action object */
		newcurr = process_rmw(*curr);
		delete *curr;

		if (newcurr->is_rmw())
			compute_promises(newcurr);

		*curr = newcurr;
		return false;
	}

	(*curr)->set_seq_number(get_next_seq_num());

	/* Non-NULL iff an equivalent action exists from a prior exploration */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	if (newcurr) {
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);

		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);

		/* Discard duplicate ModelAction; use action from NodeStack */
		delete *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		*curr = newcurr;
		return false; /* Action was explored previously */
	} else {
		newcurr = *curr;

		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));

		/* Assign most recent release fence */
		newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			/* A wait gets two exploration choices (misc = 0 or 1) */
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* notify_one gets one choice per current condvar waiter */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
		}
		return true; /* This was a new ModelAction */
	}
}
1161
1162 /**
1163  * @brief Establish reads-from relation between two actions
1164  *
1165  * Perform basic operations involved with establishing a concrete rf relation,
1166  * including setting the ModelAction data and checking for release sequences.
1167  *
1168  * @param act The action that is reading (must be a read)
1169  * @param rf The action from which we are reading (must be a write)
1170  *
1171  * @return True if this read established synchronization
1172  */
1173
1174 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1175 {
1176         ASSERT(rf);
1177         ASSERT(rf->is_write());
1178
1179         act->set_read_from(rf);
1180         if (act->is_acquire()) {
1181                 rel_heads_list_t release_heads;
1182                 get_release_seq_heads(act, act, &release_heads);
1183                 int num_heads = release_heads.size();
1184                 for (unsigned int i = 0; i < release_heads.size(); i++)
1185                         if (!synchronize(release_heads[i], act))
1186                                 num_heads--;
1187                 return num_heads > 0;
1188         }
1189         return false;
1190 }
1191
1192 /**
1193  * @brief Synchronizes two actions
1194  *
1195  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1196  * This function performs the synchronization as well as providing other hooks
1197  * for other checks along with synchronization.
1198  *
1199  * @param first The left-hand side of the synchronizes-with relation
1200  * @param second The right-hand side of the synchronizes-with relation
1201  * @return True if the synchronization was successful (i.e., was consistent
1202  * with the execution order); false otherwise
1203  */
1204 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1205 {
1206         if (*second < *first) {
1207                 set_bad_synchronization();
1208                 return false;
1209         }
1210         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1211         return second->synchronize_with(first);
1212 }
1213
1214 /**
1215  * Check promises and eliminate potentially-satisfying threads when a thread is
1216  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1217  * no longer satisfy a promise generated from that thread.
1218  *
1219  * @param blocker The thread on which a thread is waiting
1220  * @param waiting The waiting thread
1221  */
1222 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1223 {
1224         for (unsigned int i = 0; i < promises.size(); i++) {
1225                 Promise *promise = promises[i];
1226                 if (!promise->thread_is_available(waiting->get_id()))
1227                         continue;
1228                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1229                         ModelAction *reader = promise->get_reader(j);
1230                         if (reader->get_tid() != blocker->get_id())
1231                                 continue;
1232                         if (promise->eliminate_thread(waiting->get_id())) {
1233                                 /* Promise has failed */
1234                                 priv->failed_promise = true;
1235                         } else {
1236                                 /* Only eliminate the 'waiting' thread once */
1237                                 return;
1238                         }
1239                 }
1240         }
1241 }
1242
1243 /**
1244  * @brief Check whether a model action is enabled.
1245  *
1246  * Checks whether an operation would be successful (i.e., is a lock already
1247  * locked, or is the joined thread already complete).
1248  *
1249  * For yield-blocking, yields are never enabled.
1250  *
1251  * @param curr is the ModelAction to check whether it is enabled.
1252  * @return a bool that indicates whether the action is enabled.
1253  */
1254 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1255         if (curr->is_lock()) {
1256                 cdsc::mutex *lock = curr->get_mutex();
1257                 struct cdsc::mutex_state *state = lock->get_state();
1258                 if (state->locked)
1259                         return false;
1260         } else if (curr->is_thread_join()) {
1261                 Thread *blocking = curr->get_thread_operand();
1262                 if (!blocking->is_complete()) {
1263                         thread_blocking_check_promises(blocking, get_thread(curr));
1264                         return false;
1265                 }
1266         } else if (params->yieldblock && curr->is_yield()) {
1267                 return false;
1268         }
1269
1270         return true;
1271 }
1272
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
{
	ASSERT(curr);
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May replace curr with the equivalent action from the NodeStack */
	bool newly_explored = initialize_curr_action(&curr);

	DBG();

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (params->yieldon) {
		curr->get_node()->update_yield(scheduler);
	}

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			if (process_thread_action(curr))
				update_all = true;

			/* The second part of an RMW was already processed as a read */
			if (act->is_read() && !second_part_of_rmw && process_read(act))
				update = true;

			if (act->is_write() && process_write(act, &work_queue))
				update = true;

			if (act->is_fence() && process_fence(act))
				update_all = true;

			if (act->is_mutex_op() && process_mutex(act))
				update_all = true;

			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			/* Queue follow-up release-sequence checks */
			if (update_all)
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			else if (update)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
			break;
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (rf) {
					if (r_modification_order(act, rf))
						updated = true;
					if (act->is_seqcst()) {
						/* An SC read must not read from a write that
						 * happens-before the most recent SC write */
						ModelAction *last_sc_write = get_last_seq_cst_write(act);
						if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
							set_bad_sc_read();
						}
					}
				} else if (promise) {
					if (r_modification_order(act, promise))
						updated = true;
				}
			}
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
					updated = true;
			}
			mo_graph->commitChanges();

			if (updated)
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
			break;
		}
		default:
			ASSERT(false);
			break;
		}
	}

	check_curr_backtracking(curr);
	set_backtracking(curr);
	return curr;
}
1388
1389 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1390 {
1391         Node *currnode = curr->get_node();
1392         Node *parnode = currnode->get_parent();
1393
1394         if ((parnode && !parnode->backtrack_empty()) ||
1395                          !currnode->misc_empty() ||
1396                          !currnode->read_from_empty() ||
1397                          !currnode->promise_empty() ||
1398                          !currnode->relseq_break_empty()) {
1399                 set_latest_backtrack(curr);
1400         }
1401 }
1402
1403 bool ModelExecution::promises_expired() const
1404 {
1405         for (unsigned int i = 0; i < promises.size(); i++) {
1406                 Promise *promise = promises[i];
1407                 if (promise->get_expiration() < priv->used_sequence_numbers)
1408                         return true;
1409         }
1410         return false;
1411 }
1412
1413 /**
1414  * This is the strongest feasibility check available.
1415  * @return whether the current trace (partial or complete) must be a prefix of
1416  * a feasible trace.
1417  */
1418 bool ModelExecution::isfeasibleprefix() const
1419 {
1420         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1421 }
1422
1423 /**
1424  * Print disagnostic information about an infeasible execution
1425  * @param prefix A string to prefix the output with; if NULL, then a default
1426  * message prefix will be provided
1427  */
1428 void ModelExecution::print_infeasibility(const char *prefix) const
1429 {
1430         char buf[100];
1431         char *ptr = buf;
1432         if (mo_graph->checkForCycles())
1433                 ptr += sprintf(ptr, "[mo cycle]");
1434         if (priv->failed_promise || priv->hard_failed_promise)
1435                 ptr += sprintf(ptr, "[failed promise]");
1436         if (priv->too_many_reads)
1437                 ptr += sprintf(ptr, "[too many reads]");
1438         if (priv->no_valid_reads)
1439                 ptr += sprintf(ptr, "[no valid reads-from]");
1440         if (priv->bad_synchronization)
1441                 ptr += sprintf(ptr, "[bad sw ordering]");
1442         if (priv->bad_sc_read)
1443                 ptr += sprintf(ptr, "[bad sc read]");
1444         if (promises_expired())
1445                 ptr += sprintf(ptr, "[promise expired]");
1446         if (promises.size() != 0)
1447                 ptr += sprintf(ptr, "[unresolved promise]");
1448         if (ptr != buf)
1449                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1450 }
1451
1452 /**
1453  * Returns whether the current completed trace is feasible, except for pending
1454  * release sequences.
1455  */
1456 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1457 {
1458         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1459
1460 }
1461
1462 /**
1463  * Check if the current partial trace is infeasible. Does not check any
1464  * end-of-execution flags, which might rule out the execution. Thus, this is
1465  * useful only for ruling an execution as infeasible.
1466  * @return whether the current partial trace is infeasible.
1467  */
1468 bool ModelExecution::is_infeasible() const
1469 {
1470         return mo_graph->checkForCycles() ||
1471                 priv->no_valid_reads ||
1472                 priv->too_many_reads ||
1473                 priv->bad_synchronization ||
1474                 priv->bad_sc_read ||
1475                 priv->hard_failed_promise ||
1476                 promises_expired();
1477 }
1478
1479 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1480 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1481         ModelAction *lastread = get_last_action(act->get_tid());
1482         lastread->process_rmw(act);
1483         if (act->is_rmw()) {
1484                 if (lastread->get_reads_from())
1485                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1486                 else
1487                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1488                 mo_graph->commitChanges();
1489         }
1490         return lastread;
1491 }
1492
/**
 * A helper function for ModelExecution::check_recency, to check if the current
 * thread is able to read from a different write/promise for 'params.maxreads'
 * number of steps and if that write/promise should become visible (i.e., is
 * ordered later in the modification order). This helps model memory liveness.
 *
 * @param curr The current action. Must be a read.
 * @param rf The write/promise from which we plan to read
 * @param other_rf The write/promise from which we may read
 * @return True if we were able to read from other_rf for params.maxreads steps
 */
template <typename T, typename U>
bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
{
	/* Need a different write/promise */
	if (other_rf->equals(rf))
		return false;

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))
		return false;

	/* Walk this thread's per-location action list backwards from curr */
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	/* Does this write/promise work for everyone? */
	/* NOTE(review): rit is never checked against rend(); this appears to
	 * assume the caller guarantees at least 'maxreads' prior reads exist
	 * in this thread's list -- confirm against check_recency's checks */
	for (int i = 0; i < params->maxreads; i++, rit++) {
		ModelAction *act = *rit;
		if (!act->may_read_from(other_rf))
			return false;
	}
	return true;
}
1530
1531 /**
1532  * Checks whether a thread has read from the same write or Promise for too many
1533  * times without seeing the effects of a later write/Promise.
1534  *
1535  * Basic idea:
 * 1) there must be a different write/promise that we could read from,
1537  * 2) we must have read from the same write/promise in excess of maxreads times,
1538  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1539  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1540  *
1541  * If so, we decide that the execution is no longer feasible.
1542  *
1543  * @param curr The current action. Must be a read.
1544  * @param rf The ModelAction/Promise from which we might read.
1545  * @return True if the read should succeed; false otherwise
1546  */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* Liveness checking disabled when maxreads == 0 */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	/* With at most one read-from candidate there is no alternative write
	 * to switch to, so the read trivially passes */
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value; any early return
	 * here means condition (2) of the recency check fails, so the read
	 * is allowed */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		/* Fewer than maxreads prior actions in this thread */
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		/* A non-read breaks the streak */
		if (!act->is_read())
			return true;
		/* A read from a different promise/write breaks the streak */
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		/* A read with no alternative candidates had no choice */
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* Conditions (1), (3), (4): look for a mod-order-later write/promise
	 * that every one of the last maxreads reads could have used instead */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1595
1596 /**
1597  * @brief Updates the mo_graph with the constraints imposed from the current
1598  * read.
1599  *
1600  * Basic idea is the following: Go through each other thread and find
1601  * the last action that happened before our read.  Two cases:
1602  *
1603  * -# The action is a write: that write must either occur before
1604  * the write we read from or be the write we read from.
1605  * -# The action is a read: the write that that action read from
1606  * must occur before the write we read from or be the same write.
1607  *
1608  * @param curr The current action. Must be a read.
1609  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1610  * @return True if modification order edges were added; false otherwise
1611  */
template <typename rf_type>
bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	ModelAction *last_sc_write = NULL;
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Skip curr */
			if (act == curr)
				continue;
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				/* If rf happens before curr, everything earlier in this
				 * thread is covered transitively; otherwise keep looking */
				if (act->happens_before(curr))
					break;
				else
					continue;
			}

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					/* Case (1): earlier write must be mo-before rf */
					added = mo_graph->addEdge(act, rf) || added;
				} else {
					/* Case (2): the write that act read from must be
					 * mo-before rf (unless it IS rf) */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, promises[i]) || added;

	return added;
}
1708
1709 /**
1710  * Updates the mo_graph with the constraints imposed from the current write.
1711  *
1712  * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
1714  *
1715  * (1) The action is a write => that write must occur before
1716  * the current write
1717  *
1718  * (2) The action is a read => the write that that action read from
1719  * must occur before the current write.
1720  *
1721  * This method also handles two other issues:
1722  *
1723  * (I) Sequential Consistency: Making sure that if the current write is
1724  * seq_cst, that it occurs after the previous seq_cst write.
1725  *
1726  * (II) Sending the write back to non-synchronizing reads.
1727  *
1728  * @param curr The current action. Must be a write.
1729  * @param send_fv A vector for stashing reads to which we may pass our future
1730  * value. If NULL, then don't record any future values.
1731  * @return True if modification order edges were added; false otherwise
1732  */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * add whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL) {
						/* Read from a promise: order the promised value
						 * before curr instead */
						added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
					} else
						added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */

				/* Only stash future values when the out-of-thin-air and
				 * coherence-promise constraints don't already rule them out */
				if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1849
//This procedure uses coherence to prune future values that are
//guaranteed to generate a coherence violation.
//
//need to see if there is (1) a promise for thread write, (2)
//the promise is sb before write, (3) the promise can only be
//resolved by the thread read, and (4) the promise has same
//location as read/write

bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Walk the outstanding promises, newest first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* Condition (4): promise must be on the same location */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						bool okay=false;
						/* Walk backwards through the execution (via node
						 * parents) from 'read' to the promise reader */
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								/* Intervening same-thread read: this
								 * promise doesn't force a violation */
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					/* Sending the future value would guarantee a
					 * coherence violation */
					return false;
				}
			}
		}
	}
	return true;
}
1892
1893
1894 /** Arbitrary reads from the future are not allowed.  Section 29.3
 * part 9 places some constraints.  This method checks one result of the
 * constraint.  Others require compiler support. */
1897 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1898 {
1899         if (!writer->is_rmw())
1900                 return true;
1901
1902         if (!reader->is_rmw())
1903                 return true;
1904
1905         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1906                 if (search == reader)
1907                         return false;
1908                 if (search->get_tid() == reader->get_tid() &&
1909                                 search->happens_before(reader))
1910                         break;
1911         }
1912
1913         return true;
1914 }
1915
1916 /**
1917  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks the following constraint (others
1919  * require compiler support):
1920  *
1921  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1922  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1923  */
1924 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1925 {
1926         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1927         unsigned int i;
1928         /* Iterate over all threads */
1929         for (i = 0; i < thrd_lists->size(); i++) {
1930                 const ModelAction *write_after_read = NULL;
1931
1932                 /* Iterate over actions in thread, starting from most recent */
1933                 action_list_t *list = &(*thrd_lists)[i];
1934                 action_list_t::reverse_iterator rit;
1935                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1936                         ModelAction *act = *rit;
1937
1938                         /* Don't disallow due to act == reader */
1939                         if (!reader->happens_before(act) || reader == act)
1940                                 break;
1941                         else if (act->is_write())
1942                                 write_after_read = act;
1943                         else if (act->is_read() && act->get_reads_from() != NULL)
1944                                 write_after_read = act->get_reads_from();
1945                 }
1946
1947                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1948                         return false;
1949         }
1950         return true;
1951 }
1952
1953 /**
1954  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1955  * The ModelAction under consideration is expected to be taking part in
1956  * release/acquire synchronization as an object of the "reads from" relation.
1957  * Note that this can only provide release sequence support for RMW chains
1958  * which do not read from the future, as those actions cannot be traced until
1959  * their "promise" is fulfilled. Similarly, we may not even establish the
1960  * presence of a release sequence with certainty, as some modification order
1961  * constraints may be decided further in the future. Thus, this function
1962  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1963  * and a boolean representing certainty.
1964  *
1965  * @param rf The action that might be part of a release sequence. Must be a
1966  * write.
1967  * @param release_heads A pass-by-reference style return parameter. After
1968  * execution of this function, release_heads will contain the heads of all the
1969  * relevant release sequences, if any exists with certainty
1970  * @param pending A pass-by-reference style return parameter which is only used
1971  * when returning false (i.e., uncertain). Returns most information regarding
1972  * an uncertain release sequence, including any write operations that might
1973  * break the sequence.
1974  * @return true, if the ModelExecution is certain that release_heads is complete;
1975  * false otherwise
1976  */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backwards through the RMW chain that feeds 'rf' */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for a write that could slip between
	 * 'release' and 'rf' in modification order */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* A thread whose last action follows rf, or which can never run
		 * again, can issue no new writes before rf */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		/* Contiguous subsequence established; synchronize with release */
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Record the uncertain sequence for lazy re-checking */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2120
2121 /**
2122  * An interface for getting the release sequence head(s) with which a
2123  * given ModelAction must synchronize. This function only returns a non-empty
2124  * result when it can locate a release sequence head with certainty. Otherwise,
2125  * it may mark the internal state of the ModelExecution so that it will handle
2126  * the release sequence at a later time, causing @a acquire to update its
2127  * synchronization at some later point in execution.
2128  *
2129  * @param acquire The 'acquire' action that may synchronize with a release
2130  * sequence
2131  * @param read The read action that may read from a release sequence; this may
2132  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2133  * when 'acquire' is a fence-acquire)
2134  * @param release_heads A pass-by-reference return parameter. Will be filled
2135  * with the head(s) of the release sequence(s), if they exists with certainty.
2136  * @see ModelExecution::release_seq_heads
2137  */
2138 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2139                 ModelAction *read, rel_heads_list_t *release_heads)
2140 {
2141         const ModelAction *rf = read->get_reads_from();
2142         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2143         sequence->acquire = acquire;
2144         sequence->read = read;
2145
2146         if (!release_seq_heads(rf, release_heads, sequence)) {
2147                 /* add act to 'lazy checking' list */
2148                 pending_rel_seqs.push_back(sequence);
2149         } else {
2150                 snapshot_free(sequence);
2151         }
2152 }
2153
2154 /**
2155  * @brief Propagate a modified clock vector to actions later in the execution
2156  * order
2157  *
2158  * After an acquire operation lazily completes a release-sequence
2159  * synchronization, we must update all clock vectors for operations later than
2160  * the acquire in the execution order.
2161  *
2162  * @param acquire The ModelAction whose clock vector must be propagated
2163  * @param work The work queue to which we can add work items, if this
2164  * propagation triggers more updates (e.g., to the modification order)
2165  */
2166 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2167 {
2168         /* Re-check all pending release sequences */
2169         work->push_back(CheckRelSeqWorkEntry(NULL));
2170         /* Re-check read-acquire for mo_graph edges */
2171         work->push_back(MOEdgeWorkEntry(acquire));
2172
2173         /* propagate synchronization to later actions */
2174         action_list_t::reverse_iterator rit = action_trace.rbegin();
2175         for (; (*rit) != acquire; rit++) {
2176                 ModelAction *propagate = *rit;
2177                 if (acquire->happens_before(propagate)) {
2178                         synchronize(acquire, propagate);
2179                         /* Re-check 'propagate' for mo_graph edges */
2180                         work->push_back(MOEdgeWorkEntry(propagate));
2181                 }
2182         }
2183 }
2184
2185 /**
2186  * Attempt to resolve all stashed operations that might synchronize with a
2187  * release sequence for a given location. This implements the "lazy" portion of
2188  * determining whether or not a release sequence was contiguous, since not all
2189  * modification order information is present at the time an action occurs.
2190  *
2191  * @param location The location/object that should be checked for release
2192  * sequence resolutions. A NULL value means to check all locations.
2193  * @param work_queue The work queue to which to add work items as they are
2194  * generated
2195  * @return True if any updates occurred (new synchronization, new mo_graph
2196  * edges)
2197  */
2198 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2199 {
2200         bool updated = false;
2201         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2202         while (it != pending_rel_seqs.end()) {
2203                 struct release_seq *pending = *it;
2204                 ModelAction *acquire = pending->acquire;
2205                 const ModelAction *read = pending->read;
2206
2207                 /* Only resolve sequences on the given location, if provided */
2208                 if (location && read->get_location() != location) {
2209                         it++;
2210                         continue;
2211                 }
2212
2213                 const ModelAction *rf = read->get_reads_from();
2214                 rel_heads_list_t release_heads;
2215                 bool complete;
2216                 complete = release_seq_heads(rf, &release_heads, pending);
2217                 for (unsigned int i = 0; i < release_heads.size(); i++)
2218                         if (!acquire->has_synchronized_with(release_heads[i]))
2219                                 if (synchronize(release_heads[i], acquire))
2220                                         updated = true;
2221
2222                 if (updated) {
2223                         /* Propagate the changed clock vector */
2224                         propagate_clockvector(acquire, work_queue);
2225                 }
2226                 if (complete) {
2227                         it = pending_rel_seqs.erase(it);
2228                         snapshot_free(pending);
2229                 } else {
2230                         it++;
2231                 }
2232         }
2233
2234         // If we resolved promises or data races, see if we have realized a data race.
2235         checkDataRaces();
2236
2237         return updated;
2238 }
2239
2240 /**
2241  * Performs various bookkeeping operations for the current ModelAction. For
2242  * instance, adds action to the per-object, per-thread action vector and to the
2243  * action trace list of all thread actions.
2244  *
2245  * @param act is the ModelAction to add.
2246  */
2247 void ModelExecution::add_action_to_lists(ModelAction *act)
2248 {
2249         int tid = id_to_int(act->get_tid());
2250         ModelAction *uninit = NULL;
2251         int uninit_id = -1;
2252         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2253         if (list->empty() && act->is_atomic_var()) {
2254                 uninit = get_uninitialized_action(act);
2255                 uninit_id = id_to_int(uninit->get_tid());
2256                 list->push_front(uninit);
2257         }
2258         list->push_back(act);
2259
2260         action_trace.push_back(act);
2261         if (uninit)
2262                 action_trace.push_front(uninit);
2263
2264         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2265         if (tid >= (int)vec->size())
2266                 vec->resize(priv->next_thread_id);
2267         (*vec)[tid].push_back(act);
2268         if (uninit)
2269                 (*vec)[uninit_id].push_front(uninit);
2270
2271         if ((int)thrd_last_action.size() <= tid)
2272                 thrd_last_action.resize(get_num_threads());
2273         thrd_last_action[tid] = act;
2274         if (uninit)
2275                 thrd_last_action[uninit_id] = uninit;
2276
2277         if (act->is_fence() && act->is_release()) {
2278                 if ((int)thrd_last_fence_release.size() <= tid)
2279                         thrd_last_fence_release.resize(get_num_threads());
2280                 thrd_last_fence_release[tid] = act;
2281         }
2282
2283         if (act->is_wait()) {
2284                 void *mutex_loc = (void *) act->get_value();
2285                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2286
2287                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2288                 if (tid >= (int)vec->size())
2289                         vec->resize(priv->next_thread_id);
2290                 (*vec)[tid].push_back(act);
2291         }
2292 }
2293
2294 /**
2295  * @brief Get the last action performed by a particular Thread
2296  * @param tid The thread ID of the Thread in question
2297  * @return The last action in the thread
2298  */
2299 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2300 {
2301         int threadid = id_to_int(tid);
2302         if (threadid < (int)thrd_last_action.size())
2303                 return thrd_last_action[id_to_int(tid)];
2304         else
2305                 return NULL;
2306 }
2307
2308 /**
2309  * @brief Get the last fence release performed by a particular Thread
2310  * @param tid The thread ID of the Thread in question
2311  * @return The last fence release in the thread, if one exists; NULL otherwise
2312  */
2313 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2314 {
2315         int threadid = id_to_int(tid);
2316         if (threadid < (int)thrd_last_fence_release.size())
2317                 return thrd_last_fence_release[id_to_int(tid)];
2318         else
2319                 return NULL;
2320 }
2321
2322 /**
2323  * Gets the last memory_order_seq_cst write (in the total global sequence)
2324  * performed on a particular object (i.e., memory location), not including the
2325  * current action.
2326  * @param curr The current ModelAction; also denotes the object location to
2327  * check
2328  * @return The last seq_cst write
2329  */
2330 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2331 {
2332         void *location = curr->get_location();
2333         action_list_t *list = obj_map.get(location);
2334         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2335         action_list_t::reverse_iterator rit;
2336         for (rit = list->rbegin(); (*rit) != curr; rit++)
2337                 ;
2338         rit++; /* Skip past curr */
2339         for ( ; rit != list->rend(); rit++)
2340                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2341                         return *rit;
2342         return NULL;
2343 }
2344
2345 /**
2346  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2347  * performed in a particular thread, prior to a particular fence.
2348  * @param tid The ID of the thread to check
2349  * @param before_fence The fence from which to begin the search; if NULL, then
2350  * search for the most recent fence in the thread.
2351  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2352  */
2353 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2354 {
2355         /* All fences should have location FENCE_LOCATION */
2356         action_list_t *list = obj_map.get(FENCE_LOCATION);
2357
2358         if (!list)
2359                 return NULL;
2360
2361         action_list_t::reverse_iterator rit = list->rbegin();
2362
2363         if (before_fence) {
2364                 for (; rit != list->rend(); rit++)
2365                         if (*rit == before_fence)
2366                                 break;
2367
2368                 ASSERT(*rit == before_fence);
2369                 rit++;
2370         }
2371
2372         for (; rit != list->rend(); rit++)
2373                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2374                         return *rit;
2375         return NULL;
2376 }
2377
2378 /**
2379  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2380  * location). This function identifies the mutex according to the current
2381  * action, which is presumed to perform on the same mutex.
2382  * @param curr The current ModelAction; also denotes the object location to
2383  * check
2384  * @return The last unlock operation
2385  */
2386 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2387 {
2388         void *location = curr->get_location();
2389
2390         action_list_t *list = obj_map.get(location);
2391         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2392         action_list_t::reverse_iterator rit;
2393         for (rit = list->rbegin(); rit != list->rend(); rit++)
2394                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2395                         return *rit;
2396         return NULL;
2397 }
2398
2399 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2400 {
2401         ModelAction *parent = get_last_action(tid);
2402         if (!parent)
2403                 parent = get_thread(tid)->get_creation();
2404         return parent;
2405 }
2406
2407 /**
2408  * Returns the clock vector for a given thread.
2409  * @param tid The thread whose clock vector we want
2410  * @return Desired clock vector
2411  */
2412 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2413 {
2414         return get_parent_action(tid)->get_cv();
2415 }
2416
2417 /**
2418  * @brief Find the promise (if any) to resolve for the current action and
2419  * remove it from the pending promise vector
2420  * @param curr The current ModelAction. Should be a write.
2421  * @return The Promise to resolve, if any; otherwise NULL
2422  */
2423 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2424 {
2425         for (unsigned int i = 0; i < promises.size(); i++)
2426                 if (curr->get_node()->get_promise(i)) {
2427                         Promise *ret = promises[i];
2428                         promises.erase(promises.begin() + i);
2429                         return ret;
2430                 }
2431         return NULL;
2432 }
2433
2434 /**
2435  * Resolve a Promise with a current write.
2436  * @param write The ModelAction that is fulfilling Promises
2437  * @param promise The Promise to resolve
2438  * @param work The work queue, for adding new fixup work
2439  * @return True if the Promise was successfully resolved; false otherwise
2440  */
2441 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
2442                 work_queue_t *work)
2443 {
2444         ModelVector<ModelAction *> actions_to_check;
2445
2446         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2447                 ModelAction *read = promise->get_reader(i);
2448                 if (read_from(read, write)) {
2449                         /* Propagate the changed clock vector */
2450                         propagate_clockvector(read, work);
2451                 }
2452                 actions_to_check.push_back(read);
2453         }
2454         /* Make sure the promise's value matches the write's value */
2455         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2456         if (!mo_graph->resolvePromise(promise, write))
2457                 priv->hard_failed_promise = true;
2458
2459         /**
2460          * @todo  It is possible to end up in an inconsistent state, where a
2461          * "resolved" promise may still be referenced if
2462          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2463          *
2464          * Note that the inconsistency only matters when dumping mo_graph to
2465          * file.
2466          *
2467          * delete promise;
2468          */
2469
2470         //Check whether reading these writes has made threads unable to
2471         //resolve promises
2472         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2473                 ModelAction *read = actions_to_check[i];
2474                 mo_check_promises(read, true);
2475         }
2476
2477         return true;
2478 }
2479
2480 /**
2481  * Compute the set of promises that could potentially be satisfied by this
2482  * action. Note that the set computation actually appears in the Node, not in
2483  * ModelExecution.
2484  * @param curr The ModelAction that may satisfy promises
2485  */
2486 void ModelExecution::compute_promises(ModelAction *curr)
2487 {
2488         for (unsigned int i = 0; i < promises.size(); i++) {
2489                 Promise *promise = promises[i];
2490                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2491                         continue;
2492
2493                 bool satisfy = true;
2494                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2495                         const ModelAction *act = promise->get_reader(j);
2496                         if (act->happens_before(curr) ||
2497                                         act->could_synchronize_with(curr)) {
2498                                 satisfy = false;
2499                                 break;
2500                         }
2501                 }
2502                 if (satisfy)
2503                         curr->get_node()->set_promise(i);
2504         }
2505 }
2506
/**
 * @brief Checks promises in response to change in ClockVector Threads.
 *
 * When a thread's clock vector grows (via synchronization), the thread may
 * become ordered after a promised future read, making it unable to satisfy
 * that promise; such threads are eliminated from the promise.
 *
 * @param tid The thread whose clock vector changed
 * @param old_cv The thread's clock vector before the merge (may be NULL)
 * @param merge_cv The thread's clock vector after the merge
 */
void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (!promise->thread_is_available(tid))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* Only react to newly-established synchronization:
			 * not synchronized before, synchronized now */
			if ((!old_cv || !old_cv->synchronized_since(act)) &&
					merge_cv->synchronized_since(act)) {
				if (promise->eliminate_thread(tid)) {
					/* Promise has failed */
					priv->failed_promise = true;
					return;
				}
			}
		}
	}
}
2527
2528 void ModelExecution::check_promises_thread_disabled()
2529 {
2530         for (unsigned int i = 0; i < promises.size(); i++) {
2531                 Promise *promise = promises[i];
2532                 if (promise->has_failed()) {
2533                         priv->failed_promise = true;
2534                         return;
2535                 }
2536         }
2537 }
2538
2539 /**
2540  * @brief Checks promises in response to addition to modification order for
2541  * threads.
2542  *
2543  * We test whether threads are still available for satisfying promises after an
2544  * addition to our modification order constraints. Those that are unavailable
2545  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2546  * that promise has failed.
2547  *
2548  * @param act The ModelAction which updated the modification order
2549  * @param is_read_check Should be true if act is a read and we must check for
2550  * updates to the store from which it read (there is a distinction here for
2551  * RMW's, which are both a load and a store)
2552  */
2553 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2554 {
2555         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2556
2557         for (unsigned int i = 0; i < promises.size(); i++) {
2558                 Promise *promise = promises[i];
2559
2560                 // Is this promise on the same location?
2561                 if (!promise->same_location(write))
2562                         continue;
2563
2564                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2565                         const ModelAction *pread = promise->get_reader(j);
2566                         if (!pread->happens_before(act))
2567                                continue;
2568                         if (mo_graph->checkPromise(write, promise)) {
2569                                 priv->hard_failed_promise = true;
2570                                 return;
2571                         }
2572                         break;
2573                 }
2574
2575                 // Don't do any lookups twice for the same thread
2576                 if (!promise->thread_is_available(act->get_tid()))
2577                         continue;
2578
2579                 if (mo_graph->checkReachable(promise, write)) {
2580                         if (mo_graph->checkPromise(write, promise)) {
2581                                 priv->hard_failed_promise = true;
2582                                 return;
2583                         }
2584                 }
2585         }
2586 }
2587
2588 /**
2589  * Compute the set of writes that may break the current pending release
2590  * sequence. This information is extracted from previou release sequence
2591  * calculations.
2592  *
2593  * @param curr The current ModelAction. Must be a release sequence fixup
2594  * action.
2595  */
2596 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2597 {
2598         if (pending_rel_seqs.empty())
2599                 return;
2600
2601         struct release_seq *pending = pending_rel_seqs.back();
2602         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2603                 const ModelAction *write = pending->writes[i];
2604                 curr->get_node()->add_relseq_break(write);
2605         }
2606
2607         /* NULL means don't break the sequence; just synchronize */
2608         curr->get_node()->add_relseq_break(NULL);
2609 }
2610
2611 /**
2612  * Build up an initial set of all past writes that this 'read' action may read
2613  * from, as well as any previously-observed future values that must still be valid.
2614  *
2615  * @param curr is the current ModelAction that we are exploring; it must be a
2616  * 'read' operation.
2617  */
2618 void ModelExecution::build_may_read_from(ModelAction *curr)
2619 {
2620         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2621         unsigned int i;
2622         ASSERT(curr->is_read());
2623
2624         ModelAction *last_sc_write = NULL;
2625
2626         if (curr->is_seqcst())
2627                 last_sc_write = get_last_seq_cst_write(curr);
2628
2629         /* Iterate over all threads */
2630         for (i = 0; i < thrd_lists->size(); i++) {
2631                 /* Iterate over actions in thread, starting from most recent */
2632                 action_list_t *list = &(*thrd_lists)[i];
2633                 action_list_t::reverse_iterator rit;
2634                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2635                         ModelAction *act = *rit;
2636
2637                         /* Only consider 'write' actions */
2638                         if (!act->is_write() || act == curr)
2639                                 continue;
2640
2641                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2642                         bool allow_read = true;
2643
2644                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2645                                 allow_read = false;
2646                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2647                                 allow_read = false;
2648
2649                         if (allow_read) {
2650                                 /* Only add feasible reads */
2651                                 mo_graph->startChanges();
2652                                 r_modification_order(curr, act);
2653                                 if (!is_infeasible())
2654                                         curr->get_node()->add_read_from_past(act);
2655                                 mo_graph->rollbackChanges();
2656                         }
2657
2658                         /* Include at most one act per-thread that "happens before" curr */
2659                         if (act->happens_before(curr))
2660                                 break;
2661                 }
2662         }
2663
2664         /* Inherit existing, promised future values */
2665         for (i = 0; i < promises.size(); i++) {
2666                 const Promise *promise = promises[i];
2667                 const ModelAction *promise_read = promise->get_reader(0);
2668                 if (promise_read->same_var(curr)) {
2669                         /* Only add feasible future-values */
2670                         mo_graph->startChanges();
2671                         r_modification_order(curr, promise);
2672                         if (!is_infeasible())
2673                                 curr->get_node()->add_read_from_promise(promise_read);
2674                         mo_graph->rollbackChanges();
2675                 }
2676         }
2677
2678         /* We may find no valid may-read-from only if the execution is doomed */
2679         if (!curr->get_node()->read_from_size()) {
2680                 priv->no_valid_reads = true;
2681                 set_assert();
2682         }
2683
2684         if (DBG_ENABLED()) {
2685                 model_print("Reached read action:\n");
2686                 curr->print();
2687                 model_print("Printing read_from_past\n");
2688                 curr->get_node()->print_read_from_past();
2689                 model_print("End printing read_from_past\n");
2690         }
2691 }
2692
/**
 * @brief Check whether a sleeping read may read from a given write
 *
 * Walks the reads-from chain backward through RMWs, looking for a release
 * write performed while curr's thread was on the sleep set.
 *
 * @param curr The (sleeping) read action
 * @param write The candidate write to read from
 * @return True if reading from the write is permitted
 */
bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
{
	for ( ; write != NULL; write = write->get_reads_from()) {
		/* UNINIT actions don't have a Node, and they never sleep */
		if (write->is_uninitialized())
			return true;
		Node *prevnode = write->get_node()->get_parent();

		bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
		if (write->is_release() && thread_sleep)
			return true;
		/* Only RMWs continue the chain; a plain write ends the walk */
		if (!write->is_rmw())
			return false;
	}
	/* Chain terminated (reads_from == NULL) without a plain write */
	return true;
}
2709
2710 /**
2711  * @brief Get an action representing an uninitialized atomic
2712  *
2713  * This function may create a new one or try to retrieve one from the NodeStack
2714  *
2715  * @param curr The current action, which prompts the creation of an UNINIT action
2716  * @return A pointer to the UNINIT ModelAction
2717  */
2718 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2719 {
2720         Node *node = curr->get_node();
2721         ModelAction *act = node->get_uninit_action();
2722         if (!act) {
2723                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2724                 node->set_uninit_action(act);
2725         }
2726         act->create_cv(NULL);
2727         return act;
2728 }
2729
/**
 * @brief Print a formatted table of an action list, plus a trace hash
 *
 * The hash folds every action (including those with sequence number 0, which
 * are not printed) so identical traces produce identical hashes.
 *
 * @param list The list of actions to print
 */
static void print_list(const action_list_t *list)
{
	action_list_t::const_iterator it;

	model_print("------------------------------------------------------------------------------------\n");
	model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
	model_print("------------------------------------------------------------------------------------\n");

	unsigned int hash = 0;

	for (it = list->begin(); it != list->end(); it++) {
		const ModelAction *act = *it;
		if (act->get_seq_number() > 0)
			act->print();
		hash = hash^(hash<<3)^((*it)->hash());
	}
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
}
2749
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph plus rf/sb edges as a Graphviz file
 *
 * Writes "<filename>.dot" containing the mo_graph nodes, reads-from edges
 * (red) and sequenced-before edges (blue) for the current action trace.
 *
 * @param filename Base name for the output file (also used as the digraph
 * name, so it should be a valid Graphviz identifier)
 */
void ModelExecution::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf: never overflow the fixed-size buffer */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return; /* can't write the dump; fail quietly rather than crash */
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* thread_array[i] = last action seen for (integer) thread id i */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
			else
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						act,
						"label=\"rf\", color=red");
		}
		/* Index consistently via id_to_int (previously mixed raw
		 * thread_id_t and id_to_int indexing) */
		int tid = id_to_int(act->get_tid());
		if (thread_array[tid]) {
			/* sequenced-before edge from the thread's previous action */
			mo_graph->dot_print_edge(file,
					thread_array[tid],
					act,
					"label=\"sb\", color=blue, weight=400");
		}

		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2789
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump mo_graph and full trace graph to per-execution .dot files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Annotate feasible executions with their status flags */
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	print_list(&action_trace);
	model_print("\n");

	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2825
2826 /**
2827  * Add a Thread to the system for the first time. Should only be called once
2828  * per thread.
2829  * @param t The Thread to add
2830  */
2831 void ModelExecution::add_thread(Thread *t)
2832 {
2833         unsigned int i = id_to_int(t->get_id());
2834         if (i >= thread_map.size())
2835                 thread_map.resize(i + 1);
2836         thread_map[i] = t;
2837         if (!t->is_model_thread())
2838                 scheduler->add_thread(t);
2839 }
2840
2841 /**
2842  * @brief Get a Thread reference by its ID
2843  * @param tid The Thread's ID
2844  * @return A Thread reference
2845  */
2846 Thread * ModelExecution::get_thread(thread_id_t tid) const
2847 {
2848         unsigned int i = id_to_int(tid);
2849         if (i < thread_map.size())
2850                 return thread_map[i];
2851         return NULL;
2852 }
2853
2854 /**
2855  * @brief Get a reference to the Thread in which a ModelAction was executed
2856  * @param act The ModelAction
2857  * @return A Thread reference
2858  */
2859 Thread * ModelExecution::get_thread(const ModelAction *act) const
2860 {
2861         return get_thread(act->get_tid());
2862 }
2863
2864 /**
2865  * @brief Get a Thread reference by its pthread ID
2866  * @param index The pthread's ID
2867  * @return A Thread reference
2868  */
2869 Thread * ModelExecution::get_pthread(pthread_t pid) {
2870         if (pid < pthread_counter + 1) return pthread_map[pid];
2871         else return NULL;
2872 }
2873
2874 /**
2875  * @brief Get a Promise's "promise number"
2876  *
2877  * A "promise number" is an index number that is unique to a promise, valid
2878  * only for a specific snapshot of an execution trace. Promises may come and go
2879  * as they are generated an resolved, so an index only retains meaning for the
2880  * current snapshot.
2881  *
2882  * @param promise The Promise to check
2883  * @return The promise index, if the promise still is valid; otherwise -1
2884  */
2885 int ModelExecution::get_promise_number(const Promise *promise) const
2886 {
2887         for (unsigned int i = 0; i < promises.size(); i++)
2888                 if (promises[i] == promise)
2889                         return i;
2890         /* Not found */
2891         return -1;
2892 }
2893
2894 /**
2895  * @brief Check if a Thread is currently enabled
2896  * @param t The Thread to check
2897  * @return True if the Thread is currently enabled
2898  */
2899 bool ModelExecution::is_enabled(Thread *t) const
2900 {
2901         return scheduler->is_enabled(t);
2902 }
2903
2904 /**
2905  * @brief Check if a Thread is currently enabled
2906  * @param tid The ID of the Thread to check
2907  * @return True if the Thread is currently enabled
2908  */
2909 bool ModelExecution::is_enabled(thread_id_t tid) const
2910 {
2911         return scheduler->is_enabled(tid);
2912 }
2913
2914 /**
2915  * @brief Select the next thread to execute based on the curren action
2916  *
2917  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2918  * actions should be followed by the execution of their child thread. In either
2919  * case, the current action should determine the next thread schedule.
2920  *
2921  * @param curr The current action
2922  * @return The next thread to run, if the current action will determine this
2923  * selection; otherwise NULL
2924  */
2925 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2926 {
2927         /* Do not split atomic RMW */
2928         if (curr->is_rmwr())
2929                 return get_thread(curr);
2930         if (curr->is_write()) {
2931 //              std::memory_order order = curr->get_mo(); 
2932 //              switch(order) {
2933 //                      case std::memory_order_relaxed: 
2934 //                              return get_thread(curr);
2935 //                      case std::memory_order_release:
2936 //                              return get_thread(curr);
2937 //                      defalut:
2938 //                              return NULL;
2939 //              }       
2940                 return NULL;
2941         }
2942
2943         /* Follow CREATE with the created thread */
2944         /* which is not needed, because model.cc takes care of this */
2945         if (curr->get_type() == THREAD_CREATE)
2946                 return curr->get_thread_operand(); 
2947         if (curr->get_type() == PTHREAD_CREATE) {
2948                 return curr->get_thread_operand();
2949         }
2950         return NULL;
2951 }
2952
2953 /** @return True if the execution has taken too many steps */
2954 bool ModelExecution::too_many_steps() const
2955 {
2956         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2957 }
2958
2959 /**
2960  * Takes the next step in the execution, if possible.
2961  * @param curr The current step to take
2962  * @return Returns the next Thread to run, if any; NULL if this execution
2963  * should terminate
2964  */
2965 Thread * ModelExecution::take_step(ModelAction *curr)
2966 {
2967         Thread *curr_thrd = get_thread(curr);
2968         ASSERT(curr_thrd->get_state() == THREAD_READY);
2969
2970         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2971         curr = check_current_action(curr);
2972         ASSERT(curr);
2973
2974         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2975                 scheduler->remove_thread(curr_thrd);
2976
2977         return action_select_next_thread(curr);
2978 }
2979
2980 /**
2981  * Launch end-of-execution release sequence fixups only when
2982  * the execution is otherwise feasible AND there are:
2983  *
2984  * (1) pending release sequences
2985  * (2) pending assertions that could be invalidated by a change
2986  * in clock vectors (i.e., data races)
2987  * (3) no pending promises
2988  */
2989 void ModelExecution::fixup_release_sequences()
2990 {
2991         while (!pending_rel_seqs.empty() &&
2992                         is_feasible_prefix_ignore_relseq() &&
2993                         haveUnrealizedRaces()) {
2994                 model_print("*** WARNING: release sequence fixup action "
2995                                 "(%zu pending release seuqence(s)) ***\n",
2996                                 pending_rel_seqs.size());
2997                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2998                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2999                                 model_thread);
3000                 take_step(fixup);
3001         };
3002 }