/* execution.cc — ModelExecution engine for c11tester (partial conversion to fuzzer) */
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
17
18 #define INITIAL_THREAD_ID       0
19
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 * (i.e., saved/restored between execution replays). Keep this struct small;
 * it is allocated with SNAPSHOTALLOC so its contents roll back automatically.
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		failed_promise(false),
		hard_failed_promise(false),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* Bug messages are heap-allocated in assert_bug(); free them here */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/* Thread ID to hand out to the next created thread */
	unsigned int next_thread_id;
	/* Highest sequence number assigned to a ModelAction so far */
	modelclock_t used_sequence_numbers;
	/* Most recent (latest) cached backtracking point, if any */
	ModelAction *next_backtrack;
	/* Bug reports accumulated during this execution */
	SnapVector<bug_message *> bugs;
	/* Infeasibility flags: when set, the current execution is invalid */
	bool failed_promise;
	bool hard_failed_promise;
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	bool bad_sc_read;
	bool asserted;

	SNAPSHOTALLOC
};
60
/**
 * @brief Constructor
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (not owned)
 * @param scheduler The scheduler used for this execution (not owned)
 * @param node_stack The NodeStack recording exploration state (not owned)
 *
 * Note: initializer order must follow member declaration order.
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	pthread_map(0),
	pthread_counter(0),
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	mutex_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());       // L: Create model thread
	add_thread(model_thread);                       // L: Add model thread to scheduler
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
92
93 /** @brief Destructor */
94 ModelExecution::~ModelExecution()
95 {
96         for (unsigned int i = 0; i < get_num_threads(); i++)
97                 delete get_thread(int_to_id(i));
98
99         for (unsigned int i = 0; i < promises.size(); i++)
100                 delete promises[i];
101
102         delete mo_graph;
103         delete priv;
104 }
105
/** @return the number of the current execution, delegated to the ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
110
111 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
112 {
113         action_list_t *tmp = hash->get(ptr);
114         if (tmp == NULL) {
115                 tmp = new action_list_t();
116                 hash->put(ptr, tmp);
117         }
118         return tmp;
119 }
120
121 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
122 {
123         SnapVector<action_list_t> *tmp = hash->get(ptr);
124         if (tmp == NULL) {
125                 tmp = new SnapVector<action_list_t>();
126                 hash->put(ptr, tmp);
127         }
128         return tmp;
129 }
130
131 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
132 {
133         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
134         if (wrv==NULL)
135                 return NULL;
136         unsigned int thread=id_to_int(tid);
137         if (thread < wrv->size())
138                 return &(*wrv)[thread];
139         else
140                 return NULL;
141 }
142
/** @return a thread ID for a new Thread (IDs are handed out sequentially) */
thread_id_t ModelExecution::get_next_id()
{
	return priv->next_thread_id++;
}
148
/** @return the number of user threads created during this execution
 *  (equals the next unassigned thread ID) */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}
154
/** @return a sequence number for a new ModelAction (pre-increment, so the
 *  first assigned number is 1) */
modelclock_t ModelExecution::get_next_seq_num()
{
	return ++priv->used_sequence_numbers;
}
160
/**
 * @brief Should the current action wake up a given thread?
 *
 * Checked against each thread in the sleep set; a thread is woken only if
 * the current action could interact with its pending (asleep) action.
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
{
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	if (curr->is_rmwr())
		return false;
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
		return true;
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
		return true;
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		/* Wake only if the sleeper's last action precedes that
		 * fence-release, so the release could become visible to it */
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
			return true;
	}
	return false;
}
188
189 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
190 {
191         for (unsigned int i = 0; i < get_num_threads(); i++) {
192                 Thread *thr = get_thread(int_to_id(i));
193                 if (scheduler->is_sleep_set(thr)) {
194                         if (should_wake_up(curr, thr))
195                                 /* Remove this thread from sleep set */
196                                 scheduler->remove_sleep(thr);
197                 }
198         }
199 }
200
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made (marks the execution infeasible) */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
207
/** @brief Alert the model-checker that a bad seq-cst read was made
 * (marks the execution infeasible) */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
214
215 bool ModelExecution::assert_bug(const char *msg)
216 {
217         priv->bugs.push_back(new bug_message(msg));
218
219         if (isfeasibleprefix()) {
220                 set_assert();
221                 return true;
222         }
223         return false;
224 }
225
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
{
	return priv->bugs.size() != 0;
}
231
/** @return the list of bug reports accumulated so far (owned by priv) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
236
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
247
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
257
258 /**
259  * Check if we are in a deadlock. Should only be called at the end of an
260  * execution, although it should not give false positives in the middle of an
261  * execution (there should be some ENABLED thread).
262  *
263  * @return True if program is in a deadlock; false otherwise
264  */
265 bool ModelExecution::is_deadlocked() const
266 {
267         bool blocking_threads = false;
268         for (unsigned int i = 0; i < get_num_threads(); i++) {
269                 thread_id_t tid = int_to_id(i);
270                 if (is_enabled(tid))
271                         return false;
272                 Thread *t = get_thread(tid);
273                 if (!t->is_model_thread() && t->get_pending())
274                         blocking_threads = true;
275         }
276         return blocking_threads;
277 }
278
279 /**
280  * @brief Check if we are yield-blocked
281  *
282  * A program can be "yield-blocked" if all threads are ready to execute a
283  * yield.
284  *
285  * @return True if the program is yield-blocked; false otherwise
286  */
287 bool ModelExecution::is_yieldblocked() const
288 {
289         if (!params->yieldblock)
290                 return false;
291
292         for (unsigned int i = 0; i < get_num_threads(); i++) {
293                 thread_id_t tid = int_to_id(i);
294                 Thread *t = get_thread(tid);
295                 if (t->get_pending() && t->get_pending()->is_yield())
296                         return true;
297         }
298         return false;
299 }
300
301 /**
302  * Check if this is a complete execution. That is, have all thread completed
303  * execution (rather than exiting because sleep sets have forced a redundant
304  * execution).
305  *
306  * @return True if the execution is complete.
307  */
308 bool ModelExecution::is_complete_execution() const
309 {
310         if (is_yieldblocked())
311                 return false;
312         for (unsigned int i = 0; i < get_num_threads(); i++)
313                 if (is_enabled(int_to_id(i)))
314                         return false;
315         return true;
316 }
317
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: walk the full trace backwards until we
	 * reach the release action itself */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread records of the latest acquire fence and the latest
	 * plain load on act's location, indexed by thread ID */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Actions in act's own thread cannot be conflicts */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Among threads that have both a prior load and a later acquire
	 * fence, pick the latest such fence as the backtracking point */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
390
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* fall through: a seq-cst fence is treated like an atomic access */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return the later of
		 * the two candidates */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A wait conflicts with failed trylocks and notifies
			 * from other threads */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	return NULL;
}
490
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * Implements the DPOR backtrack-set computation, filtered by thread
 * enabledness, prior exploration, and the fairness/yield policies.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;

	/* Backtracking is recorded at the node *before* the conflict */
	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point, only it needs
	 * to be considered; otherwise consider every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows: skip tid if another enabled thread
		 * holds priority under the fairness window */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows: skip tid if some
		 * enabled thread has yield-priority over it */
		if (params->yieldon) {
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
574
575 /**
576  * @brief Cache the a backtracking point as the "most recent", if eligible
577  *
578  * Note that this does not prepare the NodeStack for this backtracking
579  * operation, it only caches the action on a per-execution basis
580  *
581  * @param act The operation at which we should explore a different next action
582  * (i.e., backtracking point)
583  * @return True, if this action is now the most recent backtracking point;
584  * false otherwise
585  */
586 bool ModelExecution::set_latest_backtrack(ModelAction *act)
587 {
588         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
589                 priv->next_backtrack = act;
590                 return true;
591         }
592         return false;
593 }
594
/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 * Clears the cached point so it is consumed at most once.
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelExecution::get_next_backtrack()
{
	ModelAction *next = priv->next_backtrack;
	priv->next_backtrack = NULL;
	return next;
}
606
/**
 * Processes a read model action.
 *
 * Resolves the read's source according to the Node's read-from status:
 * a past write, a promised (unobserved) write, or a new future value.
 * mo_graph changes are staged (startChanges) and either committed or
 * rolled back so a rejected candidate leaves no residue.
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loop so we can retry with the next read-from candidate after a
	 * recency-check failure */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Another candidate exists: undo staged
					 * mo edges and retry */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left; flag the execution */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Propagate the value read back to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
670
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	cdsc::mutex *mutex = curr->get_mutex();
	struct cdsc::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds iff the mutex is currently free */
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Accessing the mutex before its constructor ran is a bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a condvar waiter on this location */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The node's misc counter selects which waiter to wake */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();

		// WL
		if (it == waiters->end())
			break;

		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
771
772 /**
773  * @brief Check if the current pending promises allow a future value to be sent
774  *
775  * It is unsafe to pass a future value back if there exists a pending promise Pr
776  * such that:
777  *
778  *    reader --exec-> Pr --exec-> writer
779  *
780  * If such Pr exists, we must save the pending future value until Pr is
781  * resolved.
782  *
783  * @param writer The operation which sends the future value. Must be a write.
784  * @param reader The operation which will observe the value. Must be a read.
785  * @return True if the future value can be sent now; false if it must wait.
786  */
787 bool ModelExecution::promises_may_allow(const ModelAction *writer,
788                 const ModelAction *reader) const
789 {
790         for (int i = promises.size() - 1; i >= 0; i--) {
791                 ModelAction *pr = promises[i]->get_reader(0);
792                 //reader is after promise...doesn't cross any promise
793                 if (*reader > *pr)
794                         return true;
795                 //writer is after promise, reader before...bad...
796                 if (*writer > *pr)
797                         return false;
798         }
799         return true;
800 }
801
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Expiry is the writer's sequence number plus the configured delay */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* Adding a new future value creates a backtracking point at the reader */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
832
833 /**
834  * Process a write ModelAction
835  * @param curr The ModelAction to process
836  * @param work The work queue, for adding fixup work
837  * @return True if the mo_graph was updated or promises were resolved
838  */
839 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
840 {
841         /* Readers to which we may send our future value */
842         ModelVector<ModelAction *> send_fv;
843
844         const ModelAction *earliest_promise_reader;
845         bool updated_promises = false;
846
847         bool updated_mod_order = w_modification_order(curr, &send_fv);
848         Promise *promise = pop_promise_to_resolve(curr);
849
850         if (promise) {
851                 earliest_promise_reader = promise->get_reader(0);
852                 updated_promises = resolve_promise(curr, promise, work);
853         } else
854                 earliest_promise_reader = NULL;
855
856         for (unsigned int i = 0; i < send_fv.size(); i++) {
857                 ModelAction *read = send_fv[i];
858
859                 /* Don't send future values to reads after the Promise we resolve */
860                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
861                         /* Check if future value can be sent immediately */
862                         if (promises_may_allow(curr, read)) {
863                                 add_future_value(curr, read);
864                         } else {
865                                 futurevalues.push_back(PendingFutureValue(curr, read));
866                         }
867                 }
868         }
869
870         /* Check the pending future values */
871         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
872                 struct PendingFutureValue pfv = futurevalues[i];
873                 if (promises_may_allow(pfv.writer, pfv.reader)) {
874                         add_future_value(pfv.writer, pfv.reader);
875                         futurevalues.erase(futurevalues.begin() + i);
876                 }
877         }
878
879         mo_graph->commitChanges();
880         mo_check_promises(curr, false);
881
882         get_thread(curr)->set_return_value(VALUE_NONE);
883         return updated_mod_order || updated_promises;
884 }
885
886 /**
887  * Process a fence ModelAction
888  * @param curr The ModelAction to process
889  * @return True if synchronization was updated
890  */
891 bool ModelExecution::process_fence(ModelAction *curr)
892 {
893         /*
894          * fence-relaxed: no-op
895          * fence-release: only log the occurence (not in this function), for
896          *   use in later synchronization
897          * fence-acquire (this function): search for hypothetical release
898          *   sequences
899          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
900          */
901         bool updated = false;
902         if (curr->is_acquire()) {
903                 action_list_t *list = &action_trace;
904                 action_list_t::reverse_iterator rit;
905                 /* Find X : is_read(X) && X --sb-> curr */
906                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
907                         ModelAction *act = *rit;
908                         if (act == curr)
909                                 continue;
910                         if (act->get_tid() != curr->get_tid())
911                                 continue;
912                         /* Stop at the beginning of the thread */
913                         if (act->is_thread_start())
914                                 break;
915                         /* Stop once we reach a prior fence-acquire */
916                         if (act->is_fence() && act->is_acquire())
917                                 break;
918                         if (!act->is_read())
919                                 continue;
920                         /* read-acquire will find its own release sequences */
921                         if (act->is_acquire())
922                                 continue;
923
924                         /* Establish hypothetical release sequences */
925                         rel_heads_list_t release_heads;
926                         get_release_seq_heads(curr, act, &release_heads);
927                         for (unsigned int i = 0; i < release_heads.size(); i++)
928                                 synchronize(release_heads[i], curr);
929                         if (release_heads.size() != 0)
930                                 updated = true;
931                 }
932         }
933         return updated;
934 }
935
936 /**
937  * @brief Process the current action for thread-related activity
938  *
939  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
940  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
941  * synchronization, etc.  This function is a no-op for non-THREAD actions
942  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
943  *
944  * @param curr The current action
945  * @return True if synchronization was updated or a thread completed
946  */
947 bool ModelExecution::process_thread_action(ModelAction *curr)
948 {
949         bool updated = false;
950
951         switch (curr->get_type()) {
952         case THREAD_CREATE: {
953                 thrd_t *thrd = (thrd_t *)curr->get_location();
954                 struct thread_params *params = (struct thread_params *)curr->get_value();
955                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
956                 curr->set_thread_operand(th);
957                 add_thread(th);
958                 th->set_creation(curr);
959                 /* Promises can be satisfied by children */
960                 for (unsigned int i = 0; i < promises.size(); i++) {
961                         Promise *promise = promises[i];
962                         if (promise->thread_is_available(curr->get_tid()))
963                                 promise->add_thread(th->get_id());
964                 }
965                 break;
966         }
967         case PTHREAD_CREATE: {
968                 (*(pthread_t *)curr->get_location()) = pthread_counter++;       
969
970                 struct pthread_params *params = (struct pthread_params *)curr->get_value();
971                 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
972                 curr->set_thread_operand(th);
973                 add_thread(th);
974                 th->set_creation(curr);
975
976                 if ( pthread_map.size() < pthread_counter )
977                         pthread_map.resize( pthread_counter );
978                 pthread_map[ pthread_counter-1 ] = th;
979
980                 /* Promises can be satisfied by children */
981                 for (unsigned int i = 0; i < promises.size(); i++) {
982                         Promise *promise = promises[i];
983                         if (promise->thread_is_available(curr->get_tid()))
984                                 promise->add_thread(th->get_id());
985                 }
986                 
987                 break;
988         }
989         case THREAD_JOIN: {
990                 Thread *blocking = curr->get_thread_operand();
991                 ModelAction *act = get_last_action(blocking->get_id());
992                 synchronize(act, curr);
993                 updated = true; /* trigger rel-seq checks */
994                 break;
995         }
996         case PTHREAD_JOIN: {
997                 Thread *blocking = curr->get_thread_operand();
998                 ModelAction *act = get_last_action(blocking->get_id());
999                 synchronize(act, curr);
1000                 updated = true; /* trigger rel-seq checks */
1001                 break; // WL: to be add (modified)
1002         }
1003
1004         case THREAD_FINISH: {
1005                 Thread *th = get_thread(curr);
1006                 /* Wake up any joining threads */
1007                 for (unsigned int i = 0; i < get_num_threads(); i++) {
1008                         Thread *waiting = get_thread(int_to_id(i));
1009                         if (waiting->waiting_on() == th &&
1010                                         waiting->get_pending()->is_thread_join())
1011                                 scheduler->wake(waiting);
1012                 }
1013                 th->complete();
1014                 /* Completed thread can't satisfy promises */
1015                 for (unsigned int i = 0; i < promises.size(); i++) {
1016                         Promise *promise = promises[i];
1017                         if (promise->thread_is_available(th->get_id()))
1018                                 if (promise->eliminate_thread(th->get_id()))
1019                                         priv->failed_promise = true;
1020                 }
1021                 updated = true; /* trigger rel-seq checks */
1022                 break;
1023         }
1024         case THREAD_START: {
1025                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1026                 break;
1027         }
1028         default:
1029                 break;
1030         }
1031
1032         return updated;
1033 }
1034
1035 /**
1036  * @brief Process the current action for release sequence fixup activity
1037  *
1038  * Performs model-checker release sequence fixups for the current action,
1039  * forcing a single pending release sequence to break (with a given, potential
1040  * "loose" write) or to complete (i.e., synchronize). If a pending release
1041  * sequence forms a complete release sequence, then we must perform the fixup
1042  * synchronization, mo_graph additions, etc.
1043  *
1044  * @param curr The current action; must be a release sequence fixup action
1045  * @param work_queue The work queue to which to add work items as they are
1046  * generated
1047  */
1048 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1049 {
1050         const ModelAction *write = curr->get_node()->get_relseq_break();
1051         struct release_seq *sequence = pending_rel_seqs.back();
1052         pending_rel_seqs.pop_back();
1053         ASSERT(sequence);
1054         ModelAction *acquire = sequence->acquire;
1055         const ModelAction *rf = sequence->rf;
1056         const ModelAction *release = sequence->release;
1057         ASSERT(acquire);
1058         ASSERT(release);
1059         ASSERT(rf);
1060         ASSERT(release->same_thread(rf));
1061
1062         if (write == NULL) {
1063                 /**
1064                  * @todo Forcing a synchronization requires that we set
1065                  * modification order constraints. For instance, we can't allow
1066                  * a fixup sequence in which two separate read-acquire
1067                  * operations read from the same sequence, where the first one
1068                  * synchronizes and the other doesn't. Essentially, we can't
1069                  * allow any writes to insert themselves between 'release' and
1070                  * 'rf'
1071                  */
1072
1073                 /* Must synchronize */
1074                 if (!synchronize(release, acquire))
1075                         return;
1076
1077                 /* Propagate the changed clock vector */
1078                 propagate_clockvector(acquire, work_queue);
1079         } else {
1080                 /* Break release sequence with new edges:
1081                  *   release --mo--> write --mo--> rf */
1082                 mo_graph->addEdge(release, write);
1083                 mo_graph->addEdge(write, rf);
1084         }
1085
1086         /* See if we have realized a data race */
1087         checkDataRaces();
1088 }
1089
1090 /**
1091  * Initialize the current action by performing one or more of the following
1092  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1093  * in the NodeStack, manipulating backtracking sets, allocating and
1094  * initializing clock vectors, and computing the promises to fulfill.
1095  *
1096  * @param curr The current action, as passed from the user context; may be
1097  * freed/invalidated after the execution of this function, with a different
1098  * action "returned" its place (pass-by-reference)
1099  * @return True if curr is a newly-explored action; false otherwise
1100  */
1101 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1102 {
1103         ModelAction *newcurr;
1104
1105         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1106                 newcurr = process_rmw(*curr);
1107                 delete *curr;
1108
1109                 if (newcurr->is_rmw())
1110                         compute_promises(newcurr);
1111
1112                 *curr = newcurr;
1113                 return false;
1114         }
1115
1116         (*curr)->set_seq_number(get_next_seq_num());
1117
1118         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1119         if (newcurr) {
1120                 /* First restore type and order in case of RMW operation */
1121                 if ((*curr)->is_rmwr())
1122                         newcurr->copy_typeandorder(*curr);
1123
1124                 ASSERT((*curr)->get_location() == newcurr->get_location());
1125                 newcurr->copy_from_new(*curr);
1126
1127                 /* Discard duplicate ModelAction; use action from NodeStack */
1128                 delete *curr;
1129
1130                 /* Always compute new clock vector */
1131                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1132
1133                 *curr = newcurr;
1134                 return false; /* Action was explored previously */
1135         } else {
1136                 newcurr = *curr;
1137
1138                 /* Always compute new clock vector */
1139                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1140
1141                 /* Assign most recent release fence */
1142                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1143
1144                 /*
1145                  * Perform one-time actions when pushing new ModelAction onto
1146                  * NodeStack
1147                  */
1148                 if (newcurr->is_write())
1149                         compute_promises(newcurr);
1150                 else if (newcurr->is_relseq_fixup())
1151                         compute_relseq_breakwrites(newcurr);
1152                 else if (newcurr->is_wait())
1153                         newcurr->get_node()->set_misc_max(2);
1154                 else if (newcurr->is_notify_one()) {
1155                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1156                 }
1157                 return true; /* This was a new ModelAction */
1158         }
1159 }
1160
1161 /**
1162  * @brief Establish reads-from relation between two actions
1163  *
1164  * Perform basic operations involved with establishing a concrete rf relation,
1165  * including setting the ModelAction data and checking for release sequences.
1166  *
1167  * @param act The action that is reading (must be a read)
1168  * @param rf The action from which we are reading (must be a write)
1169  *
1170  * @return True if this read established synchronization
1171  */
1172
1173 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1174 {
1175         ASSERT(rf);
1176         ASSERT(rf->is_write());
1177
1178         act->set_read_from(rf);
1179         if (act->is_acquire()) {
1180                 rel_heads_list_t release_heads;
1181                 get_release_seq_heads(act, act, &release_heads);
1182                 int num_heads = release_heads.size();
1183                 for (unsigned int i = 0; i < release_heads.size(); i++)
1184                         if (!synchronize(release_heads[i], act))
1185                                 num_heads--;
1186                 return num_heads > 0;
1187         }
1188         return false;
1189 }
1190
1191 /**
1192  * @brief Synchronizes two actions
1193  *
1194  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1195  * This function performs the synchronization as well as providing other hooks
1196  * for other checks along with synchronization.
1197  *
1198  * @param first The left-hand side of the synchronizes-with relation
1199  * @param second The right-hand side of the synchronizes-with relation
1200  * @return True if the synchronization was successful (i.e., was consistent
1201  * with the execution order); false otherwise
1202  */
1203 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1204 {
1205         if (*second < *first) {
1206                 set_bad_synchronization();
1207                 return false;
1208         }
1209         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1210         return second->synchronize_with(first);
1211 }
1212
1213 /**
1214  * Check promises and eliminate potentially-satisfying threads when a thread is
1215  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1216  * no longer satisfy a promise generated from that thread.
1217  *
1218  * @param blocker The thread on which a thread is waiting
1219  * @param waiting The waiting thread
1220  */
1221 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1222 {
1223         for (unsigned int i = 0; i < promises.size(); i++) {
1224                 Promise *promise = promises[i];
1225                 if (!promise->thread_is_available(waiting->get_id()))
1226                         continue;
1227                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1228                         ModelAction *reader = promise->get_reader(j);
1229                         if (reader->get_tid() != blocker->get_id())
1230                                 continue;
1231                         if (promise->eliminate_thread(waiting->get_id())) {
1232                                 /* Promise has failed */
1233                                 priv->failed_promise = true;
1234                         } else {
1235                                 /* Only eliminate the 'waiting' thread once */
1236                                 return;
1237                         }
1238                 }
1239         }
1240 }
1241
1242 /**
1243  * @brief Check whether a model action is enabled.
1244  *
1245  * Checks whether an operation would be successful (i.e., is a lock already
1246  * locked, or is the joined thread already complete).
1247  *
1248  * For yield-blocking, yields are never enabled.
1249  *
1250  * @param curr is the ModelAction to check whether it is enabled.
1251  * @return a bool that indicates whether the action is enabled.
1252  */
1253 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1254         if (curr->is_lock()) {
1255                 cdsc::mutex *lock = curr->get_mutex();
1256                 struct cdsc::mutex_state *state = lock->get_state();
1257                 if (state->locked)
1258                         return false;
1259         } else if (curr->is_thread_join()) {
1260                 Thread *blocking = curr->get_thread_operand();
1261                 if (!blocking->is_complete()) {
1262                         thread_blocking_check_promises(blocking, get_thread(curr));
1263                         return false;
1264                 }
1265         } else if (params->yieldblock && curr->is_yield()) {
1266                 return false;
1267         }
1268
1269         return true;
1270 }
1271
1272 /**
1273  * This is the heart of the model checker routine. It performs model-checking
1274  * actions corresponding to a given "current action." Among other processes, it
1275  * calculates reads-from relationships, updates synchronization clock vectors,
1276  * forms a memory_order constraints graph, and handles replay/backtrack
1277  * execution when running permutations of previously-observed executions.
1278  *
1279  * @param curr The current action to process
1280  * @return The ModelAction that is actually executed; may be different than
1281  * curr
1282  */
1283 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1284 {
1285         ASSERT(curr);
1286         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1287         bool newly_explored = initialize_curr_action(&curr);
1288
1289         DBG();
1290
1291         wake_up_sleeping_actions(curr);
1292
1293         /* Compute fairness information for CHESS yield algorithm */
1294         if (params->yieldon) {
1295                 curr->get_node()->update_yield(scheduler);
1296         }
1297
1298         /* Add the action to lists before any other model-checking tasks */
1299         if (!second_part_of_rmw)
1300                 add_action_to_lists(curr);
1301
1302         /* Build may_read_from set for newly-created actions */
1303         if (newly_explored && curr->is_read())
1304                 build_may_read_from(curr);
1305
1306         /* Initialize work_queue with the "current action" work */
1307         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1308         while (!work_queue.empty() && !has_asserted()) {
1309                 WorkQueueEntry work = work_queue.front();
1310                 work_queue.pop_front();
1311
1312                 switch (work.type) {
1313                 case WORK_CHECK_CURR_ACTION: {
1314                         ModelAction *act = work.action;
1315                         bool update = false; /* update this location's release seq's */
1316                         bool update_all = false; /* update all release seq's */
1317
1318                         if (process_thread_action(curr))
1319                                 update_all = true;
1320
1321                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1322                                 update = true;
1323
1324                         if (act->is_write() && process_write(act, &work_queue))
1325                                 update = true;
1326
1327                         if (act->is_fence() && process_fence(act))
1328                                 update_all = true;
1329
1330                         if (act->is_mutex_op() && process_mutex(act))
1331                                 update_all = true;
1332
1333                         if (act->is_relseq_fixup())
1334                                 process_relseq_fixup(curr, &work_queue);
1335
1336                         if (update_all)
1337                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1338                         else if (update)
1339                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1340                         break;
1341                 }
1342                 case WORK_CHECK_RELEASE_SEQ:
1343                         resolve_release_sequences(work.location, &work_queue);
1344                         break;
1345                 case WORK_CHECK_MO_EDGES: {
1346                         /** @todo Complete verification of work_queue */
1347                         ModelAction *act = work.action;
1348                         bool updated = false;
1349
1350                         if (act->is_read()) {
1351                                 const ModelAction *rf = act->get_reads_from();
1352                                 const Promise *promise = act->get_reads_from_promise();
1353                                 if (rf) {
1354                                         if (r_modification_order(act, rf))
1355                                                 updated = true;
1356                                         if (act->is_seqcst()) {
1357                                                 ModelAction *last_sc_write = get_last_seq_cst_write(act);
1358                                                 if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
1359                                                         set_bad_sc_read();
1360                                                 }
1361                                         }
1362                                 } else if (promise) {
1363                                         if (r_modification_order(act, promise))
1364                                                 updated = true;
1365                                 }
1366                         }
1367                         if (act->is_write()) {
1368                                 if (w_modification_order(act, NULL))
1369                                         updated = true;
1370                         }
1371                         mo_graph->commitChanges();
1372
1373                         if (updated)
1374                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1375                         break;
1376                 }
1377                 default:
1378                         ASSERT(false);
1379                         break;
1380                 }
1381         }
1382
1383         check_curr_backtracking(curr);
1384         set_backtracking(curr);
1385         return curr;
1386 }
1387
1388 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1389 {
1390         Node *currnode = curr->get_node();
1391         Node *parnode = currnode->get_parent();
1392
1393         if ((parnode && !parnode->backtrack_empty()) ||
1394                          !currnode->misc_empty() ||
1395                          !currnode->read_from_empty() ||
1396                          !currnode->promise_empty() ||
1397                          !currnode->relseq_break_empty()) {
1398                 set_latest_backtrack(curr);
1399         }
1400 }
1401
1402 bool ModelExecution::promises_expired() const
1403 {
1404         for (unsigned int i = 0; i < promises.size(); i++) {
1405                 Promise *promise = promises[i];
1406                 if (promise->get_expiration() < priv->used_sequence_numbers)
1407                         return true;
1408         }
1409         return false;
1410 }
1411
1412 /**
1413  * This is the strongest feasibility check available.
1414  * @return whether the current trace (partial or complete) must be a prefix of
1415  * a feasible trace.
1416  */
1417 bool ModelExecution::isfeasibleprefix() const
1418 {
1419         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1420 }
1421
1422 /**
1423  * Print disagnostic information about an infeasible execution
1424  * @param prefix A string to prefix the output with; if NULL, then a default
1425  * message prefix will be provided
1426  */
1427 void ModelExecution::print_infeasibility(const char *prefix) const
1428 {
1429         char buf[100];
1430         char *ptr = buf;
1431         if (mo_graph->checkForCycles())
1432                 ptr += sprintf(ptr, "[mo cycle]");
1433         if (priv->failed_promise || priv->hard_failed_promise)
1434                 ptr += sprintf(ptr, "[failed promise]");
1435         if (priv->too_many_reads)
1436                 ptr += sprintf(ptr, "[too many reads]");
1437         if (priv->no_valid_reads)
1438                 ptr += sprintf(ptr, "[no valid reads-from]");
1439         if (priv->bad_synchronization)
1440                 ptr += sprintf(ptr, "[bad sw ordering]");
1441         if (priv->bad_sc_read)
1442                 ptr += sprintf(ptr, "[bad sc read]");
1443         if (promises_expired())
1444                 ptr += sprintf(ptr, "[promise expired]");
1445         if (promises.size() != 0)
1446                 ptr += sprintf(ptr, "[unresolved promise]");
1447         if (ptr != buf)
1448                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1449 }
1450
1451 /**
1452  * Returns whether the current completed trace is feasible, except for pending
1453  * release sequences.
1454  */
1455 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1456 {
1457         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1458
1459 }
1460
1461 /**
1462  * Check if the current partial trace is infeasible. Does not check any
1463  * end-of-execution flags, which might rule out the execution. Thus, this is
1464  * useful only for ruling an execution as infeasible.
1465  * @return whether the current partial trace is infeasible.
1466  */
1467 bool ModelExecution::is_infeasible() const
1468 {
1469         return mo_graph->checkForCycles() ||
1470                 priv->no_valid_reads ||
1471                 priv->too_many_reads ||
1472                 priv->bad_synchronization ||
1473                 priv->bad_sc_read ||
1474                 priv->hard_failed_promise ||
1475                 promises_expired();
1476 }
1477
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
	/* Merge this RMW/RMWC into this thread's pending RMWR action */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	if (act->is_rmw()) {
		/* Record the RMW atomicity edge in the mo graph, whether the
		 * read half read from a concrete write or a promise */
		if (lastread->get_reads_from())
			mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
		else
			mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
		mo_graph->commitChanges();
	}
	return lastread;
}
1491
1492 /**
1493  * A helper function for ModelExecution::check_recency, to check if the current
1494  * thread is able to read from a different write/promise for 'params.maxreads'
1495  * number of steps and if that write/promise should become visible (i.e., is
1496  * ordered later in the modification order). This helps model memory liveness.
1497  *
1498  * @param curr The current action. Must be a read.
1499  * @param rf The write/promise from which we plan to read
1500  * @param other_rf The write/promise from which we may read
1501  * @return True if we were able to read from other_rf for params.maxreads steps
1502  */
1503 template <typename T, typename U>
1504 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1505 {
1506         /* Need a different write/promise */
1507         if (other_rf->equals(rf))
1508                 return false;
1509
1510         /* Only look for "newer" writes/promises */
1511         if (!mo_graph->checkReachable(rf, other_rf))
1512                 return false;
1513
1514         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1515         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1516         action_list_t::reverse_iterator rit = list->rbegin();
1517         ASSERT((*rit) == curr);
1518         /* Skip past curr */
1519         rit++;
1520
1521         /* Does this write/promise work for everyone? */
1522         for (int i = 0; i < params->maxreads; i++, rit++) {
1523                 ModelAction *act = *rit;
1524                 if (!act->may_read_from(other_rf))
1525                         return false;
1526         }
1527         return true;
1528 }
1529
/**
 * Checks whether a thread has read from the same write or Promise for too many
 * times without seeing the effects of a later write/Promise.
 *
 * Basic idea:
 * 1) there must a different write/promise that we could read from,
 * 2) we must have read from the same write/promise in excess of maxreads times,
 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
 *
 * If so, we decide that the execution is no longer feasible.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction/Promise from which we might read.
 * @return True if the read should succeed; false otherwise
 */
template <typename T>
bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
{
	/* maxreads == 0 disables the liveness check entirely */
	if (!params->maxreads)
		return true;

	//NOTE: Next check is just optimization, not really necessary....
	/* With at most one read-from candidate there is no alternative write,
	 * so the check passes trivially */
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)
		return true;

	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */
	rit++;

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value: pass (return true)
	 * as soon as any of the previous maxreads actions is not a read of the
	 * same write/promise as rf, or had no alternative to choose from */
	for (int count = 0; count < params->maxreads; ritcopy++, count++) {
		if (ritcopy == list->rend())
			return true;
		ModelAction *act = *ritcopy;
		if (!act->is_read())
			return true;
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
			return true;
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
			return true;
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
			return true;
	}
	/* We read the same write/promise maxreads times in a row; a liveness
	 * failure occurs if any mod-order-later write or promise was readable
	 * for all of those steps */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	}
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
	}
	return true;
}
1594
1595 /**
1596  * @brief Updates the mo_graph with the constraints imposed from the current
1597  * read.
1598  *
1599  * Basic idea is the following: Go through each other thread and find
1600  * the last action that happened before our read.  Two cases:
1601  *
1602  * -# The action is a write: that write must either occur before
1603  * the write we read from or be the write we read from.
1604  * -# The action is a read: the write that that action read from
1605  * must occur before the write we read from or be the same write.
1606  *
1607  * @param curr The current action. Must be a read.
1608  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1609  * @return True if modification order edges were added; false otherwise
1610  */
1611 template <typename rf_type>
1612 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1613 {
1614         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1615         unsigned int i;
1616         bool added = false;
1617         ASSERT(curr->is_read());
1618
1619         /* Last SC fence in the current thread */
1620         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1621         ModelAction *last_sc_write = NULL;
1622         if (curr->is_seqcst())
1623                 last_sc_write = get_last_seq_cst_write(curr);
1624
1625         /* Iterate over all threads */
1626         for (i = 0; i < thrd_lists->size(); i++) {
1627                 /* Last SC fence in thread i */
1628                 ModelAction *last_sc_fence_thread_local = NULL;
1629                 if (int_to_id((int)i) != curr->get_tid())
1630                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1631
1632                 /* Last SC fence in thread i, before last SC fence in current thread */
1633                 ModelAction *last_sc_fence_thread_before = NULL;
1634                 if (last_sc_fence_local)
1635                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1636
1637                 /* Iterate over actions in thread, starting from most recent */
1638                 action_list_t *list = &(*thrd_lists)[i];
1639                 action_list_t::reverse_iterator rit;
1640                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1641                         ModelAction *act = *rit;
1642
1643                         /* Skip curr */
1644                         if (act == curr)
1645                                 continue;
1646                         /* Don't want to add reflexive edges on 'rf' */
1647                         if (act->equals(rf)) {
1648                                 if (act->happens_before(curr))
1649                                         break;
1650                                 else
1651                                         continue;
1652                         }
1653
1654                         if (act->is_write()) {
1655                                 /* C++, Section 29.3 statement 5 */
1656                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1657                                                 *act < *last_sc_fence_thread_local) {
1658                                         added = mo_graph->addEdge(act, rf) || added;
1659                                         break;
1660                                 }
1661                                 /* C++, Section 29.3 statement 4 */
1662                                 else if (act->is_seqcst() && last_sc_fence_local &&
1663                                                 *act < *last_sc_fence_local) {
1664                                         added = mo_graph->addEdge(act, rf) || added;
1665                                         break;
1666                                 }
1667                                 /* C++, Section 29.3 statement 6 */
1668                                 else if (last_sc_fence_thread_before &&
1669                                                 *act < *last_sc_fence_thread_before) {
1670                                         added = mo_graph->addEdge(act, rf) || added;
1671                                         break;
1672                                 }
1673                         }
1674
1675                         /*
1676                          * Include at most one act per-thread that "happens
1677                          * before" curr
1678                          */
1679                         if (act->happens_before(curr)) {
1680                                 if (act->is_write()) {
1681                                         added = mo_graph->addEdge(act, rf) || added;
1682                                 } else {
1683                                         const ModelAction *prevrf = act->get_reads_from();
1684                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1685                                         if (prevrf) {
1686                                                 if (!prevrf->equals(rf))
1687                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1688                                         } else if (!prevrf_promise->equals(rf)) {
1689                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1690                                         }
1691                                 }
1692                                 break;
1693                         }
1694                 }
1695         }
1696
1697         /*
1698          * All compatible, thread-exclusive promises must be ordered after any
1699          * concrete loads from the same thread
1700          */
1701         for (unsigned int i = 0; i < promises.size(); i++)
1702                 if (promises[i]->is_compatible_exclusive(curr))
1703                         added = mo_graph->addEdge(rf, promises[i]) || added;
1704
1705         return added;
1706 }
1707
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @param send_fv A vector for stashing reads to which we may pass our future
 * value. If NULL, then don't record any future values.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * take whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL) {
						/* read resolved against a promise instead of a store */
						added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
					} else
						added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */

				/* Only stash the future value if it can't be pruned by the
				 * thin-air or coherence-promise constraints */
				if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1848
//This procedure uses cohere to prune future values that are
//guaranteed to generate a coherence violation.
//
//need to see if there is (1) a promise for thread write, (2)
//the promise is sb before write, (3) the promise can only be
//resolved by the thread read, and (4) the promise has same
//location as read/write

/**
 * @param write The store whose future value we are considering sending
 * @param read The load that might receive that future value
 * @return False if sending the future value is guaranteed to create a
 * coherence violation; true if it may be allowed
 */
bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises newest-first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		/* (4) promise must be to the same location as the read/write */
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						bool okay=false;
						/* Walk backwards through the exploration tree from
						 * 'read' toward 'prreader' via node parents */
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					/* Guaranteed coherence violation: prune this future value */
					return false;
				}
			}
		}
	}
	return true;
}
1891
1892
1893 /** Arbitrary reads from the future are not allowed.  Section 29.3
1894  * part 9 places some constraints.  This method checks one result of constraint
1895  * constraint.  Others require compiler support. */
1896 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1897 {
1898         if (!writer->is_rmw())
1899                 return true;
1900
1901         if (!reader->is_rmw())
1902                 return true;
1903
1904         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1905                 if (search == reader)
1906                         return false;
1907                 if (search->get_tid() == reader->get_tid() &&
1908                                 search->happens_before(reader))
1909                         break;
1910         }
1911
1912         return true;
1913 }
1914
1915 /**
1916  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1917  * some constraints. This method checks one the following constraint (others
1918  * require compiler support):
1919  *
1920  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1921  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1922  */
1923 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1924 {
1925         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1926         unsigned int i;
1927         /* Iterate over all threads */
1928         for (i = 0; i < thrd_lists->size(); i++) {
1929                 const ModelAction *write_after_read = NULL;
1930
1931                 /* Iterate over actions in thread, starting from most recent */
1932                 action_list_t *list = &(*thrd_lists)[i];
1933                 action_list_t::reverse_iterator rit;
1934                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1935                         ModelAction *act = *rit;
1936
1937                         /* Don't disallow due to act == reader */
1938                         if (!reader->happens_before(act) || reader == act)
1939                                 break;
1940                         else if (act->is_write())
1941                                 write_after_read = act;
1942                         else if (act->is_read() && act->get_reads_from() != NULL)
1943                                 write_after_read = act->get_reads_from();
1944                 }
1945
1946                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1947                         return false;
1948         }
1949         return true;
1950 }
1951
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelExecution is certain that release_heads is complete;
 * false otherwise
 */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backwards through the RMW chain, collecting release heads */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		/* Stop once we pass behind the fence-release; it dominates */
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that might break the
	 * contiguous subsequence between 'release' and 'rf' */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		/* A finished, disabled, or already-synchronized thread cannot
		 * produce any more breaking writes */
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Stash the uncertain sequence so it can be re-checked later */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2119
2120 /**
2121  * An interface for getting the release sequence head(s) with which a
2122  * given ModelAction must synchronize. This function only returns a non-empty
2123  * result when it can locate a release sequence head with certainty. Otherwise,
2124  * it may mark the internal state of the ModelExecution so that it will handle
2125  * the release sequence at a later time, causing @a acquire to update its
2126  * synchronization at some later point in execution.
2127  *
2128  * @param acquire The 'acquire' action that may synchronize with a release
2129  * sequence
2130  * @param read The read action that may read from a release sequence; this may
2131  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2132  * when 'acquire' is a fence-acquire)
2133  * @param release_heads A pass-by-reference return parameter. Will be filled
2134  * with the head(s) of the release sequence(s), if they exists with certainty.
2135  * @see ModelExecution::release_seq_heads
2136  */
2137 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2138                 ModelAction *read, rel_heads_list_t *release_heads)
2139 {
2140         const ModelAction *rf = read->get_reads_from();
2141         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2142         sequence->acquire = acquire;
2143         sequence->read = read;
2144
2145         if (!release_seq_heads(rf, release_heads, sequence)) {
2146                 /* add act to 'lazy checking' list */
2147                 pending_rel_seqs.push_back(sequence);
2148         } else {
2149                 snapshot_free(sequence);
2150         }
2151 }
2152
2153 /**
2154  * @brief Propagate a modified clock vector to actions later in the execution
2155  * order
2156  *
2157  * After an acquire operation lazily completes a release-sequence
2158  * synchronization, we must update all clock vectors for operations later than
2159  * the acquire in the execution order.
2160  *
2161  * @param acquire The ModelAction whose clock vector must be propagated
2162  * @param work The work queue to which we can add work items, if this
2163  * propagation triggers more updates (e.g., to the modification order)
2164  */
2165 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2166 {
2167         /* Re-check all pending release sequences */
2168         work->push_back(CheckRelSeqWorkEntry(NULL));
2169         /* Re-check read-acquire for mo_graph edges */
2170         work->push_back(MOEdgeWorkEntry(acquire));
2171
2172         /* propagate synchronization to later actions */
2173         action_list_t::reverse_iterator rit = action_trace.rbegin();
2174         for (; (*rit) != acquire; rit++) {
2175                 ModelAction *propagate = *rit;
2176                 if (acquire->happens_before(propagate)) {
2177                         synchronize(acquire, propagate);
2178                         /* Re-check 'propagate' for mo_graph edges */
2179                         work->push_back(MOEdgeWorkEntry(propagate));
2180                 }
2181         }
2182 }
2183
2184 /**
2185  * Attempt to resolve all stashed operations that might synchronize with a
2186  * release sequence for a given location. This implements the "lazy" portion of
2187  * determining whether or not a release sequence was contiguous, since not all
2188  * modification order information is present at the time an action occurs.
2189  *
2190  * @param location The location/object that should be checked for release
2191  * sequence resolutions. A NULL value means to check all locations.
2192  * @param work_queue The work queue to which to add work items as they are
2193  * generated
2194  * @return True if any updates occurred (new synchronization, new mo_graph
2195  * edges)
2196  */
2197 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2198 {
2199         bool updated = false;
2200         SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2201         while (it != pending_rel_seqs.end()) {
2202                 struct release_seq *pending = *it;
2203                 ModelAction *acquire = pending->acquire;
2204                 const ModelAction *read = pending->read;
2205
2206                 /* Only resolve sequences on the given location, if provided */
2207                 if (location && read->get_location() != location) {
2208                         it++;
2209                         continue;
2210                 }
2211
2212                 const ModelAction *rf = read->get_reads_from();
2213                 rel_heads_list_t release_heads;
2214                 bool complete;
2215                 complete = release_seq_heads(rf, &release_heads, pending);
2216                 for (unsigned int i = 0; i < release_heads.size(); i++)
2217                         if (!acquire->has_synchronized_with(release_heads[i]))
2218                                 if (synchronize(release_heads[i], acquire))
2219                                         updated = true;
2220
2221                 if (updated) {
2222                         /* Propagate the changed clock vector */
2223                         propagate_clockvector(acquire, work_queue);
2224                 }
2225                 if (complete) {
2226                         it = pending_rel_seqs.erase(it);
2227                         snapshot_free(pending);
2228                 } else {
2229                         it++;
2230                 }
2231         }
2232
2233         // If we resolved promises or data races, see if we have realized a data race.
2234         checkDataRaces();
2235
2236         return updated;
2237 }
2238
2239 /**
2240  * Performs various bookkeeping operations for the current ModelAction. For
2241  * instance, adds action to the per-object, per-thread action vector and to the
2242  * action trace list of all thread actions.
2243  *
2244  * @param act is the ModelAction to add.
2245  */
2246 void ModelExecution::add_action_to_lists(ModelAction *act)
2247 {
2248         int tid = id_to_int(act->get_tid());
2249         ModelAction *uninit = NULL;
2250         int uninit_id = -1;
2251         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2252         if (list->empty() && act->is_atomic_var()) {
2253                 uninit = get_uninitialized_action(act);
2254                 uninit_id = id_to_int(uninit->get_tid());
2255                 list->push_front(uninit);
2256         }
2257         list->push_back(act);
2258
2259         action_trace.push_back(act);
2260         if (uninit)
2261                 action_trace.push_front(uninit);
2262
2263         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2264         if (tid >= (int)vec->size())
2265                 vec->resize(priv->next_thread_id);
2266         (*vec)[tid].push_back(act);
2267         if (uninit)
2268                 (*vec)[uninit_id].push_front(uninit);
2269
2270         if ((int)thrd_last_action.size() <= tid)
2271                 thrd_last_action.resize(get_num_threads());
2272         thrd_last_action[tid] = act;
2273         if (uninit)
2274                 thrd_last_action[uninit_id] = uninit;
2275
2276         if (act->is_fence() && act->is_release()) {
2277                 if ((int)thrd_last_fence_release.size() <= tid)
2278                         thrd_last_fence_release.resize(get_num_threads());
2279                 thrd_last_fence_release[tid] = act;
2280         }
2281
2282         if (act->is_wait()) {
2283                 void *mutex_loc = (void *) act->get_value();
2284                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2285
2286                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2287                 if (tid >= (int)vec->size())
2288                         vec->resize(priv->next_thread_id);
2289                 (*vec)[tid].push_back(act);
2290         }
2291 }
2292
2293 /**
2294  * @brief Get the last action performed by a particular Thread
2295  * @param tid The thread ID of the Thread in question
2296  * @return The last action in the thread
2297  */
2298 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2299 {
2300         int threadid = id_to_int(tid);
2301         if (threadid < (int)thrd_last_action.size())
2302                 return thrd_last_action[id_to_int(tid)];
2303         else
2304                 return NULL;
2305 }
2306
2307 /**
2308  * @brief Get the last fence release performed by a particular Thread
2309  * @param tid The thread ID of the Thread in question
2310  * @return The last fence release in the thread, if one exists; NULL otherwise
2311  */
2312 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2313 {
2314         int threadid = id_to_int(tid);
2315         if (threadid < (int)thrd_last_fence_release.size())
2316                 return thrd_last_fence_release[id_to_int(tid)];
2317         else
2318                 return NULL;
2319 }
2320
2321 /**
2322  * Gets the last memory_order_seq_cst write (in the total global sequence)
2323  * performed on a particular object (i.e., memory location), not including the
2324  * current action.
2325  * @param curr The current ModelAction; also denotes the object location to
2326  * check
2327  * @return The last seq_cst write
2328  */
2329 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2330 {
2331         void *location = curr->get_location();
2332         action_list_t *list = obj_map.get(location);
2333         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2334         action_list_t::reverse_iterator rit;
2335         for (rit = list->rbegin(); (*rit) != curr; rit++)
2336                 ;
2337         rit++; /* Skip past curr */
2338         for ( ; rit != list->rend(); rit++)
2339                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2340                         return *rit;
2341         return NULL;
2342 }
2343
2344 /**
2345  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2346  * performed in a particular thread, prior to a particular fence.
2347  * @param tid The ID of the thread to check
2348  * @param before_fence The fence from which to begin the search; if NULL, then
2349  * search for the most recent fence in the thread.
2350  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2351  */
2352 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2353 {
2354         /* All fences should have location FENCE_LOCATION */
2355         action_list_t *list = obj_map.get(FENCE_LOCATION);
2356
2357         if (!list)
2358                 return NULL;
2359
2360         action_list_t::reverse_iterator rit = list->rbegin();
2361
2362         if (before_fence) {
2363                 for (; rit != list->rend(); rit++)
2364                         if (*rit == before_fence)
2365                                 break;
2366
2367                 ASSERT(*rit == before_fence);
2368                 rit++;
2369         }
2370
2371         for (; rit != list->rend(); rit++)
2372                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2373                         return *rit;
2374         return NULL;
2375 }
2376
2377 /**
2378  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2379  * location). This function identifies the mutex according to the current
2380  * action, which is presumed to perform on the same mutex.
2381  * @param curr The current ModelAction; also denotes the object location to
2382  * check
2383  * @return The last unlock operation
2384  */
2385 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2386 {
2387         void *location = curr->get_location();
2388
2389         action_list_t *list = obj_map.get(location);
2390         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2391         action_list_t::reverse_iterator rit;
2392         for (rit = list->rbegin(); rit != list->rend(); rit++)
2393                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2394                         return *rit;
2395         return NULL;
2396 }
2397
2398 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2399 {
2400         ModelAction *parent = get_last_action(tid);
2401         if (!parent)
2402                 parent = get_thread(tid)->get_creation();
2403         return parent;
2404 }
2405
2406 /**
2407  * Returns the clock vector for a given thread.
2408  * @param tid The thread whose clock vector we want
2409  * @return Desired clock vector
2410  */
2411 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2412 {
2413         return get_parent_action(tid)->get_cv();
2414 }
2415
2416 /**
2417  * @brief Find the promise (if any) to resolve for the current action and
2418  * remove it from the pending promise vector
2419  * @param curr The current ModelAction. Should be a write.
2420  * @return The Promise to resolve, if any; otherwise NULL
2421  */
2422 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2423 {
2424         for (unsigned int i = 0; i < promises.size(); i++)
2425                 if (curr->get_node()->get_promise(i)) {
2426                         Promise *ret = promises[i];
2427                         promises.erase(promises.begin() + i);
2428                         return ret;
2429                 }
2430         return NULL;
2431 }
2432
2433 /**
2434  * Resolve a Promise with a current write.
2435  * @param write The ModelAction that is fulfilling Promises
2436  * @param promise The Promise to resolve
2437  * @param work The work queue, for adding new fixup work
2438  * @return True if the Promise was successfully resolved; false otherwise
2439  */
2440 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
2441                 work_queue_t *work)
2442 {
2443         ModelVector<ModelAction *> actions_to_check;
2444
2445         for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2446                 ModelAction *read = promise->get_reader(i);
2447                 if (read_from(read, write)) {
2448                         /* Propagate the changed clock vector */
2449                         propagate_clockvector(read, work);
2450                 }
2451                 actions_to_check.push_back(read);
2452         }
2453         /* Make sure the promise's value matches the write's value */
2454         ASSERT(promise->is_compatible(write) && promise->same_value(write));
2455         if (!mo_graph->resolvePromise(promise, write))
2456                 priv->hard_failed_promise = true;
2457
2458         /**
2459          * @todo  It is possible to end up in an inconsistent state, where a
2460          * "resolved" promise may still be referenced if
2461          * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2462          *
2463          * Note that the inconsistency only matters when dumping mo_graph to
2464          * file.
2465          *
2466          * delete promise;
2467          */
2468
2469         //Check whether reading these writes has made threads unable to
2470         //resolve promises
2471         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2472                 ModelAction *read = actions_to_check[i];
2473                 mo_check_promises(read, true);
2474         }
2475
2476         return true;
2477 }
2478
2479 /**
2480  * Compute the set of promises that could potentially be satisfied by this
2481  * action. Note that the set computation actually appears in the Node, not in
2482  * ModelExecution.
2483  * @param curr The ModelAction that may satisfy promises
2484  */
2485 void ModelExecution::compute_promises(ModelAction *curr)
2486 {
2487         for (unsigned int i = 0; i < promises.size(); i++) {
2488                 Promise *promise = promises[i];
2489                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2490                         continue;
2491
2492                 bool satisfy = true;
2493                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2494                         const ModelAction *act = promise->get_reader(j);
2495                         if (act->happens_before(curr) ||
2496                                         act->could_synchronize_with(curr)) {
2497                                 satisfy = false;
2498                                 break;
2499                         }
2500                 }
2501                 if (satisfy)
2502                         curr->get_node()->set_promise(i);
2503         }
2504 }
2505
2506 /** Checks promises in response to change in ClockVector Threads. */
2507 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2508 {
2509         for (unsigned int i = 0; i < promises.size(); i++) {
2510                 Promise *promise = promises[i];
2511                 if (!promise->thread_is_available(tid))
2512                         continue;
2513                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2514                         const ModelAction *act = promise->get_reader(j);
2515                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2516                                         merge_cv->synchronized_since(act)) {
2517                                 if (promise->eliminate_thread(tid)) {
2518                                         /* Promise has failed */
2519                                         priv->failed_promise = true;
2520                                         return;
2521                                 }
2522                         }
2523                 }
2524         }
2525 }
2526
2527 void ModelExecution::check_promises_thread_disabled()
2528 {
2529         for (unsigned int i = 0; i < promises.size(); i++) {
2530                 Promise *promise = promises[i];
2531                 if (promise->has_failed()) {
2532                         priv->failed_promise = true;
2533                         return;
2534                 }
2535         }
2536 }
2537
2538 /**
2539  * @brief Checks promises in response to addition to modification order for
2540  * threads.
2541  *
2542  * We test whether threads are still available for satisfying promises after an
2543  * addition to our modification order constraints. Those that are unavailable
2544  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2545  * that promise has failed.
2546  *
2547  * @param act The ModelAction which updated the modification order
2548  * @param is_read_check Should be true if act is a read and we must check for
2549  * updates to the store from which it read (there is a distinction here for
2550  * RMW's, which are both a load and a store)
2551  */
2552 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
2553 {
2554         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2555
2556         for (unsigned int i = 0; i < promises.size(); i++) {
2557                 Promise *promise = promises[i];
2558
2559                 // Is this promise on the same location?
2560                 if (!promise->same_location(write))
2561                         continue;
2562
2563                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2564                         const ModelAction *pread = promise->get_reader(j);
2565                         if (!pread->happens_before(act))
2566                                continue;
2567                         if (mo_graph->checkPromise(write, promise)) {
2568                                 priv->hard_failed_promise = true;
2569                                 return;
2570                         }
2571                         break;
2572                 }
2573
2574                 // Don't do any lookups twice for the same thread
2575                 if (!promise->thread_is_available(act->get_tid()))
2576                         continue;
2577
2578                 if (mo_graph->checkReachable(promise, write)) {
2579                         if (mo_graph->checkPromise(write, promise)) {
2580                                 priv->hard_failed_promise = true;
2581                                 return;
2582                         }
2583                 }
2584         }
2585 }
2586
2587 /**
2588  * Compute the set of writes that may break the current pending release
2589  * sequence. This information is extracted from previou release sequence
2590  * calculations.
2591  *
2592  * @param curr The current ModelAction. Must be a release sequence fixup
2593  * action.
2594  */
2595 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2596 {
2597         if (pending_rel_seqs.empty())
2598                 return;
2599
2600         struct release_seq *pending = pending_rel_seqs.back();
2601         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2602                 const ModelAction *write = pending->writes[i];
2603                 curr->get_node()->add_relseq_break(write);
2604         }
2605
2606         /* NULL means don't break the sequence; just synchronize */
2607         curr->get_node()->add_relseq_break(NULL);
2608 }
2609
2610 /**
2611  * Build up an initial set of all past writes that this 'read' action may read
2612  * from, as well as any previously-observed future values that must still be valid.
2613  *
2614  * @param curr is the current ModelAction that we are exploring; it must be a
2615  * 'read' operation.
2616  */
2617 void ModelExecution::build_may_read_from(ModelAction *curr)
2618 {
2619         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2620         unsigned int i;
2621         ASSERT(curr->is_read());
2622
2623         ModelAction *last_sc_write = NULL;
2624
2625         if (curr->is_seqcst())
2626                 last_sc_write = get_last_seq_cst_write(curr);
2627
2628         /* Iterate over all threads */
2629         for (i = 0; i < thrd_lists->size(); i++) {
2630                 /* Iterate over actions in thread, starting from most recent */
2631                 action_list_t *list = &(*thrd_lists)[i];
2632                 action_list_t::reverse_iterator rit;
2633                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2634                         ModelAction *act = *rit;
2635
2636                         /* Only consider 'write' actions */
2637                         if (!act->is_write() || act == curr)
2638                                 continue;
2639
2640                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2641                         bool allow_read = true;
2642
2643                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2644                                 allow_read = false;
2645                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2646                                 allow_read = false;
2647
2648                         if (allow_read) {
2649                                 /* Only add feasible reads */
2650                                 mo_graph->startChanges();
2651                                 r_modification_order(curr, act);
2652                                 if (!is_infeasible())
2653                                         curr->get_node()->add_read_from_past(act);
2654                                 mo_graph->rollbackChanges();
2655                         }
2656
2657                         /* Include at most one act per-thread that "happens before" curr */
2658                         if (act->happens_before(curr))
2659                                 break;
2660                 }
2661         }
2662
2663         /* Inherit existing, promised future values */
2664         for (i = 0; i < promises.size(); i++) {
2665                 const Promise *promise = promises[i];
2666                 const ModelAction *promise_read = promise->get_reader(0);
2667                 if (promise_read->same_var(curr)) {
2668                         /* Only add feasible future-values */
2669                         mo_graph->startChanges();
2670                         r_modification_order(curr, promise);
2671                         if (!is_infeasible())
2672                                 curr->get_node()->add_read_from_promise(promise_read);
2673                         mo_graph->rollbackChanges();
2674                 }
2675         }
2676
2677         /* We may find no valid may-read-from only if the execution is doomed */
2678         if (!curr->get_node()->read_from_size()) {
2679                 priv->no_valid_reads = true;
2680                 set_assert();
2681         }
2682
2683         if (DBG_ENABLED()) {
2684                 model_print("Reached read action:\n");
2685                 curr->print();
2686                 model_print("Printing read_from_past\n");
2687                 curr->get_node()->print_read_from_past();
2688                 model_print("End printing read_from_past\n");
2689         }
2690 }
2691
2692 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2693 {
2694         for ( ; write != NULL; write = write->get_reads_from()) {
2695                 /* UNINIT actions don't have a Node, and they never sleep */
2696                 if (write->is_uninitialized())
2697                         return true;
2698                 Node *prevnode = write->get_node()->get_parent();
2699
2700                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2701                 if (write->is_release() && thread_sleep)
2702                         return true;
2703                 if (!write->is_rmw())
2704                         return false;
2705         }
2706         return true;
2707 }
2708
2709 /**
2710  * @brief Get an action representing an uninitialized atomic
2711  *
2712  * This function may create a new one or try to retrieve one from the NodeStack
2713  *
2714  * @param curr The current action, which prompts the creation of an UNINIT action
2715  * @return A pointer to the UNINIT ModelAction
2716  */
2717 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2718 {
2719         Node *node = curr->get_node();
2720         ModelAction *act = node->get_uninit_action();
2721         if (!act) {
2722                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2723                 node->set_uninit_action(act);
2724         }
2725         act->create_cv(NULL);
2726         return act;
2727 }
2728
2729 static void print_list(const action_list_t *list)
2730 {
2731         action_list_t::const_iterator it;
2732
2733         model_print("------------------------------------------------------------------------------------\n");
2734         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2735         model_print("------------------------------------------------------------------------------------\n");
2736
2737         unsigned int hash = 0;
2738
2739         for (it = list->begin(); it != list->end(); it++) {
2740                 const ModelAction *act = *it;
2741                 if (act->get_seq_number() > 0)
2742                         act->print();
2743                 hash = hash^(hash<<3)^((*it)->hash());
2744         }
2745         model_print("HASH %u\n", hash);
2746         model_print("------------------------------------------------------------------------------------\n");
2747 }
2748
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph, rf edges, and sb edges to a
 * Graphviz "dot" file named "<filename>.dot"
 * @param filename Base name for the output file (also used as the graph name)
 */
void ModelExecution::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf: never overrun the buffer, even for long filenames */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (!file)
		return; /* can't open output; skip the dump rather than crash */
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Remembers the previous action per thread, for drawing sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* rf edge comes either from a real store or a promise */
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
			else
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						act,
						"label=\"rf\", color=red");
		}
		/* Index consistently via id_to_int (the original mixed raw
		 * thread_id_t and id_to_int indexing) */
		int tid = id_to_int(act->get_tid());
		if (thread_array[tid]) {
			mo_graph->dot_print_edge(file,
					thread_array[tid],
					act,
					"label=\"sb\", color=blue, weight=400");
		}

		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2788
/** @brief Prints an execution trace summary: feasibility status, the full
 *  action trace, and any still-pending promises. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Also dump the mo_graph for this execution to dot files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	/* Header line: execution number plus status flags */
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	print_list(&action_trace);
	model_print("\n");

	/* List any promises that were never resolved */
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2824
2825 /**
2826  * Add a Thread to the system for the first time. Should only be called once
2827  * per thread.
2828  * @param t The Thread to add
2829  */
2830 void ModelExecution::add_thread(Thread *t)
2831 {
2832         unsigned int i = id_to_int(t->get_id());
2833         if (i >= thread_map.size())
2834                 thread_map.resize(i + 1);
2835         thread_map[i] = t;
2836         if (!t->is_model_thread())
2837                 scheduler->add_thread(t);
2838 }
2839
2840 /**
2841  * @brief Get a Thread reference by its ID
2842  * @param tid The Thread's ID
2843  * @return A Thread reference
2844  */
2845 Thread * ModelExecution::get_thread(thread_id_t tid) const
2846 {
2847         unsigned int i = id_to_int(tid);
2848         if (i < thread_map.size())
2849                 return thread_map[i];
2850         return NULL;
2851 }
2852
2853 /**
2854  * @brief Get a reference to the Thread in which a ModelAction was executed
2855  * @param act The ModelAction
2856  * @return A Thread reference
2857  */
2858 Thread * ModelExecution::get_thread(const ModelAction *act) const
2859 {
2860         return get_thread(act->get_tid());
2861 }
2862
2863 /**
2864  * @brief Get a Thread reference by its pthread ID
2865  * @param index The pthread's ID
2866  * @return A Thread reference
2867  */
2868 Thread * ModelExecution::get_pthread(pthread_t pid) {
2869         if (pid < pthread_counter + 1) return pthread_map[pid];
2870         else return NULL;
2871 }
2872
2873 /**
2874  * @brief Get a Promise's "promise number"
2875  *
2876  * A "promise number" is an index number that is unique to a promise, valid
2877  * only for a specific snapshot of an execution trace. Promises may come and go
2878  * as they are generated an resolved, so an index only retains meaning for the
2879  * current snapshot.
2880  *
2881  * @param promise The Promise to check
2882  * @return The promise index, if the promise still is valid; otherwise -1
2883  */
2884 int ModelExecution::get_promise_number(const Promise *promise) const
2885 {
2886         for (unsigned int i = 0; i < promises.size(); i++)
2887                 if (promises[i] == promise)
2888                         return i;
2889         /* Not found */
2890         return -1;
2891 }
2892
2893 /**
2894  * @brief Check if a Thread is currently enabled
2895  * @param t The Thread to check
2896  * @return True if the Thread is currently enabled
2897  */
2898 bool ModelExecution::is_enabled(Thread *t) const
2899 {
2900         return scheduler->is_enabled(t);
2901 }
2902
2903 /**
2904  * @brief Check if a Thread is currently enabled
2905  * @param tid The ID of the Thread to check
2906  * @return True if the Thread is currently enabled
2907  */
2908 bool ModelExecution::is_enabled(thread_id_t tid) const
2909 {
2910         return scheduler->is_enabled(tid);
2911 }
2912
2913 /**
2914  * @brief Select the next thread to execute based on the curren action
2915  *
2916  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2917  * actions should be followed by the execution of their child thread. In either
2918  * case, the current action should determine the next thread schedule.
2919  *
2920  * @param curr The current action
2921  * @return The next thread to run, if the current action will determine this
2922  * selection; otherwise NULL
2923  */
2924 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2925 {
2926         /* Do not split atomic RMW */
2927         if (curr->is_rmwr())
2928                 return get_thread(curr);
2929         if (curr->is_write()) {
2930 //              std::memory_order order = curr->get_mo(); 
2931 //              switch(order) {
2932 //                      case std::memory_order_relaxed: 
2933 //                              return get_thread(curr);
2934 //                      case std::memory_order_release:
2935 //                              return get_thread(curr);
2936 //                      defalut:
2937 //                              return NULL;
2938 //              }       
2939                 return NULL;
2940         }
2941
2942         /* Follow CREATE with the created thread */
2943         /* which is not needed, because model.cc takes care of this */
2944         if (curr->get_type() == THREAD_CREATE)
2945                 return curr->get_thread_operand(); 
2946         if (curr->get_type() == PTHREAD_CREATE) {
2947                 return curr->get_thread_operand();
2948         }
2949         return NULL;
2950 }
2951
2952 /** @return True if the execution has taken too many steps */
2953 bool ModelExecution::too_many_steps() const
2954 {
2955         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2956 }
2957
2958 /**
2959  * Takes the next step in the execution, if possible.
2960  * @param curr The current step to take
2961  * @return Returns the next Thread to run, if any; NULL if this execution
2962  * should terminate
2963  */
2964 Thread * ModelExecution::take_step(ModelAction *curr)
2965 {
2966         Thread *curr_thrd = get_thread(curr);
2967         ASSERT(curr_thrd->get_state() == THREAD_READY);
2968
2969         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2970         curr = check_current_action(curr);
2971         ASSERT(curr);
2972
2973         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2974                 scheduler->remove_thread(curr_thrd);
2975
2976         return action_select_next_thread(curr);
2977 }
2978
2979 /**
2980  * Launch end-of-execution release sequence fixups only when
2981  * the execution is otherwise feasible AND there are:
2982  *
2983  * (1) pending release sequences
2984  * (2) pending assertions that could be invalidated by a change
2985  * in clock vectors (i.e., data races)
2986  * (3) no pending promises
2987  */
2988 void ModelExecution::fixup_release_sequences()
2989 {
2990         while (!pending_rel_seqs.empty() &&
2991                         is_feasible_prefix_ignore_relseq() &&
2992                         haveUnrealizedRaces()) {
2993                 model_print("*** WARNING: release sequence fixup action "
2994                                 "(%zu pending release seuqence(s)) ***\n",
2995                                 pending_rel_seqs.size());
2996                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2997                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2998                                 model_thread);
2999                 take_step(fixup);
3000         };
3001 }