Remove promises
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
17
18 #define INITIAL_THREAD_ID       0
19
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 * (i.e., saved/restored along with the execution state between explorations).
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		next_backtrack(NULL),
		bugs(),
		too_many_reads(false),
		no_valid_reads(false),
		bad_synchronization(false),
		bad_sc_read(false),
		asserted(false)
	{ }

	~model_snapshot_members() {
		/* This struct owns the bug_message objects; release them all */
		for (unsigned int i = 0; i < bugs.size(); i++)
			delete bugs[i];
		bugs.clear();
	}

	/** @brief ID to be handed to the next created Thread */
	unsigned int next_thread_id;
	/** @brief Most recent sequence number handed to a ModelAction */
	modelclock_t used_sequence_numbers;
	/** @brief Cached "latest" backtracking point; may be NULL */
	ModelAction *next_backtrack;
	/** @brief Bug reports collected during this execution (owned) */
	SnapVector<bug_message *> bugs;
	/** @brief Set when check_recency() rejects every reads-from choice */
	bool too_many_reads;
	bool no_valid_reads;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
	/** @brief A bad seq-cst read was flagged (see set_bad_sc_read()) */
	bool bad_sc_read;
	/** @brief A trace assertion has been triggered; halt this execution */
	bool asserted;

	SNAPSHOTALLOC
};
56
/**
 * @brief Constructor
 * @param m The owning ModelChecker
 * @param params Model-checking parameters (fairness window, yield options, ...)
 * @param scheduler The scheduler driving this execution
 * @param node_stack The NodeStack recording exploration decisions
 *
 * Note: initializer order must match the member declaration order.
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	pthread_counter(0),
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	pthread_map(0),
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	mutex_map(),
	cond_map(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());	// L: Create model thread
	add_thread(model_thread);			// L: Add model thread to scheduler
	/* Give the scheduler and node stack a back-pointer to this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
87
/** @brief Destructor */
ModelExecution::~ModelExecution()
{
	/* Thread IDs are handed out densely by get_next_id(), so every
	 * Thread created during this execution is reachable via [0, N) */
	for (unsigned int i = 0; i < get_num_threads(); i++)
		delete get_thread(int_to_id(i));

	delete mo_graph;
	delete priv;
}
97
98 int ModelExecution::get_execution_number() const
99 {
100         return model->get_execution_number();
101 }
102
103 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
104 {
105         action_list_t *tmp = hash->get(ptr);
106         if (tmp == NULL) {
107                 tmp = new action_list_t();
108                 hash->put(ptr, tmp);
109         }
110         return tmp;
111 }
112
113 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
114 {
115         SnapVector<action_list_t> *tmp = hash->get(ptr);
116         if (tmp == NULL) {
117                 tmp = new SnapVector<action_list_t>();
118                 hash->put(ptr, tmp);
119         }
120         return tmp;
121 }
122
123 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
124 {
125         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
126         if (wrv==NULL)
127                 return NULL;
128         unsigned int thread=id_to_int(tid);
129         if (thread < wrv->size())
130                 return &(*wrv)[thread];
131         else
132                 return NULL;
133 }
134
135 /** @return a thread ID for a new Thread */
136 thread_id_t ModelExecution::get_next_id()
137 {
138         return priv->next_thread_id++;
139 }
140
141 /** @return the number of user threads created during this execution */
142 unsigned int ModelExecution::get_num_threads() const
143 {
144         return priv->next_thread_id;
145 }
146
147 /** @return a sequence number for a new ModelAction */
148 modelclock_t ModelExecution::get_next_seq_num()
149 {
150         return ++priv->used_sequence_numbers;
151 }
152
153 /**
154  * @brief Should the current action wake up a given thread?
155  *
156  * @param curr The current action
157  * @param thread The thread that we might wake up
158  * @return True, if we should wake up the sleeping thread; false otherwise
159  */
160 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
161 {
162         const ModelAction *asleep = thread->get_pending();
163         /* Don't allow partial RMW to wake anyone up */
164         if (curr->is_rmwr())
165                 return false;
166         /* Synchronizing actions may have been backtracked */
167         if (asleep->could_synchronize_with(curr))
168                 return true;
169         /* All acquire/release fences and fence-acquire/store-release */
170         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
171                 return true;
172         /* Fence-release + store can awake load-acquire on the same location */
173         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
174                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
175                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
176                         return true;
177         }
178         return false;
179 }
180
181 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
182 {
183         for (unsigned int i = 0; i < get_num_threads(); i++) {
184                 Thread *thr = get_thread(int_to_id(i));
185                 if (scheduler->is_sleep_set(thr)) {
186                         if (should_wake_up(curr, thr))
187                                 /* Remove this thread from sleep set */
188                                 scheduler->remove_sleep(thr);
189                 }
190         }
191 }
192
193 /** @brief Alert the model-checker that an incorrectly-ordered
194  * synchronization was made */
195 void ModelExecution::set_bad_synchronization()
196 {
197         priv->bad_synchronization = true;
198 }
199
/** @brief Alert the model-checker that a bad seq-cst read was made
 *  (the original comment here was copy-pasted from
 *  set_bad_synchronization()) */
void ModelExecution::set_bad_sc_read()
{
	priv->bad_sc_read = true;
}
206
207 bool ModelExecution::assert_bug(const char *msg)
208 {
209         priv->bugs.push_back(new bug_message(msg));
210
211         if (isfeasibleprefix()) {
212                 set_assert();
213                 return true;
214         }
215         return false;
216 }
217
218 /** @return True, if any bugs have been reported for this execution */
219 bool ModelExecution::have_bug_reports() const
220 {
221         return priv->bugs.size() != 0;
222 }
223
224 SnapVector<bug_message *> * ModelExecution::get_bugs() const
225 {
226         return &priv->bugs;
227 }
228
229 /**
230  * Check whether the current trace has triggered an assertion which should halt
231  * its execution.
232  *
233  * @return True, if the execution should be aborted; false otherwise
234  */
235 bool ModelExecution::has_asserted() const
236 {
237         return priv->asserted;
238 }
239
240 /**
241  * Trigger a trace assertion which should cause this execution to be halted.
242  * This can be due to a detected bug or due to an infeasibility that should
243  * halt ASAP.
244  */
245 void ModelExecution::set_assert()
246 {
247         priv->asserted = true;
248 }
249
250 /**
251  * Check if we are in a deadlock. Should only be called at the end of an
252  * execution, although it should not give false positives in the middle of an
253  * execution (there should be some ENABLED thread).
254  *
255  * @return True if program is in a deadlock; false otherwise
256  */
257 bool ModelExecution::is_deadlocked() const
258 {
259         bool blocking_threads = false;
260         for (unsigned int i = 0; i < get_num_threads(); i++) {
261                 thread_id_t tid = int_to_id(i);
262                 if (is_enabled(tid))
263                         return false;
264                 Thread *t = get_thread(tid);
265                 if (!t->is_model_thread() && t->get_pending())
266                         blocking_threads = true;
267         }
268         return blocking_threads;
269 }
270
271 /**
272  * @brief Check if we are yield-blocked
273  *
274  * A program can be "yield-blocked" if all threads are ready to execute a
275  * yield.
276  *
277  * @return True if the program is yield-blocked; false otherwise
278  */
279 bool ModelExecution::is_yieldblocked() const
280 {
281         if (!params->yieldblock)
282                 return false;
283
284         for (unsigned int i = 0; i < get_num_threads(); i++) {
285                 thread_id_t tid = int_to_id(i);
286                 Thread *t = get_thread(tid);
287                 if (t->get_pending() && t->get_pending()->is_yield())
288                         return true;
289         }
290         return false;
291 }
292
293 /**
294  * Check if this is a complete execution. That is, have all thread completed
295  * execution (rather than exiting because sleep sets have forced a redundant
296  * execution).
297  *
298  * @return True if the execution is complete.
299  */
300 bool ModelExecution::is_complete_execution() const
301 {
302         if (is_yieldblocked())
303                 return false;
304         for (unsigned int i = 0; i < get_num_threads(); i++)
305                 if (is_enabled(int_to_id(i)))
306                         return false;
307         return true;
308 }
309
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release: position rit just before last_release in
	 * reverse-trace order, so the second loop only sees earlier actions */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Indexed per thread; NULL entries mean "none found yet" */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Actions in act's own thread never conflict with it */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the latest (first in reverse order)
		 * fence-acquire per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* A conflict requires both a fence-acquire and a prior load on the
	 * same thread; pick the latest such fence across all threads */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
382
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* Intentional fall-through: a seq-cst fence is handled like
		 * the atomic read/write cases below */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return whichever of
		 * the two candidates is more recent */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		/* Other action types never cause backtracking */
		break;
	}
	return NULL;
}
482
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	if (prev == NULL)
		return;	/* no conflicting action: nothing to backtrack */

	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict point we only need to
	 * try that single thread; otherwise consider every thread */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			bool unfair = false;
			/* NOTE: inner loop variable 't' shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			bool unfair = false;
			/* NOTE: inner loop variable 't' shadows the outer Thread *t */
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
566
567 /**
568  * @brief Cache the a backtracking point as the "most recent", if eligible
569  *
570  * Note that this does not prepare the NodeStack for this backtracking
571  * operation, it only caches the action on a per-execution basis
572  *
573  * @param act The operation at which we should explore a different next action
574  * (i.e., backtracking point)
575  * @return True, if this action is now the most recent backtracking point;
576  * false otherwise
577  */
578 bool ModelExecution::set_latest_backtrack(ModelAction *act)
579 {
580         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
581                 priv->next_backtrack = act;
582                 return true;
583         }
584         return false;
585 }
586
587 /**
588  * Returns last backtracking point. The model checker will explore a different
589  * path for this point in the next execution.
590  * @return The ModelAction at which the next execution should diverge.
591  */
592 ModelAction * ModelExecution::get_next_backtrack()
593 {
594         ModelAction *next = priv->next_backtrack;
595         priv->next_backtrack = NULL;
596         return next;
597 }
598
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loop so we can retry with the next reads-from candidate after a
	 * rollback (the 'continue' below) */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			/* Stage mo_graph edges so they can be rolled back if
			 * this reads-from choice is rejected */
			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Try the next candidate write */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left; record the failure
					 * but still commit this read */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			break;
		}
		default:
			/* Only READ_FROM_PAST is supported here */
			ASSERT(false);
		}
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
638
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	cdsc::mutex *mutex = curr->get_mutex();
	struct cdsc::mutex_state *state = NULL;

	/* NOTE(review): state remains NULL when curr carries no mutex, yet
	 * the cases below dereference it unconditionally -- presumably every
	 * lock-family action has a mutex; TODO confirm. */
	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Flag accesses that happen-before the mutex's initialization */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* get_misc() selects which waiter to wake */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();

		// WL
		/* Nothing to wake if no thread is waiting.
		 * NOTE(review): advance() below assumes wakeupthread indexes a
		 * live waiter; presumably get_misc() is bounded by the waiter
		 * count -- TODO confirm. */
		if (it == waiters->end())
			break;

		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
739
740 /**
741  * Process a write ModelAction
742  * @param curr The ModelAction to process
743  * @param work The work queue, for adding fixup work
744  * @return True if the mo_graph was updated or promises were resolved
745  */
746 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
747 {
748         /* Readers to which we may send our future value */
749         ModelVector<ModelAction *> send_fv;
750
751
752         bool updated_mod_order = w_modification_order(curr);
753
754         mo_graph->commitChanges();
755
756         get_thread(curr)->set_return_value(VALUE_NONE);
757         return updated_mod_order;
758 }
759
/**
 * Process a fence ModelAction
 * @param curr The ModelAction to process
 * @return True if synchronization was updated
 */
bool ModelExecution::process_fence(ModelAction *curr)
{
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurence (not in this function), for
	 *   use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 *   sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = &action_trace;
		action_list_t::reverse_iterator rit;
		/* Find X : is_read(X) && X --sb-> curr */
		/* Walk the trace backwards over curr's own thread only */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr)
				continue;
			if (act->get_tid() != curr->get_tid())
				continue;
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
				break;
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
				break;
			if (!act->is_read())
				continue;
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
				continue;

			/* Establish hypothetical release sequences */
			rel_heads_list_t release_heads;
			get_release_seq_heads(curr, act, &release_heads);
			/* Synchronize curr with every discovered release head */
			for (unsigned int i = 0; i < release_heads.size(); i++)
				synchronize(release_heads[i], curr);
			if (release_heads.size() != 0)
				updated = true;
		}
	}
	return updated;
}
809
810 /**
811  * @brief Process the current action for thread-related activity
812  *
813  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
814  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
815  * synchronization, etc.  This function is a no-op for non-THREAD actions
816  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
817  *
818  * @param curr The current action
819  * @return True if synchronization was updated or a thread completed
820  */
821 bool ModelExecution::process_thread_action(ModelAction *curr)
822 {
823         bool updated = false;
824
825         switch (curr->get_type()) {
826         case THREAD_CREATE: {
827                 thrd_t *thrd = (thrd_t *)curr->get_location();
828                 struct thread_params *params = (struct thread_params *)curr->get_value();
829                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
830                 curr->set_thread_operand(th);
831                 add_thread(th);
832                 th->set_creation(curr);
833                 break;
834         }
835         case PTHREAD_CREATE: {
836                 (*(pthread_t *)curr->get_location()) = pthread_counter++;       
837
838                 struct pthread_params *params = (struct pthread_params *)curr->get_value();
839                 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
840                 curr->set_thread_operand(th);
841                 add_thread(th);
842                 th->set_creation(curr);
843
844                 if ( pthread_map.size() < pthread_counter )
845                         pthread_map.resize( pthread_counter );
846                 pthread_map[ pthread_counter-1 ] = th;
847
848                 break;
849         }
850         case THREAD_JOIN: {
851                 Thread *blocking = curr->get_thread_operand();
852                 ModelAction *act = get_last_action(blocking->get_id());
853                 synchronize(act, curr);
854                 updated = true; /* trigger rel-seq checks */
855                 break;
856         }
857         case PTHREAD_JOIN: {
858                 Thread *blocking = curr->get_thread_operand();
859                 ModelAction *act = get_last_action(blocking->get_id());
860                 synchronize(act, curr);
861                 updated = true; /* trigger rel-seq checks */
862                 break; // WL: to be add (modified)
863         }
864
865         case THREAD_FINISH: {
866                 Thread *th = get_thread(curr);
867                 /* Wake up any joining threads */
868                 for (unsigned int i = 0; i < get_num_threads(); i++) {
869                         Thread *waiting = get_thread(int_to_id(i));
870                         if (waiting->waiting_on() == th &&
871                                         waiting->get_pending()->is_thread_join())
872                                 scheduler->wake(waiting);
873                 }
874                 th->complete();
875                 updated = true; /* trigger rel-seq checks */
876                 break;
877         }
878         case THREAD_START: {
879                 break;
880         }
881         default:
882                 break;
883         }
884
885         return updated;
886 }
887
888 /**
889  * @brief Process the current action for release sequence fixup activity
890  *
891  * Performs model-checker release sequence fixups for the current action,
892  * forcing a single pending release sequence to break (with a given, potential
893  * "loose" write) or to complete (i.e., synchronize). If a pending release
894  * sequence forms a complete release sequence, then we must perform the fixup
895  * synchronization, mo_graph additions, etc.
896  *
897  * @param curr The current action; must be a release sequence fixup action
898  * @param work_queue The work queue to which to add work items as they are
899  * generated
900  */
901 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
902 {
903         const ModelAction *write = curr->get_node()->get_relseq_break();
904         struct release_seq *sequence = pending_rel_seqs.back();
905         pending_rel_seqs.pop_back();
906         ASSERT(sequence);
907         ModelAction *acquire = sequence->acquire;
908         const ModelAction *rf = sequence->rf;
909         const ModelAction *release = sequence->release;
910         ASSERT(acquire);
911         ASSERT(release);
912         ASSERT(rf);
913         ASSERT(release->same_thread(rf));
914
915         if (write == NULL) {
916                 /**
917                  * @todo Forcing a synchronization requires that we set
918                  * modification order constraints. For instance, we can't allow
919                  * a fixup sequence in which two separate read-acquire
920                  * operations read from the same sequence, where the first one
921                  * synchronizes and the other doesn't. Essentially, we can't
922                  * allow any writes to insert themselves between 'release' and
923                  * 'rf'
924                  */
925
926                 /* Must synchronize */
927                 if (!synchronize(release, acquire))
928                         return;
929
930                 /* Propagate the changed clock vector */
931                 propagate_clockvector(acquire, work_queue);
932         } else {
933                 /* Break release sequence with new edges:
934                  *   release --mo--> write --mo--> rf */
935                 mo_graph->addEdge(release, write);
936                 mo_graph->addEdge(write, rf);
937         }
938
939         /* See if we have realized a data race */
940         checkDataRaces();
941 }
942
943 /**
944  * Initialize the current action by performing one or more of the following
945  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
946  * in the NodeStack, manipulating backtracking sets, allocating and
947  * initializing clock vectors, and computing the promises to fulfill.
948  *
949  * @param curr The current action, as passed from the user context; may be
950  * freed/invalidated after the execution of this function, with a different
951  * action "returned" its place (pass-by-reference)
952  * @return True if curr is a newly-explored action; false otherwise
953  */
954 bool ModelExecution::initialize_curr_action(ModelAction **curr)
955 {
956         ModelAction *newcurr;
957
958         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
959                 newcurr = process_rmw(*curr);
960                 delete *curr;
961
962                 *curr = newcurr;
963                 return false;
964         }
965
966         (*curr)->set_seq_number(get_next_seq_num());
967
968         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
969         if (newcurr) {
970                 /* First restore type and order in case of RMW operation */
971                 if ((*curr)->is_rmwr())
972                         newcurr->copy_typeandorder(*curr);
973
974                 ASSERT((*curr)->get_location() == newcurr->get_location());
975                 newcurr->copy_from_new(*curr);
976
977                 /* Discard duplicate ModelAction; use action from NodeStack */
978                 delete *curr;
979
980                 /* Always compute new clock vector */
981                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
982
983                 *curr = newcurr;
984                 return false; /* Action was explored previously */
985         } else {
986                 newcurr = *curr;
987
988                 /* Always compute new clock vector */
989                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
990
991                 /* Assign most recent release fence */
992                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
993
994                 /*
995                  * Perform one-time actions when pushing new ModelAction onto
996                  * NodeStack
997                  */
998                 if (newcurr->is_relseq_fixup())
999                         compute_relseq_breakwrites(newcurr);
1000                 else if (newcurr->is_wait())
1001                         newcurr->get_node()->set_misc_max(2);
1002                 else if (newcurr->is_notify_one()) {
1003                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1004                 }
1005                 return true; /* This was a new ModelAction */
1006         }
1007 }
1008
1009 /**
1010  * @brief Establish reads-from relation between two actions
1011  *
1012  * Perform basic operations involved with establishing a concrete rf relation,
1013  * including setting the ModelAction data and checking for release sequences.
1014  *
1015  * @param act The action that is reading (must be a read)
1016  * @param rf The action from which we are reading (must be a write)
1017  *
1018  * @return True if this read established synchronization
1019  */
1020
1021 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1022 {
1023         ASSERT(rf);
1024         ASSERT(rf->is_write());
1025
1026         act->set_read_from(rf);
1027         if (act->is_acquire()) {
1028                 rel_heads_list_t release_heads;
1029                 get_release_seq_heads(act, act, &release_heads);
1030                 int num_heads = release_heads.size();
1031                 for (unsigned int i = 0; i < release_heads.size(); i++)
1032                         if (!synchronize(release_heads[i], act))
1033                                 num_heads--;
1034                 return num_heads > 0;
1035         }
1036         return false;
1037 }
1038
1039 /**
1040  * @brief Synchronizes two actions
1041  *
1042  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1043  * This function performs the synchronization as well as providing other hooks
1044  * for other checks along with synchronization.
1045  *
1046  * @param first The left-hand side of the synchronizes-with relation
1047  * @param second The right-hand side of the synchronizes-with relation
1048  * @return True if the synchronization was successful (i.e., was consistent
1049  * with the execution order); false otherwise
1050  */
1051 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1052 {
1053         if (*second < *first) {
1054                 set_bad_synchronization();
1055                 return false;
1056         }
1057         return second->synchronize_with(first);
1058 }
1059
1060 /**
1061  * @brief Check whether a model action is enabled.
1062  *
1063  * Checks whether an operation would be successful (i.e., is a lock already
1064  * locked, or is the joined thread already complete).
1065  *
1066  * For yield-blocking, yields are never enabled.
1067  *
1068  * @param curr is the ModelAction to check whether it is enabled.
1069  * @return a bool that indicates whether the action is enabled.
1070  */
1071 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1072         if (curr->is_lock()) {
1073                 cdsc::mutex *lock = curr->get_mutex();
1074                 struct cdsc::mutex_state *state = lock->get_state();
1075                 if (state->locked)
1076                         return false;
1077         } else if (curr->is_thread_join()) {
1078                 Thread *blocking = curr->get_thread_operand();
1079                 if (!blocking->is_complete()) {
1080                         return false;
1081                 }
1082         } else if (params->yieldblock && curr->is_yield()) {
1083                 return false;
1084         }
1085
1086         return true;
1087 }
1088
1089 /**
1090  * This is the heart of the model checker routine. It performs model-checking
1091  * actions corresponding to a given "current action." Among other processes, it
1092  * calculates reads-from relationships, updates synchronization clock vectors,
1093  * forms a memory_order constraints graph, and handles replay/backtrack
1094  * execution when running permutations of previously-observed executions.
1095  *
1096  * @param curr The current action to process
1097  * @return The ModelAction that is actually executed; may be different than
1098  * curr
1099  */
1100 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1101 {
1102         ASSERT(curr);
1103         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1104         bool newly_explored = initialize_curr_action(&curr);
1105
1106         DBG();
1107
1108         wake_up_sleeping_actions(curr);
1109
1110         /* Compute fairness information for CHESS yield algorithm */
1111         if (params->yieldon) {
1112                 curr->get_node()->update_yield(scheduler);
1113         }
1114
1115         /* Add the action to lists before any other model-checking tasks */
1116         if (!second_part_of_rmw)
1117                 add_action_to_lists(curr);
1118
1119         /* Build may_read_from set for newly-created actions */
1120         if (newly_explored && curr->is_read())
1121                 build_may_read_from(curr);
1122
1123         /* Initialize work_queue with the "current action" work */
1124         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1125         while (!work_queue.empty() && !has_asserted()) {
1126                 WorkQueueEntry work = work_queue.front();
1127                 work_queue.pop_front();
1128
1129                 switch (work.type) {
1130                 case WORK_CHECK_CURR_ACTION: {
1131                         ModelAction *act = work.action;
1132                         bool update = false; /* update this location's release seq's */
1133                         bool update_all = false; /* update all release seq's */
1134
1135                         if (process_thread_action(curr))
1136                                 update_all = true;
1137
1138                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1139                                 update = true;
1140
1141                         if (act->is_write() && process_write(act, &work_queue))
1142                                 update = true;
1143
1144                         if (act->is_fence() && process_fence(act))
1145                                 update_all = true;
1146
1147                         if (act->is_mutex_op() && process_mutex(act))
1148                                 update_all = true;
1149
1150                         if (act->is_relseq_fixup())
1151                                 process_relseq_fixup(curr, &work_queue);
1152
1153                         if (update_all)
1154                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1155                         else if (update)
1156                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1157                         break;
1158                 }
1159                 case WORK_CHECK_RELEASE_SEQ:
1160                         resolve_release_sequences(work.location, &work_queue);
1161                         break;
1162                 case WORK_CHECK_MO_EDGES: {
1163                         /** @todo Complete verification of work_queue */
1164                         ModelAction *act = work.action;
1165                         bool updated = false;
1166
1167                         if (act->is_read()) {
1168                                 const ModelAction *rf = act->get_reads_from();
1169                                 if (r_modification_order(act, rf))
1170                                   updated = true;
1171                                 if (act->is_seqcst()) {
1172                                   ModelAction *last_sc_write = get_last_seq_cst_write(act);
1173                                   if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
1174                                     set_bad_sc_read();
1175                                   }
1176                                 }
1177                         }
1178                         if (act->is_write()) {
1179                                 if (w_modification_order(act))
1180                                         updated = true;
1181                         }
1182                         mo_graph->commitChanges();
1183
1184                         if (updated)
1185                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1186                         break;
1187                 }
1188                 default:
1189                         ASSERT(false);
1190                         break;
1191                 }
1192         }
1193
1194         check_curr_backtracking(curr);
1195         set_backtracking(curr);
1196         return curr;
1197 }
1198
1199 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1200 {
1201         Node *currnode = curr->get_node();
1202         Node *parnode = currnode->get_parent();
1203
1204         if ((parnode && !parnode->backtrack_empty()) ||
1205                          !currnode->misc_empty() ||
1206                          !currnode->read_from_empty() ||
1207                          !currnode->relseq_break_empty()) {
1208                 set_latest_backtrack(curr);
1209         }
1210 }
1211
1212 /**
1213  * This is the strongest feasibility check available.
1214  * @return whether the current trace (partial or complete) must be a prefix of
1215  * a feasible trace.
1216  */
1217 bool ModelExecution::isfeasibleprefix() const
1218 {
1219         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1220 }
1221
1222 /**
1223  * Print disagnostic information about an infeasible execution
1224  * @param prefix A string to prefix the output with; if NULL, then a default
1225  * message prefix will be provided
1226  */
1227 void ModelExecution::print_infeasibility(const char *prefix) const
1228 {
1229         char buf[100];
1230         char *ptr = buf;
1231         if (mo_graph->checkForCycles())
1232                 ptr += sprintf(ptr, "[mo cycle]");
1233         if (priv->too_many_reads)
1234                 ptr += sprintf(ptr, "[too many reads]");
1235         if (priv->no_valid_reads)
1236                 ptr += sprintf(ptr, "[no valid reads-from]");
1237         if (priv->bad_synchronization)
1238                 ptr += sprintf(ptr, "[bad sw ordering]");
1239         if (priv->bad_sc_read)
1240                 ptr += sprintf(ptr, "[bad sc read]");
1241         if (ptr != buf)
1242                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1243 }
1244
1245 /**
1246  * Returns whether the current completed trace is feasible, except for pending
1247  * release sequences.
1248  */
1249 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1250 {
1251   return !is_infeasible() ;
1252
1253 }
1254
1255 /**
1256  * Check if the current partial trace is infeasible. Does not check any
1257  * end-of-execution flags, which might rule out the execution. Thus, this is
1258  * useful only for ruling an execution as infeasible.
1259  * @return whether the current partial trace is infeasible.
1260  */
1261 bool ModelExecution::is_infeasible() const
1262 {
1263         return mo_graph->checkForCycles() ||
1264                 priv->no_valid_reads ||
1265                 priv->too_many_reads ||
1266                 priv->bad_synchronization ||
1267           priv->bad_sc_read;
1268 }
1269
1270 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1271 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1272         ModelAction *lastread = get_last_action(act->get_tid());
1273         lastread->process_rmw(act);
1274         if (act->is_rmw()) {
1275           mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1276                 mo_graph->commitChanges();
1277         }
1278         return lastread;
1279 }
1280
1281 /**
1282  * A helper function for ModelExecution::check_recency, to check if the current
1283  * thread is able to read from a different write/promise for 'params.maxreads'
1284  * number of steps and if that write/promise should become visible (i.e., is
1285  * ordered later in the modification order). This helps model memory liveness.
1286  *
1287  * @param curr The current action. Must be a read.
1288  * @param rf The write/promise from which we plan to read
1289  * @param other_rf The write/promise from which we may read
1290  * @return True if we were able to read from other_rf for params.maxreads steps
1291  */
1292 template <typename T, typename U>
1293 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1294 {
1295         /* Need a different write/promise */
1296         if (other_rf->equals(rf))
1297                 return false;
1298
1299         /* Only look for "newer" writes/promises */
1300         if (!mo_graph->checkReachable(rf, other_rf))
1301                 return false;
1302
1303         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1304         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1305         action_list_t::reverse_iterator rit = list->rbegin();
1306         ASSERT((*rit) == curr);
1307         /* Skip past curr */
1308         rit++;
1309
1310         /* Does this write/promise work for everyone? */
1311         for (int i = 0; i < params->maxreads; i++, rit++) {
1312                 ModelAction *act = *rit;
1313                 if (!act->may_read_from(other_rf))
1314                         return false;
1315         }
1316         return true;
1317 }
1318
1319 /**
1320  * Checks whether a thread has read from the same write or Promise for too many
1321  * times without seeing the effects of a later write/Promise.
1322  *
1323  * Basic idea:
1324  * 1) there must a different write/promise that we could read from,
1325  * 2) we must have read from the same write/promise in excess of maxreads times,
1326  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1327  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1328  *
1329  * If so, we decide that the execution is no longer feasible.
1330  *
1331  * @param curr The current action. Must be a read.
1332  * @param rf The ModelAction/Promise from which we might read.
1333  * @return True if the read should succeed; false otherwise
1334  */
1335 template <typename T>
1336 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1337 {
1338         if (!params->maxreads)
1339                 return true;
1340
1341         //NOTE: Next check is just optimization, not really necessary....
1342         if (curr->get_node()->get_read_from_past_size() <= 1)
1343                 return true;
1344
1345         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1346         int tid = id_to_int(curr->get_tid());
1347         ASSERT(tid < (int)thrd_lists->size());
1348         action_list_t *list = &(*thrd_lists)[tid];
1349         action_list_t::reverse_iterator rit = list->rbegin();
1350         ASSERT((*rit) == curr);
1351         /* Skip past curr */
1352         rit++;
1353
1354         action_list_t::reverse_iterator ritcopy = rit;
1355         /* See if we have enough reads from the same value */
1356         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1357                 if (ritcopy == list->rend())
1358                         return true;
1359                 ModelAction *act = *ritcopy;
1360                 if (!act->is_read())
1361                         return true;
1362                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1363                         return true;
1364                 if (act->get_node()->get_read_from_past_size() <= 1)
1365                         return true;
1366         }
1367         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1368                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1369                 if (should_read_instead(curr, rf, write))
1370                         return false; /* liveness failure */
1371         }
1372         return true;
1373 }
1374
1375 /**
1376  * @brief Updates the mo_graph with the constraints imposed from the current
1377  * read.
1378  *
1379  * Basic idea is the following: Go through each other thread and find
1380  * the last action that happened before our read.  Two cases:
1381  *
1382  * -# The action is a write: that write must either occur before
1383  * the write we read from or be the write we read from.
1384  * -# The action is a read: the write that that action read from
1385  * must occur before the write we read from or be the same write.
1386  *
1387  * @param curr The current action. Must be a read.
1388  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1389  * @return True if modification order edges were added; false otherwise
1390  */
1391 template <typename rf_type>
1392 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1393 {
1394         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1395         unsigned int i;
1396         bool added = false;
1397         ASSERT(curr->is_read());
1398
1399         /* Last SC fence in the current thread */
1400         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1401         ModelAction *last_sc_write = NULL;
1402         if (curr->is_seqcst())
1403                 last_sc_write = get_last_seq_cst_write(curr);
1404
1405         /* Iterate over all threads */
1406         for (i = 0; i < thrd_lists->size(); i++) {
1407                 /* Last SC fence in thread i */
1408                 ModelAction *last_sc_fence_thread_local = NULL;
1409                 if (int_to_id((int)i) != curr->get_tid())
1410                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1411
1412                 /* Last SC fence in thread i, before last SC fence in current thread */
1413                 ModelAction *last_sc_fence_thread_before = NULL;
1414                 if (last_sc_fence_local)
1415                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1416
1417                 /* Iterate over actions in thread, starting from most recent */
1418                 action_list_t *list = &(*thrd_lists)[i];
1419                 action_list_t::reverse_iterator rit;
1420                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1421                         ModelAction *act = *rit;
1422
1423                         /* Skip curr */
1424                         if (act == curr)
1425                                 continue;
1426                         /* Don't want to add reflexive edges on 'rf' */
1427                         if (act->equals(rf)) {
1428                                 if (act->happens_before(curr))
1429                                         break;
1430                                 else
1431                                         continue;
1432                         }
1433
1434                         if (act->is_write()) {
1435                                 /* C++, Section 29.3 statement 5 */
1436                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1437                                                 *act < *last_sc_fence_thread_local) {
1438                                         added = mo_graph->addEdge(act, rf) || added;
1439                                         break;
1440                                 }
1441                                 /* C++, Section 29.3 statement 4 */
1442                                 else if (act->is_seqcst() && last_sc_fence_local &&
1443                                                 *act < *last_sc_fence_local) {
1444                                         added = mo_graph->addEdge(act, rf) || added;
1445                                         break;
1446                                 }
1447                                 /* C++, Section 29.3 statement 6 */
1448                                 else if (last_sc_fence_thread_before &&
1449                                                 *act < *last_sc_fence_thread_before) {
1450                                         added = mo_graph->addEdge(act, rf) || added;
1451                                         break;
1452                                 }
1453                         }
1454
1455                         /*
1456                          * Include at most one act per-thread that "happens
1457                          * before" curr
1458                          */
1459                         if (act->happens_before(curr)) {
1460                                 if (act->is_write()) {
1461                                         added = mo_graph->addEdge(act, rf) || added;
1462                                 } else {
1463                                         const ModelAction *prevrf = act->get_reads_from();
1464                                         if (!prevrf->equals(rf))
1465                                           added = mo_graph->addEdge(prevrf, rf) || added;
1466                                 }
1467                                 break;
1468                         }
1469                 }
1470         }
1471
1472         /*
1473          * All compatible, thread-exclusive promises must be ordered after any
1474          * concrete loads from the same thread
1475          */
1476
1477         return added;
1478 }
1479
1480 /**
1481  * Updates the mo_graph with the constraints imposed from the current write.
1482  *
1483  * Basic idea is the following: Go through each other thread and find
1484  * the lastest action that happened before our write.  Two cases:
1485  *
1486  * (1) The action is a write => that write must occur before
1487  * the current write
1488  *
1489  * (2) The action is a read => the write that that action read from
1490  * must occur before the current write.
1491  *
1492  * This method also handles two other issues:
1493  *
1494  * (I) Sequential Consistency: Making sure that if the current write is
1495  * seq_cst, that it occurs after the previous seq_cst write.
1496  *
1497  * (II) Sending the write back to non-synchronizing reads.
1498  *
1499  * @param curr The current action. Must be a write.
1500  * @param send_fv A vector for stashing reads to which we may pass our future
1501  * value. If NULL, then don't record any future values.
1502  * @return True if modification order edges were added; false otherwise
1503  */
1504 bool ModelExecution::w_modification_order(ModelAction *curr)
1505 {
1506         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1507         unsigned int i;
1508         bool added = false;
1509         ASSERT(curr->is_write());
1510
1511         if (curr->is_seqcst()) {
1512                 /* We have to at least see the last sequentially consistent write,
1513                          so we are initialized. */
1514                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1515                 if (last_seq_cst != NULL) {
1516                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1517                 }
1518         }
1519
1520         /* Last SC fence in the current thread */
1521         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1522
1523         /* Iterate over all threads */
1524         for (i = 0; i < thrd_lists->size(); i++) {
1525                 /* Last SC fence in thread i, before last SC fence in current thread */
1526                 ModelAction *last_sc_fence_thread_before = NULL;
1527                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1528                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1529
1530                 /* Iterate over actions in thread, starting from most recent */
1531                 action_list_t *list = &(*thrd_lists)[i];
1532                 action_list_t::reverse_iterator rit;
1533                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1534                         ModelAction *act = *rit;
1535                         if (act == curr) {
1536                                 /*
1537                                  * 1) If RMW and it actually read from something, then we
1538                                  * already have all relevant edges, so just skip to next
1539                                  * thread.
1540                                  *
1541                                  * 2) If RMW and it didn't read from anything, we should
1542                                  * whatever edge we can get to speed up convergence.
1543                                  *
1544                                  * 3) If normal write, we need to look at earlier actions, so
1545                                  * continue processing list.
1546                                  */
1547                                 if (curr->is_rmw()) {
1548                                         if (curr->get_reads_from() != NULL)
1549                                                 break;
1550                                         else
1551                                                 continue;
1552                                 } else
1553                                         continue;
1554                         }
1555
1556                         /* C++, Section 29.3 statement 7 */
1557                         if (last_sc_fence_thread_before && act->is_write() &&
1558                                         *act < *last_sc_fence_thread_before) {
1559                                 added = mo_graph->addEdge(act, curr) || added;
1560                                 break;
1561                         }
1562
1563                         /*
1564                          * Include at most one act per-thread that "happens
1565                          * before" curr
1566                          */
1567                         if (act->happens_before(curr)) {
1568                                 /*
1569                                  * Note: if act is RMW, just add edge:
1570                                  *   act --mo--> curr
1571                                  * The following edge should be handled elsewhere:
1572                                  *   readfrom(act) --mo--> act
1573                                  */
1574                                 if (act->is_write())
1575                                         added = mo_graph->addEdge(act, curr) || added;
1576                                 else if (act->is_read()) {
1577                                         //if previous read accessed a null, just keep going
1578                                   added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1579                                 }
1580                                 break;
1581                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1582                                                      !act->same_thread(curr)) {
1583                                 /* We have an action that:
1584                                    (1) did not happen before us
1585                                    (2) is a read and we are a write
1586                                    (3) cannot synchronize with us
1587                                    (4) is in a different thread
1588                                    =>
1589                                    that read could potentially read from our write.  Note that
1590                                    these checks are overly conservative at this point, we'll
1591                                    do more checks before actually removing the
1592                                    pendingfuturevalue.
1593
1594                                  */
1595
1596                         }
1597                 }
1598         }
1599
1600         return added;
1601 }
1602
1603 /**
1604  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one of the following constraints (others
1606  * require compiler support):
1607  *
1608  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1609  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1610  */
1611 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1612 {
1613         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1614         unsigned int i;
1615         /* Iterate over all threads */
1616         for (i = 0; i < thrd_lists->size(); i++) {
1617                 const ModelAction *write_after_read = NULL;
1618
1619                 /* Iterate over actions in thread, starting from most recent */
1620                 action_list_t *list = &(*thrd_lists)[i];
1621                 action_list_t::reverse_iterator rit;
1622                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1623                         ModelAction *act = *rit;
1624
1625                         /* Don't disallow due to act == reader */
1626                         if (!reader->happens_before(act) || reader == act)
1627                                 break;
1628                         else if (act->is_write())
1629                                 write_after_read = act;
1630                         else if (act->is_read() && act->get_reads_from() != NULL)
1631                                 write_after_read = act->get_reads_from();
1632                 }
1633
1634                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1635                         return false;
1636         }
1637         return true;
1638 }
1639
1640 /**
1641  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1642  * The ModelAction under consideration is expected to be taking part in
1643  * release/acquire synchronization as an object of the "reads from" relation.
1644  * Note that this can only provide release sequence support for RMW chains
1645  * which do not read from the future, as those actions cannot be traced until
1646  * their "promise" is fulfilled. Similarly, we may not even establish the
1647  * presence of a release sequence with certainty, as some modification order
1648  * constraints may be decided further in the future. Thus, this function
1649  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1650  * and a boolean representing certainty.
1651  *
1652  * @param rf The action that might be part of a release sequence. Must be a
1653  * write.
1654  * @param release_heads A pass-by-reference style return parameter. After
1655  * execution of this function, release_heads will contain the heads of all the
1656  * relevant release sequences, if any exists with certainty
1657  * @param pending A pass-by-reference style return parameter which is only used
1658  * when returning false (i.e., uncertain). Returns most information regarding
1659  * an uncertain release sequence, including any write operations that might
1660  * break the sequence.
1661  * @return true, if the ModelExecution is certain that release_heads is complete;
1662  * false otherwise
1663  */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backwards through the RMW chain that feeds 'rf', collecting any
	 * release writes (or their preceding fence-releases) as heads */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that could slot into the
	 * modification order between 'release' and 'rf', breaking the
	 * contiguous release sequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
				      mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	/* Certain: 'release' heads a contiguous sequence ending at 'rf'.
	 * Uncertain: stash 'release'/'rf' in 'pending' for lazy re-checking. */
	if (certain) {
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
1807
1808 /**
1809  * An interface for getting the release sequence head(s) with which a
1810  * given ModelAction must synchronize. This function only returns a non-empty
1811  * result when it can locate a release sequence head with certainty. Otherwise,
1812  * it may mark the internal state of the ModelExecution so that it will handle
1813  * the release sequence at a later time, causing @a acquire to update its
1814  * synchronization at some later point in execution.
1815  *
1816  * @param acquire The 'acquire' action that may synchronize with a release
1817  * sequence
1818  * @param read The read action that may read from a release sequence; this may
1819  * be the same as acquire, or else an earlier action in the same thread (i.e.,
1820  * when 'acquire' is a fence-acquire)
1821  * @param release_heads A pass-by-reference return parameter. Will be filled
1822  * with the head(s) of the release sequence(s), if they exists with certainty.
1823  * @see ModelExecution::release_seq_heads
1824  */
1825 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
1826                 ModelAction *read, rel_heads_list_t *release_heads)
1827 {
1828         const ModelAction *rf = read->get_reads_from();
1829         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1830         sequence->acquire = acquire;
1831         sequence->read = read;
1832
1833         if (!release_seq_heads(rf, release_heads, sequence)) {
1834                 /* add act to 'lazy checking' list */
1835                 pending_rel_seqs.push_back(sequence);
1836         } else {
1837                 snapshot_free(sequence);
1838         }
1839 }
1840
1841 /**
1842  * @brief Propagate a modified clock vector to actions later in the execution
1843  * order
1844  *
1845  * After an acquire operation lazily completes a release-sequence
1846  * synchronization, we must update all clock vectors for operations later than
1847  * the acquire in the execution order.
1848  *
1849  * @param acquire The ModelAction whose clock vector must be propagated
1850  * @param work The work queue to which we can add work items, if this
1851  * propagation triggers more updates (e.g., to the modification order)
1852  */
1853 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
1854 {
1855         /* Re-check all pending release sequences */
1856         work->push_back(CheckRelSeqWorkEntry(NULL));
1857         /* Re-check read-acquire for mo_graph edges */
1858         work->push_back(MOEdgeWorkEntry(acquire));
1859
1860         /* propagate synchronization to later actions */
1861         action_list_t::reverse_iterator rit = action_trace.rbegin();
1862         for (; (*rit) != acquire; rit++) {
1863                 ModelAction *propagate = *rit;
1864                 if (acquire->happens_before(propagate)) {
1865                         synchronize(acquire, propagate);
1866                         /* Re-check 'propagate' for mo_graph edges */
1867                         work->push_back(MOEdgeWorkEntry(propagate));
1868                 }
1869         }
1870 }
1871
1872 /**
1873  * Attempt to resolve all stashed operations that might synchronize with a
1874  * release sequence for a given location. This implements the "lazy" portion of
1875  * determining whether or not a release sequence was contiguous, since not all
1876  * modification order information is present at the time an action occurs.
1877  *
1878  * @param location The location/object that should be checked for release
1879  * sequence resolutions. A NULL value means to check all locations.
1880  * @param work_queue The work queue to which to add work items as they are
1881  * generated
1882  * @return True if any updates occurred (new synchronization, new mo_graph
1883  * edges)
1884  */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		/* Re-evaluate the release sequence; synchronize the acquire with
		 * any newly-certain heads */
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		/* NOTE(review): 'updated' accumulates across iterations, so once any
		 * sequence adds synchronization, the clock vector is re-propagated
		 * for every subsequent pending entry as well — confirm intended */
		if (updated) {
			/* Propagate the changed clock vector */
			propagate_clockvector(acquire, work_queue);
		}
		/* Completed sequences (certain either way) are removed; uncertain
		 * ones stay queued for a later pass */
		if (complete) {
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// New synchronization may have realized a data race; check now.
	checkDataRaces();

	return updated;
}
1926
1927 /**
1928  * Performs various bookkeeping operations for the current ModelAction. For
1929  * instance, adds action to the per-object, per-thread action vector and to the
1930  * action trace list of all thread actions.
1931  *
1932  * @param act is the ModelAction to add.
1933  */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First access to an atomic location: seed the lists with a synthetic
	 * UNINIT action so reads have something to read from */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	action_trace.push_back(act);
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-location, per-thread lists */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Track the most recent action per thread */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Track the most recent fence-release per thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	/* A wait releases its mutex, so also record it under the mutex
	 * location (stored in the action's value field) */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
1980
1981 /**
1982  * @brief Get the last action performed by a particular Thread
1983  * @param tid The thread ID of the Thread in question
1984  * @return The last action in the thread
1985  */
1986 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1987 {
1988         int threadid = id_to_int(tid);
1989         if (threadid < (int)thrd_last_action.size())
1990                 return thrd_last_action[id_to_int(tid)];
1991         else
1992                 return NULL;
1993 }
1994
1995 /**
1996  * @brief Get the last fence release performed by a particular Thread
1997  * @param tid The thread ID of the Thread in question
1998  * @return The last fence release in the thread, if one exists; NULL otherwise
1999  */
2000 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2001 {
2002         int threadid = id_to_int(tid);
2003         if (threadid < (int)thrd_last_fence_release.size())
2004                 return thrd_last_fence_release[id_to_int(tid)];
2005         else
2006                 return NULL;
2007 }
2008
2009 /**
2010  * Gets the last memory_order_seq_cst write (in the total global sequence)
2011  * performed on a particular object (i.e., memory location), not including the
2012  * current action.
2013  * @param curr The current ModelAction; also denotes the object location to
2014  * check
2015  * @return The last seq_cst write
2016  */
2017 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2018 {
2019         void *location = curr->get_location();
2020         action_list_t *list = obj_map.get(location);
2021         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2022         action_list_t::reverse_iterator rit;
2023         for (rit = list->rbegin(); (*rit) != curr; rit++)
2024                 ;
2025         rit++; /* Skip past curr */
2026         for ( ; rit != list->rend(); rit++)
2027                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2028                         return *rit;
2029         return NULL;
2030 }
2031
2032 /**
2033  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2034  * performed in a particular thread, prior to a particular fence.
2035  * @param tid The ID of the thread to check
2036  * @param before_fence The fence from which to begin the search; if NULL, then
2037  * search for the most recent fence in the thread.
2038  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2039  */
2040 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2041 {
2042         /* All fences should have location FENCE_LOCATION */
2043         action_list_t *list = obj_map.get(FENCE_LOCATION);
2044
2045         if (!list)
2046                 return NULL;
2047
2048         action_list_t::reverse_iterator rit = list->rbegin();
2049
2050         if (before_fence) {
2051                 for (; rit != list->rend(); rit++)
2052                         if (*rit == before_fence)
2053                                 break;
2054
2055                 ASSERT(*rit == before_fence);
2056                 rit++;
2057         }
2058
2059         for (; rit != list->rend(); rit++)
2060                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2061                         return *rit;
2062         return NULL;
2063 }
2064
2065 /**
2066  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2067  * location). This function identifies the mutex according to the current
2068  * action, which is presumed to perform on the same mutex.
2069  * @param curr The current ModelAction; also denotes the object location to
2070  * check
2071  * @return The last unlock operation
2072  */
2073 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2074 {
2075         void *location = curr->get_location();
2076
2077         action_list_t *list = obj_map.get(location);
2078         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2079         action_list_t::reverse_iterator rit;
2080         for (rit = list->rbegin(); rit != list->rend(); rit++)
2081                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2082                         return *rit;
2083         return NULL;
2084 }
2085
2086 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2087 {
2088         ModelAction *parent = get_last_action(tid);
2089         if (!parent)
2090                 parent = get_thread(tid)->get_creation();
2091         return parent;
2092 }
2093
2094 /**
2095  * Returns the clock vector for a given thread.
2096  * @param tid The thread whose clock vector we want
2097  * @return Desired clock vector
2098  */
2099 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2100 {
2101         return get_parent_action(tid)->get_cv();
2102 }
2103
2104 /**
2105  * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
2107  * calculations.
2108  *
2109  * @param curr The current ModelAction. Must be a release sequence fixup
2110  * action.
2111  */
2112 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2113 {
2114         if (pending_rel_seqs.empty())
2115                 return;
2116
2117         struct release_seq *pending = pending_rel_seqs.back();
2118         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2119                 const ModelAction *write = pending->writes[i];
2120                 curr->get_node()->add_relseq_break(write);
2121         }
2122
2123         /* NULL means don't break the sequence; just synchronize */
2124         curr->get_node()->add_relseq_break(NULL);
2125 }
2126
2127 /**
2128  * Build up an initial set of all past writes that this 'read' action may read
2129  * from, as well as any previously-observed future values that must still be valid.
2130  *
2131  * @param curr is the current ModelAction that we are exploring; it must be a
2132  * 'read' operation.
2133  */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: trial-add the mo edges for this
				 * candidate, keep it only if the graph stays feasible, then
				 * roll the trial edges back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2194
2195 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2196 {
2197         for ( ; write != NULL; write = write->get_reads_from()) {
2198                 /* UNINIT actions don't have a Node, and they never sleep */
2199                 if (write->is_uninitialized())
2200                         return true;
2201                 Node *prevnode = write->get_node()->get_parent();
2202
2203                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2204                 if (write->is_release() && thread_sleep)
2205                         return true;
2206                 if (!write->is_rmw())
2207                         return false;
2208         }
2209         return true;
2210 }
2211
2212 /**
2213  * @brief Get an action representing an uninitialized atomic
2214  *
2215  * This function may create a new one or try to retrieve one from the NodeStack
2216  *
2217  * @param curr The current action, which prompts the creation of an UNINIT action
2218  * @return A pointer to the UNINIT ModelAction
2219  */
2220 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2221 {
2222         Node *node = curr->get_node();
2223         ModelAction *act = node->get_uninit_action();
2224         if (!act) {
2225                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2226                 node->set_uninit_action(act);
2227         }
2228         act->create_cv(NULL);
2229         return act;
2230 }
2231
2232 static void print_list(const action_list_t *list)
2233 {
2234         action_list_t::const_iterator it;
2235
2236         model_print("------------------------------------------------------------------------------------\n");
2237         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2238         model_print("------------------------------------------------------------------------------------\n");
2239
2240         unsigned int hash = 0;
2241
2242         for (it = list->begin(); it != list->end(); it++) {
2243                 const ModelAction *act = *it;
2244                 if (act->get_seq_number() > 0)
2245                         act->print();
2246                 hash = hash^(hash<<3)^((*it)->hash());
2247         }
2248         model_print("HASH %u\n", hash);
2249         model_print("------------------------------------------------------------------------------------\n");
2250 }
2251
2252 #if SUPPORT_MOD_ORDER_DUMP
2253 void ModelExecution::dumpGraph(char *filename) const
2254 {
2255         char buffer[200];
2256         sprintf(buffer, "%s.dot", filename);
2257         FILE *file = fopen(buffer, "w");
2258         fprintf(file, "digraph %s {\n", filename);
2259         mo_graph->dumpNodes(file);
2260         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2261
2262         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2263                 ModelAction *act = *it;
2264                 if (act->is_read()) {
2265                         mo_graph->dot_print_node(file, act);
2266                         mo_graph->dot_print_edge(file,
2267                                                 act->get_reads_from(),
2268                                                 act,
2269                                                 "label=\"rf\", color=red, weight=2");
2270                 }
2271                 if (thread_array[act->get_tid()]) {
2272                         mo_graph->dot_print_edge(file,
2273                                         thread_array[id_to_int(act->get_tid())],
2274                                         act,
2275                                         "label=\"sb\", color=blue, weight=400");
2276                 }
2277
2278                 thread_array[act->get_tid()] = act;
2279         }
2280         fprintf(file, "}\n");
2281         model_free(thread_array);
2282         fclose(file);
2283 }
2284 #endif
2285
2286 /** @brief Prints an execution trace summary. */
2287 void ModelExecution::print_summary() const
2288 {
2289 #if SUPPORT_MOD_ORDER_DUMP
2290         char buffername[100];
2291         sprintf(buffername, "exec%04u", get_execution_number());
2292         mo_graph->dumpGraphToFile(buffername);
2293         sprintf(buffername, "graph%04u", get_execution_number());
2294         dumpGraph(buffername);
2295 #endif
2296
2297         model_print("Execution trace %d:", get_execution_number());
2298         if (isfeasibleprefix()) {
2299                 if (is_yieldblocked())
2300                         model_print(" YIELD BLOCKED");
2301                 if (scheduler->all_threads_sleeping())
2302                         model_print(" SLEEP-SET REDUNDANT");
2303                 if (have_bug_reports())
2304                         model_print(" DETECTED BUG(S)");
2305         } else
2306                 print_infeasibility(" INFEASIBLE");
2307         model_print("\n");
2308
2309         print_list(&action_trace);
2310         model_print("\n");
2311
2312 }
2313
2314 /**
2315  * Add a Thread to the system for the first time. Should only be called once
2316  * per thread.
2317  * @param t The Thread to add
2318  */
2319 void ModelExecution::add_thread(Thread *t)
2320 {
2321         unsigned int i = id_to_int(t->get_id());
2322         if (i >= thread_map.size())
2323                 thread_map.resize(i + 1);
2324         thread_map[i] = t;
2325         if (!t->is_model_thread())
2326                 scheduler->add_thread(t);
2327 }
2328
2329 /**
2330  * @brief Get a Thread reference by its ID
2331  * @param tid The Thread's ID
2332  * @return A Thread reference
2333  */
2334 Thread * ModelExecution::get_thread(thread_id_t tid) const
2335 {
2336         unsigned int i = id_to_int(tid);
2337         if (i < thread_map.size())
2338                 return thread_map[i];
2339         return NULL;
2340 }
2341
2342 /**
2343  * @brief Get a reference to the Thread in which a ModelAction was executed
2344  * @param act The ModelAction
2345  * @return A Thread reference
2346  */
2347 Thread * ModelExecution::get_thread(const ModelAction *act) const
2348 {
2349         return get_thread(act->get_tid());
2350 }
2351
2352 /**
2353  * @brief Get a Thread reference by its pthread ID
2354  * @param index The pthread's ID
2355  * @return A Thread reference
2356  */
2357 Thread * ModelExecution::get_pthread(pthread_t pid) {
2358         if (pid < pthread_counter + 1) return pthread_map[pid];
2359         else return NULL;
2360 }
2361
/**
 * @brief Check if a Thread is currently enabled
 * @param t The Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelExecution::is_enabled(Thread *t) const
{
	/* The Scheduler is the sole authority on enabled status */
	return scheduler->is_enabled(t);
}
2371
/**
 * @brief Check if a Thread is currently enabled
 * @param tid The ID of the Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelExecution::is_enabled(thread_id_t tid) const
{
	/* The Scheduler is the sole authority on enabled status */
	return scheduler->is_enabled(tid);
}
2381
2382 /**
2383  * @brief Select the next thread to execute based on the curren action
2384  *
2385  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2386  * actions should be followed by the execution of their child thread. In either
2387  * case, the current action should determine the next thread schedule.
2388  *
2389  * @param curr The current action
2390  * @return The next thread to run, if the current action will determine this
2391  * selection; otherwise NULL
2392  */
2393 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2394 {
2395         /* Do not split atomic RMW */
2396         if (curr->is_rmwr())
2397                 return get_thread(curr);
2398         if (curr->is_write()) {
2399 //              std::memory_order order = curr->get_mo(); 
2400 //              switch(order) {
2401 //                      case std::memory_order_relaxed: 
2402 //                              return get_thread(curr);
2403 //                      case std::memory_order_release:
2404 //                              return get_thread(curr);
2405 //                      defalut:
2406 //                              return NULL;
2407 //              }       
2408                 return NULL;
2409         }
2410
2411         /* Follow CREATE with the created thread */
2412         /* which is not needed, because model.cc takes care of this */
2413         if (curr->get_type() == THREAD_CREATE)
2414                 return curr->get_thread_operand(); 
2415         if (curr->get_type() == PTHREAD_CREATE) {
2416                 return curr->get_thread_operand();
2417         }
2418         return NULL;
2419 }
2420
2421 /** @return True if the execution has taken too many steps */
2422 bool ModelExecution::too_many_steps() const
2423 {
2424         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2425 }
2426
2427 /**
2428  * Takes the next step in the execution, if possible.
2429  * @param curr The current step to take
2430  * @return Returns the next Thread to run, if any; NULL if this execution
2431  * should terminate
2432  */
2433 Thread * ModelExecution::take_step(ModelAction *curr)
2434 {
2435         Thread *curr_thrd = get_thread(curr);
2436         ASSERT(curr_thrd->get_state() == THREAD_READY);
2437
2438         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2439         curr = check_current_action(curr);
2440         ASSERT(curr);
2441
2442         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2443                 scheduler->remove_thread(curr_thrd);
2444
2445         return action_select_next_thread(curr);
2446 }
2447
2448 /**
2449  * Launch end-of-execution release sequence fixups only when
2450  * the execution is otherwise feasible AND there are:
2451  *
2452  * (1) pending release sequences
2453  * (2) pending assertions that could be invalidated by a change
2454  * in clock vectors (i.e., data races)
2455  * (3) no pending promises
2456  */
2457 void ModelExecution::fixup_release_sequences()
2458 {
2459         while (!pending_rel_seqs.empty() &&
2460                         is_feasible_prefix_ignore_relseq() &&
2461                         haveUnrealizedRaces()) {
2462                 model_print("*** WARNING: release sequence fixup action "
2463                                 "(%zu pending release seuqence(s)) ***\n",
2464                                 pending_rel_seqs.size());
2465                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2466                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2467                                 model_thread);
2468                 take_step(fixup);
2469         };
2470 }