1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
17 #include "history.h"
18 #include "fuzzer.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 bugs(),
31                 bad_synchronization(false),
32                 asserted(false)
33         { }
34
35         ~model_snapshot_members() {
36                 for (unsigned int i = 0;i < bugs.size();i++)
37                         delete bugs[i];
38                 bugs.clear();
39         }
40
41         unsigned int next_thread_id;
42         modelclock_t used_sequence_numbers;
43         SnapVector<bug_message *> bugs;
44         /** @brief Incorrectly-ordered synchronization was made */
45         bool bad_synchronization;
46         bool asserted;
47
48         SNAPSHOTALLOC
49 };
50
51 /** @brief Constructor */
52 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler, NodeStack *node_stack) :
53         model(m),
54         params(NULL),
55         scheduler(scheduler),
56         action_trace(),
57         thread_map(2),  /* We'll always need at least 2 threads */
58         pthread_map(0),
59         pthread_counter(1),
60         obj_map(),
61         condvar_waiters_map(),
62         obj_thrd_map(),
63         mutex_map(),
64         thrd_last_action(1),
65         thrd_last_fence_release(),
66         node_stack(node_stack),
67         priv(new struct model_snapshot_members ()),
68         mo_graph(new CycleGraph()),
69         fuzzer(new Fuzzer()),
70         thrd_func_list(),
71         thrd_func_inst_lists()
72 {
73         /* Initialize a model-checker thread, for special ModelActions */
74         model_thread = new Thread(get_next_id());
75         add_thread(model_thread);
76         scheduler->register_engine(this);
77         node_stack->register_engine(this);
78 }
79
80 /** @brief Destructor */
81 ModelExecution::~ModelExecution()
82 {
83         for (unsigned int i = 0;i < get_num_threads();i++)
84                 delete get_thread(int_to_id(i));
85
86         delete mo_graph;
87         delete priv;
88 }
89
90 int ModelExecution::get_execution_number() const
91 {
92         return model->get_execution_number();
93 }
94
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
96 {
97         action_list_t *tmp = hash->get(ptr);
98         if (tmp == NULL) {
99                 tmp = new action_list_t();
100                 hash->put(ptr, tmp);
101         }
102         return tmp;
103 }
104
105 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
106 {
107         SnapVector<action_list_t> *tmp = hash->get(ptr);
108         if (tmp == NULL) {
109                 tmp = new SnapVector<action_list_t>();
110                 hash->put(ptr, tmp);
111         }
112         return tmp;
113 }
114
115 /** @return a thread ID for a new Thread */
116 thread_id_t ModelExecution::get_next_id()
117 {
118         return priv->next_thread_id++;
119 }
120
121 /** @return the number of user threads created during this execution */
122 unsigned int ModelExecution::get_num_threads() const
123 {
124         return priv->next_thread_id;
125 }
126
127 /** @return a sequence number for a new ModelAction */
128 modelclock_t ModelExecution::get_next_seq_num()
129 {
130         return ++priv->used_sequence_numbers;
131 }
132
133 /**
134  * @brief Should the current action wake up a given thread?
135  *
136  * @param curr The current action
137  * @param thread The thread that we might wake up
138  * @return True, if we should wake up the sleeping thread; false otherwise
139  */
140 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
141 {
142         const ModelAction *asleep = thread->get_pending();
143         /* Don't allow partial RMW to wake anyone up */
144         if (curr->is_rmwr())
145                 return false;
146         /* Synchronizing actions may have been backtracked */
147         if (asleep->could_synchronize_with(curr))
148                 return true;
149         /* A sleeping fence-acquire may be woken by any release operation */
150         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
151                 return true;
152         /* Fence-release + store can awake load-acquire on the same location */
153         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
154                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
155                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
156                         return true;
157         }
158         return false;
159 }
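/*
 * Illustrative sketch (not from the original source) of the last rule above:
 * a sleeping load-acquire can be woken by a plain store when the storing
 * thread has previously executed a release fence, because C++11
 * fence-to-atomic synchronization lets the load synchronize with that fence:
 *
 *   std::atomic<int> data(0), flag(0);
 *   // Thread 1                                            // Thread 2 (asleep on the load)
 *   data.store(42, std::memory_order_relaxed);
 *   std::atomic_thread_fence(std::memory_order_release);
 *   flag.store(1, std::memory_order_relaxed);              while (!flag.load(std::memory_order_acquire)) {}
 */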
160
161 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
162 {
163         for (unsigned int i = 0;i < get_num_threads();i++) {
164                 Thread *thr = get_thread(int_to_id(i));
165                 if (scheduler->is_sleep_set(thr)) {
166                         if (should_wake_up(curr, thr))
167                                 /* Remove this thread from sleep set */
168                                 scheduler->remove_sleep(thr);
169                 }
170         }
171 }
172
173 /** @brief Alert the model-checker that an incorrectly-ordered
174  * synchronization was made */
175 void ModelExecution::set_bad_synchronization()
176 {
177         priv->bad_synchronization = true;
178 }
179
180 bool ModelExecution::assert_bug(const char *msg)
181 {
182         priv->bugs.push_back(new bug_message(msg));
183
184         if (isfeasibleprefix()) {
185                 set_assert();
186                 return true;
187         }
188         return false;
189 }
190
191 /** @return True, if any bugs have been reported for this execution */
192 bool ModelExecution::have_bug_reports() const
193 {
194         return priv->bugs.size() != 0;
195 }
196
197 SnapVector<bug_message *> * ModelExecution::get_bugs() const
198 {
199         return &priv->bugs;
200 }
201
202 /**
203  * Check whether the current trace has triggered an assertion which should halt
204  * its execution.
205  *
206  * @return True, if the execution should be aborted; false otherwise
207  */
208 bool ModelExecution::has_asserted() const
209 {
210         return priv->asserted;
211 }
212
213 /**
214  * Trigger a trace assertion which should cause this execution to be halted.
215  * This can be due to a detected bug or due to an infeasibility that should
216  * halt ASAP.
217  */
218 void ModelExecution::set_assert()
219 {
220         priv->asserted = true;
221 }
222
223 /**
224  * Check if we are in a deadlock. Should only be called at the end of an
225  * execution, although it should not give false positives in the middle of an
226  * execution (there should be some ENABLED thread).
227  *
228  * @return True if program is in a deadlock; false otherwise
229  */
230 bool ModelExecution::is_deadlocked() const
231 {
232         bool blocking_threads = false;
233         for (unsigned int i = 0;i < get_num_threads();i++) {
234                 thread_id_t tid = int_to_id(i);
235                 if (is_enabled(tid))
236                         return false;
237                 Thread *t = get_thread(tid);
238                 if (!t->is_model_thread() && t->get_pending())
239                         blocking_threads = true;
240         }
241         return blocking_threads;
242 }
243
244 /**
245  * Check if this is a complete execution. That is, have all threads completed
246  * execution (rather than exiting because sleep sets have forced a redundant
247  * execution).
248  *
249  * @return True if the execution is complete.
250  */
251 bool ModelExecution::is_complete_execution() const
252 {
253         for (unsigned int i = 0;i < get_num_threads();i++)
254                 if (is_enabled(int_to_id(i)))
255                         return false;
256         return true;
257 }
258
259
260 /**
261  * Processes a read model action by selecting a write for it to read from
262  * (via the fuzzer) and adding the required modification-order edges.
263  * @param curr is the read model action to process.
264  * @param rf_set is the set of model actions we can possibly read from.
265  */
266 void ModelExecution::process_read(ModelAction *curr, SnapVector<const ModelAction *> * rf_set)
267 {
268         SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
269         while(true) {
270
271                 int index = fuzzer->selectWrite(curr, rf_set);
272                 const ModelAction *rf = (*rf_set)[index];
273
274
275                 ASSERT(rf);
276                 bool canprune = false;
277                 if (r_modification_order(curr, rf, priorset, &canprune)) {
278                         for(unsigned int i=0;i<priorset->size();i++) {
279                                 mo_graph->addEdge((*priorset)[i], rf);
280                         }
281                         read_from(curr, rf);
282                         get_thread(curr)->set_return_value(curr->get_return_value());
283                         delete priorset;
284                         if (canprune && curr->get_type() == ATOMIC_READ) {
285                                 int tid = id_to_int(curr->get_tid());
286                                 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
287                         }
288                         return;
289                 }
290                 priorset->clear();
291                 (*rf_set)[index] = rf_set->back();
292                 rf_set->pop_back();
293         }
294 }
295
296 /**
297  * Processes a lock, trylock, unlock, wait, or notify model action.
298  * @param curr is the mutex-related model action to process.
299  *
300  * The trylock operation checks whether the lock is taken.  If not, it
301  * falls through to the normal lock case.  If so, it sets a failing
302  * return value.
303  *
304  * The lock operation has already been checked that it is enabled, so
305  * it just grabs the lock and synchronizes with the previous unlock.
306  *
307  * The unlock operation has to re-enable all of the threads that are
308  * waiting on the lock.
309  *
310  * @return True if synchronization was updated; false otherwise
311  */
312 bool ModelExecution::process_mutex(ModelAction *curr)
313 {
314         cdsc::mutex *mutex = curr->get_mutex();
315         struct cdsc::mutex_state *state = NULL;
316
317         if (mutex)
318                 state = mutex->get_state();
319
320         switch (curr->get_type()) {
321         case ATOMIC_TRYLOCK: {
322                 bool success = !state->locked;
323                 curr->set_try_lock(success);
324                 if (!success) {
325                         get_thread(curr)->set_return_value(0);
326                         break;
327                 }
328                 get_thread(curr)->set_return_value(1);
329         }
330         //otherwise fall into the lock case
331         case ATOMIC_LOCK: {
332                 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
333                 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
334                 //      assert_bug("Lock access before initialization");
335                 state->locked = get_thread(curr);
336                 ModelAction *unlock = get_last_unlock(curr);
337                 //synchronize with the previous unlock statement
338                 if (unlock != NULL) {
339                         synchronize(unlock, curr);
340                         return true;
341                 }
342                 break;
343         }
344         case ATOMIC_WAIT:
345         case ATOMIC_UNLOCK: {
346                 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T...  TIMED WAITS MUST EVENTUALLY RELEASE...
347
348                 /* wake up the other threads */
349                 for (unsigned int i = 0;i < get_num_threads();i++) {
350                         Thread *t = get_thread(int_to_id(i));
351                         Thread *curr_thrd = get_thread(curr);
352                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
353                                 scheduler->wake(t);
354                 }
355
356                 /* unlock the lock - after checking who was waiting on it */
357                 state->locked = NULL;
358
359                 if (!curr->is_wait())
360                         break;/* The rest is only for ATOMIC_WAIT */
361
362                 break;
363         }
364         case ATOMIC_NOTIFY_ALL: {
365                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
366                 //activate all the waiting threads
367                 for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
368                         scheduler->wake(get_thread(*rit));
369                 }
370                 waiters->clear();
371                 break;
372         }
373         case ATOMIC_NOTIFY_ONE: {
374                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
375                 if (waiters->size() != 0) {
376                         Thread * thread = fuzzer->selectNotify(waiters);
377                         scheduler->wake(thread);
378                 }
379                 break;
380         }
381
382         default:
383                 ASSERT(0);
384         }
385         return false;
386 }
387
388 /**
389  * Process a write ModelAction: adds its modification-order constraints to
390  * the mo_graph and sets the writing thread's return value.
391  * @param curr The ModelAction to process
392  */
393 void ModelExecution::process_write(ModelAction *curr)
394 {
395         w_modification_order(curr);
396         get_thread(curr)->set_return_value(VALUE_NONE);
397 }
398
399 /**
400  * Process a fence ModelAction
401  * @param curr The ModelAction to process
402  * @return True if synchronization was updated
403  */
404 bool ModelExecution::process_fence(ModelAction *curr)
405 {
406         /*
407          * fence-relaxed: no-op
408          * fence-release: only log the occurrence (not in this function), for
409          *   use in later synchronization
410          * fence-acquire (this function): search for hypothetical release
411          *   sequences
412          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
413          */
414         bool updated = false;
415         if (curr->is_acquire()) {
416                 action_list_t *list = &action_trace;
417                 action_list_t::reverse_iterator rit;
418                 /* Find X : is_read(X) && X --sb-> curr */
419                 for (rit = list->rbegin();rit != list->rend();rit++) {
420                         ModelAction *act = *rit;
421                         if (act == curr)
422                                 continue;
423                         if (act->get_tid() != curr->get_tid())
424                                 continue;
425                         /* Stop at the beginning of the thread */
426                         if (act->is_thread_start())
427                                 break;
428                         /* Stop once we reach a prior fence-acquire */
429                         if (act->is_fence() && act->is_acquire())
430                                 break;
431                         if (!act->is_read())
432                                 continue;
433                         /* read-acquire will find its own release sequences */
434                         if (act->is_acquire())
435                                 continue;
436
437                         /* Establish hypothetical release sequences */
438                         rel_heads_list_t release_heads;
439                         get_release_seq_heads(curr, act, &release_heads);
440                         for (unsigned int i = 0;i < release_heads.size();i++)
441                                 synchronize(release_heads[i], curr);
442                         if (release_heads.size() != 0)
443                                 updated = true;
444                 }
445         }
446         return updated;
447 }
448
449 /**
450  * @brief Process the current action for thread-related activity
451  *
452  * Performs current-action processing for a THREAD_* ModelAction. Processing
453  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
454  * synchronization, etc.  This function is a no-op for non-THREAD actions
455  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
456  *
457  * @param curr The current action
458  * @return True if synchronization was updated or a thread completed
459  */
460 bool ModelExecution::process_thread_action(ModelAction *curr)
461 {
462         bool updated = false;
463
464         switch (curr->get_type()) {
465         case THREAD_CREATE: {
466                 thrd_t *thrd = (thrd_t *)curr->get_location();
467                 struct thread_params *params = (struct thread_params *)curr->get_value();
468                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
469                 curr->set_thread_operand(th);
470                 add_thread(th);
471                 th->set_creation(curr);
472                 break;
473         }
474         case PTHREAD_CREATE: {
475                 (*(uint32_t *)curr->get_location()) = pthread_counter++;
476
477                 struct pthread_params *params = (struct pthread_params *)curr->get_value();
478                 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
479                 curr->set_thread_operand(th);
480                 add_thread(th);
481                 th->set_creation(curr);
482
483                 if ( pthread_map.size() < pthread_counter )
484                         pthread_map.resize( pthread_counter );
485                 pthread_map[ pthread_counter-1 ] = th;
486
487                 break;
488         }
489         case THREAD_JOIN: {
490                 Thread *blocking = curr->get_thread_operand();
491                 ModelAction *act = get_last_action(blocking->get_id());
492                 synchronize(act, curr);
493                 updated = true; /* trigger rel-seq checks */
494                 break;
495         }
496         case PTHREAD_JOIN: {
497                 Thread *blocking = curr->get_thread_operand();
498                 ModelAction *act = get_last_action(blocking->get_id());
499                 synchronize(act, curr);
500                 updated = true; /* trigger rel-seq checks */
501                 break;  // WL: to be added (modified)
502         }
503
504         case THREAD_FINISH: {
505                 Thread *th = get_thread(curr);
506                 /* Wake up any joining threads */
507                 for (unsigned int i = 0;i < get_num_threads();i++) {
508                         Thread *waiting = get_thread(int_to_id(i));
509                         if (waiting->waiting_on() == th &&
510                                         waiting->get_pending()->is_thread_join())
511                                 scheduler->wake(waiting);
512                 }
513                 th->complete();
514                 updated = true; /* trigger rel-seq checks */
515                 break;
516         }
517         case THREAD_START: {
518                 break;
519         }
520         default:
521                 break;
522         }
523
524         return updated;
525 }
526
527 /**
528  * Initialize the current action by performing one or more of the following
529  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
530  * in the NodeStack, manipulating backtracking sets, allocating and
531  * initializing clock vectors, and computing the promises to fulfill.
532  *
533  * @param curr The current action, as passed from the user context; may be
534  * freed/invalidated after the execution of this function, with a different
535  * action "returned" in its place (pass-by-reference)
536  * @return True if curr is a newly-explored action; false otherwise
537  */
538 bool ModelExecution::initialize_curr_action(ModelAction **curr)
539 {
540         ModelAction *newcurr;
541
542         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
543                 newcurr = process_rmw(*curr);
544                 delete *curr;
545
546                 *curr = newcurr;
547                 return false;
548         }
549
550         (*curr)->set_seq_number(get_next_seq_num());
551
552         newcurr = node_stack->explore_action(*curr);
553         if (newcurr) {
554                 /* First restore type and order in case of RMW operation */
555                 if ((*curr)->is_rmwr())
556                         newcurr->copy_typeandorder(*curr);
557
558                 ASSERT((*curr)->get_location() == newcurr->get_location());
559                 newcurr->copy_from_new(*curr);
560
561                 /* Discard duplicate ModelAction; use action from NodeStack */
562                 delete *curr;
563
564                 /* Always compute new clock vector */
565                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
566
567                 *curr = newcurr;
568                 return false;   /* Action was explored previously */
569         } else {
570                 newcurr = *curr;
571
572                 /* Always compute new clock vector */
573                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
574
575                 /* Assign most recent release fence */
576                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
577
578                 return true;    /* This was a new ModelAction */
579         }
580 }
581
582 /**
583  * @brief Establish reads-from relation between two actions
584  *
585  * Perform basic operations involved with establishing a concrete rf relation,
586  * including setting the ModelAction data and checking for release sequences.
587  *
588  * @param act The action that is reading (must be a read)
589  * @param rf The action from which we are reading (must be a write)
590  *
591  * @return True if this read established synchronization
592  */
593
594 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
595 {
596         ASSERT(rf);
597         ASSERT(rf->is_write());
598
599         act->set_read_from(rf);
600         if (act->is_acquire()) {
601                 rel_heads_list_t release_heads;
602                 get_release_seq_heads(act, act, &release_heads);
603                 int num_heads = release_heads.size();
604                 for (unsigned int i = 0;i < release_heads.size();i++)
605                         if (!synchronize(release_heads[i], act))
606                                 num_heads--;
607                 return num_heads > 0;
608         }
609         return false;
610 }
611
612 /**
613  * @brief Synchronizes two actions
614  *
615  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
616  * This function performs the synchronization as well as providing other hooks
617  * for other checks along with synchronization.
618  *
619  * @param first The left-hand side of the synchronizes-with relation
620  * @param second The right-hand side of the synchronizes-with relation
621  * @return True if the synchronization was successful (i.e., was consistent
622  * with the execution order); false otherwise
623  */
624 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
625 {
626         if (*second < *first) {
627                 set_bad_synchronization();
628                 return false;
629         }
630         return second->synchronize_with(first);
631 }
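/*
 * Illustrative example (not from the original source): the most common case
 * handled by synchronize() is a release write read by an acquire read, e.g.
 * message passing over 'flag'.  The reader inherits the writer's clock vector,
 * so the earlier write to 'data' happens before everything the reader does next:
 *
 *   std::atomic<int> flag(0);
 *   int data = 0;
 *   // Thread 1                                   // Thread 2
 *   data = 42;
 *   flag.store(1, std::memory_order_release);     if (flag.load(std::memory_order_acquire) == 1)
 *                                                         assert(data == 42);   // cannot fail
 */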
632
633 /**
634  * @brief Check whether a model action is enabled.
635  *
636  * Checks whether an operation can currently succeed (i.e., whether the lock
637  * it needs is unlocked, or whether the thread it joins has already completed).
638  *
639  * For yield-blocking, yields are never enabled.
640  *
641  * @param curr is the ModelAction to check whether it is enabled.
642  * @return a bool that indicates whether the action is enabled.
643  */
644 bool ModelExecution::check_action_enabled(ModelAction *curr) {
645         if (curr->is_lock()) {
646                 cdsc::mutex *lock = curr->get_mutex();
647                 struct cdsc::mutex_state *state = lock->get_state();
648                 if (state->locked)
649                         return false;
650         } else if (curr->is_thread_join()) {
651                 Thread *blocking = curr->get_thread_operand();
652                 if (!blocking->is_complete()) {
653                         return false;
654                 }
655         }
656
657         return true;
658 }
659
660 /**
661  * This is the heart of the model checker routine. It performs model-checking
662  * actions corresponding to a given "current action." Among other processes, it
663  * calculates reads-from relationships, updates synchronization clock vectors,
664  * forms a memory_order constraints graph, and handles replay/backtrack
665  * execution when running permutations of previously-observed executions.
666  *
667  * @param curr The current action to process
668  * @return The ModelAction that is actually executed; may be different than
669  * curr
670  */
671 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
672 {
673         ASSERT(curr);
674         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
675         bool newly_explored = initialize_curr_action(&curr);
676
677         DBG();
678
679         wake_up_sleeping_actions(curr);
680
681         /* Add the action to lists before any other model-checking tasks */
682         if (!second_part_of_rmw && curr->get_type() != NOOP)
683                 add_action_to_lists(curr);
684
685         SnapVector<const ModelAction *> * rf_set = NULL;
686         /* Build may_read_from set for newly-created actions */
687         if (newly_explored && curr->is_read())
688                 rf_set = build_may_read_from(curr);
689
690         process_thread_action(curr);
691
692         if (curr->is_read() && !second_part_of_rmw) {
693                 process_read(curr, rf_set);
694                 delete rf_set;
695         } else {
696                 ASSERT(rf_set == NULL);
697         }
698
699         if (curr->is_write())
700                 process_write(curr);
701
702         if (curr->is_fence())
703                 process_fence(curr);
704
705         if (curr->is_mutex_op())
706                 process_mutex(curr);
707
708         return curr;
709 }
710
711 /**
712  * This is the strongest feasibility check available.
713  * @return whether the current trace (partial or complete) must be a prefix of
714  * a feasible trace.
715  */
716 bool ModelExecution::isfeasibleprefix() const
717 {
718         return !is_infeasible();
719 }
720
721 /**
722  * Print diagnostic information about an infeasible execution
723  * @param prefix A string to prefix the output with; if NULL, then a default
724  * message prefix will be provided
725  */
726 void ModelExecution::print_infeasibility(const char *prefix) const
727 {
728         char buf[100];
729         char *ptr = buf;
730         if (priv->bad_synchronization)
731                 ptr += sprintf(ptr, "[bad sw ordering]");
732         if (ptr != buf)
733                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
734 }
735
736 /**
737  * Check if the current partial trace is infeasible. Does not check any
738  * end-of-execution flags, which might rule out the execution. Thus, this is
739  * useful only for ruling an execution as infeasible.
740  * @return whether the current partial trace is infeasible.
741  */
742 bool ModelExecution::is_infeasible() const
743 {
744         return priv->bad_synchronization;
745 }
746
747 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
748 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
749         ModelAction *lastread = get_last_action(act->get_tid());
750         lastread->process_rmw(act);
751         if (act->is_rmw()) {
752                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
753         }
754         return lastread;
755 }
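/*
 * Illustrative note (not from the original source): the addRMWEdge call above
 * enforces RMW atomicity, i.e. a successful RMW must immediately follow, in
 * the modification order, the write it reads from.  For example:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_relaxed);       // W
 *   // Thread 2: x.fetch_add(1, std::memory_order_relaxed);   // RMW reading from W
 *
 * No other write to x may be ordered between W and the RMW; the cycle graph
 * forces any such write to come after the RMW.
 */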
756
757 /**
758  * @brief Updates the mo_graph with the constraints imposed from the current
759  * read.
760  *
761  * Basic idea is the following: Go through each other thread and find
762  * the last action that happened before our read.  Two cases:
763  *
764  * -# The action is a write: that write must either occur before
765  * the write we read from or be the write we read from.
766  * -# The action is a read: the write that the read action read from
767  * must occur before the write we read from or be the same write.
768  *
769  * @param curr The current action. Must be a read.
770  * @param rf The ModelAction that curr reads from. Must be a write.
771  * @return True if reading from rf is consistent with the modification order; false if it would create a cycle in the mo_graph
772  */
773
774 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
775 {
776         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
777         unsigned int i;
778         ASSERT(curr->is_read());
779
780         /* Last SC fence in the current thread */
781         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
782
783         int tid = curr->get_tid();
784         ModelAction *prev_same_thread = NULL;
785         /* Iterate over all threads */
786         for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
787                 /* Last SC fence in thread tid */
788                 ModelAction *last_sc_fence_thread_local = NULL;
789                 if (i != 0)
790                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
791
792                 /* Last SC fence in thread tid, before last SC fence in current thread */
793                 ModelAction *last_sc_fence_thread_before = NULL;
794                 if (last_sc_fence_local)
795                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
796
797                 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
798                 if (prev_same_thread != NULL &&
799                                 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
800                                 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
801                         continue;
802                 }
803
804                 /* Iterate over actions in thread, starting from most recent */
805                 action_list_t *list = &(*thrd_lists)[tid];
806                 action_list_t::reverse_iterator rit;
807                 for (rit = list->rbegin();rit != list->rend();rit++) {
808                         ModelAction *act = *rit;
809
810                         /* Skip curr */
811                         if (act == curr)
812                                 continue;
813                         /* Don't want to add reflexive edges on 'rf' */
814                         if (act->equals(rf)) {
815                                 if (act->happens_before(curr))
816                                         break;
817                                 else
818                                         continue;
819                         }
820
821                         if (act->is_write()) {
822                                 /* C++, Section 29.3 statement 5 */
823                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
824                                                 *act < *last_sc_fence_thread_local) {
825                                         if (mo_graph->checkReachable(rf, act))
826                                                 return false;
827                                         priorset->push_back(act);
828                                         break;
829                                 }
830                                 /* C++, Section 29.3 statement 4 */
831                                 else if (act->is_seqcst() && last_sc_fence_local &&
832                                                                  *act < *last_sc_fence_local) {
833                                         if (mo_graph->checkReachable(rf, act))
834                                                 return false;
835                                         priorset->push_back(act);
836                                         break;
837                                 }
838                                 /* C++, Section 29.3 statement 6 */
839                                 else if (last_sc_fence_thread_before &&
840                                                                  *act < *last_sc_fence_thread_before) {
841                                         if (mo_graph->checkReachable(rf, act))
842                                                 return false;
843                                         priorset->push_back(act);
844                                         break;
845                                 }
846                         }
847
848                         /*
849                          * Include at most one act per-thread that "happens
850                          * before" curr
851                          */
852                         if (act->happens_before(curr)) {
853                                 if (i==0) {
854                                         if (last_sc_fence_local == NULL ||
855                                                         (*last_sc_fence_local < *prev_same_thread)) {
856                                                 prev_same_thread = act;
857                                         }
858                                 }
859                                 if (act->is_write()) {
860                                         if (mo_graph->checkReachable(rf, act))
861                                                 return false;
862                                         priorset->push_back(act);
863                                 } else {
864                                         const ModelAction *prevrf = act->get_reads_from();
865                                         if (!prevrf->equals(rf)) {
866                                                 if (mo_graph->checkReachable(rf, prevrf))
867                                                         return false;
868                                                 priorset->push_back(prevrf);
869                                         } else {
870                                                 if (act->get_tid() == curr->get_tid()) {
871                                                         //Can prune curr from obj list
872                                                         *canprune = true;
873                                                 }
874                                         }
875                                 }
876                                 break;
877                         }
878                 }
879         }
880         return true;
881 }
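/*
 * Worked example (illustrative, not from the original source) of case (1)
 * above, with all accesses relaxed:
 *
 *   // Thread 1: x.store(1);     // W1
 *   // Thread 2: x.store(2);     // W2
 *   // Thread 1: r = x.load();   // curr, candidate rf = W2
 *
 * W1 is sequenced before the load, so it happens before curr; it is recorded
 * in priorset and the caller adds W1 --mo--> W2.  If W2 were already ordered
 * before W1 in the mo_graph (checkReachable(rf, W1)), reading from W2 would
 * be rejected instead.
 */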
882
883 /**
884  * Updates the mo_graph with the constraints imposed from the current write.
885  *
886  * Basic idea is the following: Go through each other thread and find
887  * the latest action that happened before our write.  Two cases:
888  *
889  * (1) The action is a write => that write must occur before
890  * the current write
891  *
892  * (2) The action is a read => the write that the read action read from
893  * must occur before the current write.
894  *
895  * This method also handles two other issues:
896  *
897  * (I) Sequential Consistency: Making sure that if the current write is
898  * seq_cst, that it occurs after the previous seq_cst write.
899  *
900  * (II) Sending the write back to non-synchronizing reads.
901  *
902  * Unlike its read counterpart, this function returns nothing; it simply
903  * adds the required modification-order edges to the mo_graph.
904  *
905  * @param curr The current action. Must be a write.
906  */
907 void ModelExecution::w_modification_order(ModelAction *curr)
908 {
909         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
910         unsigned int i;
911         ASSERT(curr->is_write());
912
913         if (curr->is_seqcst()) {
914                 /* We have to at least see the last sequentially consistent write,
915                          so we are initialized. */
916                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
917                 if (last_seq_cst != NULL) {
918                         mo_graph->addEdge(last_seq_cst, curr);
919                 }
920         }
921
922         /* Last SC fence in the current thread */
923         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
924
925         /* Iterate over all threads */
926         for (i = 0;i < thrd_lists->size();i++) {
927                 /* Last SC fence in thread i, before last SC fence in current thread */
928                 ModelAction *last_sc_fence_thread_before = NULL;
929                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
930                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
931
932                 /* Iterate over actions in thread, starting from most recent */
933                 action_list_t *list = &(*thrd_lists)[i];
934                 action_list_t::reverse_iterator rit;
935                 bool force_edge = false;
936                 for (rit = list->rbegin();rit != list->rend();rit++) {
937                         ModelAction *act = *rit;
938                         if (act == curr) {
939                                 /*
940                                  * 1) If RMW and it actually read from something, then we
941                                  * already have all relevant edges, so just skip to next
942                                  * thread.
943                                  *
944                                  * 2) If RMW and it didn't read from anything, we should add
945                                  * whatever edge we can get to speed up convergence.
946                                  *
947                                  * 3) If normal write, we need to look at earlier actions, so
948                                  * continue processing list.
949                                  */
950                                 force_edge = true;
951                                 if (curr->is_rmw()) {
952                                         if (curr->get_reads_from() != NULL)
953                                                 break;
954                                         else
955                                                 continue;
956                                 } else
957                                         continue;
958                         }
959
960                         /* C++, Section 29.3 statement 7 */
961                         if (last_sc_fence_thread_before && act->is_write() &&
962                                         *act < *last_sc_fence_thread_before) {
963                                 mo_graph->addEdge(act, curr, force_edge);
964                                 break;
965                         }
966
967                         /*
968                          * Include at most one act per-thread that "happens
969                          * before" curr
970                          */
971                         if (act->happens_before(curr)) {
972                                 /*
973                                  * Note: if act is RMW, just add edge:
974                                  *   act --mo--> curr
975                                  * The following edge should be handled elsewhere:
976                                  *   readfrom(act) --mo--> act
977                                  */
978                                 if (act->is_write())
979                                         mo_graph->addEdge(act, curr, force_edge);
980                                 else if (act->is_read()) {
981                                         //if previous read accessed a null, just keep going
982                                         mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
983                                 }
984                                 break;
985                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
986                                                                  !act->same_thread(curr)) {
987                                 /* We have an action that:
988                                    (1) did not happen before us
989                                    (2) is a read and we are a write
990                                    (3) cannot synchronize with us
991                                    (4) is in a different thread
992                                    =>
993                                    that read could potentially read from our write.  Note that
994                                    these checks are overly conservative at this point, we'll
995                                    do more checks before actually removing the
996                                    pendingfuturevalue.
997
998                                  */
999
1000                         }
1001                 }
1002         }
1003 }
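/*
 * Worked example (illustrative, not from the original source) of case (2)
 * above:
 *
 *   // Thread 1: x.store(1, std::memory_order_release);    // W1
 *   // Thread 2: r = x.load(std::memory_order_acquire);    // reads from W1
 *   // Thread 2: x.store(2, std::memory_order_relaxed);    // curr = W2
 *
 * The load is sequenced before W2 and read from W1, so the edge
 * W1 --mo--> W2 is added to the mo_graph.
 */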
1004
1005 /**
1006  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1007  * some constraints. This method checks the following constraints (others
1008  * require compiler support):
1009  *
1010  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1011  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1012  */
1013 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1014 {
1015         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1016         unsigned int i;
1017         /* Iterate over all threads */
1018         for (i = 0;i < thrd_lists->size();i++) {
1019                 const ModelAction *write_after_read = NULL;
1020
1021                 /* Iterate over actions in thread, starting from most recent */
1022                 action_list_t *list = &(*thrd_lists)[i];
1023                 action_list_t::reverse_iterator rit;
1024                 for (rit = list->rbegin();rit != list->rend();rit++) {
1025                         ModelAction *act = *rit;
1026
1027                         /* Don't disallow due to act == reader */
1028                         if (!reader->happens_before(act) || reader == act)
1029                                 break;
1030                         else if (act->is_write())
1031                                 write_after_read = act;
1032                         else if (act->is_read() && act->get_reads_from() != NULL)
1033                                 write_after_read = act->get_reads_from();
1034                 }
1035
1036                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1037                         return false;
1038         }
1039         return true;
1040 }
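/*
 * Worked example (illustrative, not from the original source):
 *
 *   // Thread 1: r = x.load(std::memory_order_relaxed);    // X (reader)
 *   // Thread 1: x.store(1, std::memory_order_relaxed);    // Y
 *   // Thread 2: x.store(2, std::memory_order_relaxed);    // Z (candidate writer)
 *
 * with Y --mo--> Z already in the mo_graph.  X happens before Y, so letting X
 * read from Z would violate the first rule above; the scan finds Y as
 * write_after_read, sees that Z is reachable from Y, and returns false.
 */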
1041
1042 /**
1043  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1044  * The ModelAction under consideration is expected to be taking part in
1045  * release/acquire synchronization as an object of the "reads from" relation.
1046  * Note that this can only provide release sequence support for RMW chains
1047  * which do not read from the future, as those actions cannot be traced until
1048  * their "promise" is fulfilled. Similarly, we may not even establish the
1049  * presence of a release sequence with certainty, as some modification order
1050  * constraints may be decided further in the future. Thus, this function
1051  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1052  * and a boolean representing certainty.
1053  *
1054  * @param rf The action that might be part of a release sequence. Must be a
1055  * write.
1056  * @param release_heads A pass-by-reference style return parameter. After
1057  * execution of this function, release_heads will contain the heads of all the
1058  * relevant release sequences, if any exist with certainty
1059  * @return true, if the ModelExecution is certain that release_heads is complete;
1060  * false otherwise
1061  */
1062 bool ModelExecution::release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const
1063 {
1064
1065         for ( ;rf != NULL;rf = rf->get_reads_from()) {
1066                 ASSERT(rf->is_write());
1067
1068                 if (rf->is_release())
1069                         release_heads->push_back(rf);
1070                 else if (rf->get_last_fence_release())
1071                         release_heads->push_back(rf->get_last_fence_release());
1072                 if (!rf->is_rmw())
1073                         break;/* End of RMW chain */
1074
1075                 /** @todo Need to be smarter here...  In the linux lock
1076                  * example, this will run to the beginning of the program for
1077                  * every acquire. */
1078                 /** @todo The way to be smarter here is to keep going until 1
1079                  * thread has a release preceded by an acquire and you've seen
1080                  *       both. */
1081
1082                 /* acq_rel RMW is a sufficient stopping condition */
1083                 if (rf->is_acquire() && rf->is_release())
1084                         return true;/* complete */
1085         };
1086         ASSERT(rf);     // Needs to be real write
1087
1088         if (rf->is_release())
1089                 return true;/* complete */
1090
1091         /* else relaxed write
1092          * - check for fence-release in the same thread (29.8, stmt. 3)
1093          * - check modification order for contiguous subsequence
1094          *   -> rf must be same thread as release */
1095
1096         const ModelAction *fence_release = rf->get_last_fence_release();
1097         /* Synchronize with a fence-release unconditionally; we don't need to
1098          * find any more "contiguous subsequence..." for it */
1099         if (fence_release)
1100                 release_heads->push_back(fence_release);
1101
1102         return true;    /* complete */
1103 }
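/*
 * Illustrative release-sequence sketch (assumes C++11 semantics; not from the
 * original source).  Walking the reads-from chain backwards finds the release
 * head even when the acquire load reads from a later RMW in the sequence:
 *
 *   std::atomic<int> x(0);
 *   // Thread 1: x.store(1, std::memory_order_release);       // release head
 *   // Thread 2: x.fetch_add(1, std::memory_order_relaxed);   // RMW continues the sequence
 *   // Thread 3: if (x.load(std::memory_order_acquire) == 2)  // synchronizes with Thread 1's store
 *   //                 ...
 */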
1104
1105 /**
1106  * An interface for getting the release sequence head(s) with which a
1107  * given ModelAction must synchronize. This function only returns a non-empty
1108  * result when it can locate a release sequence head with certainty. Otherwise,
1109  * it may mark the internal state of the ModelExecution so that it will handle
1110  * the release sequence at a later time, causing @a acquire to update its
1111  * synchronization at some later point in execution.
1112  *
1113  * @param acquire The 'acquire' action that may synchronize with a release
1114  * sequence
1115  * @param read The read action that may read from a release sequence; this may
1116  * be the same as acquire, or else an earlier action in the same thread (i.e.,
1117  * when 'acquire' is a fence-acquire)
1118  * @param release_heads A pass-by-reference return parameter. Will be filled
1119  * with the head(s) of the release sequence(s), if they exist with certainty.
1120  * @see ModelExecution::release_seq_heads
1121  */
1122 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
1123                                            ModelAction *read, rel_heads_list_t *release_heads)
1124 {
1125         const ModelAction *rf = read->get_reads_from();
1126
1127         release_seq_heads(rf, release_heads);
1128 }
1129
1130 /**
1131  * Performs various bookkeeping operations for the current ModelAction. For
1132  * instance, adds action to the per-object, per-thread action vector and to the
1133  * action trace list of all thread actions.
1134  *
1135  * @param act is the ModelAction to add.
1136  */
1137 void ModelExecution::add_action_to_lists(ModelAction *act)
1138 {
1139         int tid = id_to_int(act->get_tid());
1140         ModelAction *uninit = NULL;
1141         int uninit_id = -1;
1142         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1143         if (list->empty() && act->is_atomic_var()) {
1144                 uninit = get_uninitialized_action(act);
1145                 uninit_id = id_to_int(uninit->get_tid());
1146                 list->push_front(uninit);
1147         }
1148         list->push_back(act);
1149
1150         action_trace.push_back(act);
1151         if (uninit)
1152                 action_trace.push_front(uninit);
1153
1154         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1155         if (tid >= (int)vec->size())
1156                 vec->resize(priv->next_thread_id);
1157         (*vec)[tid].push_back(act);
1158         if (uninit)
1159                 (*vec)[uninit_id].push_front(uninit);
1160
1161         if ((int)thrd_last_action.size() <= tid)
1162                 thrd_last_action.resize(get_num_threads());
1163         thrd_last_action[tid] = act;
1164         if (uninit)
1165                 thrd_last_action[uninit_id] = uninit;
1166
1167         if (act->is_fence() && act->is_release()) {
1168                 if ((int)thrd_last_fence_release.size() <= tid)
1169                         thrd_last_fence_release.resize(get_num_threads());
1170                 thrd_last_fence_release[tid] = act;
1171         }
1172
1173         if (act->is_wait()) {
1174                 void *mutex_loc = (void *) act->get_value();
1175                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1176
1177                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1178                 if (tid >= (int)vec->size())
1179                         vec->resize(priv->next_thread_id);
1180                 (*vec)[tid].push_back(act);
1181         }
1182 }
1183
1184 /**
1185  * @brief Get the last action performed by a particular Thread
1186  * @param tid The thread ID of the Thread in question
1187  * @return The last action in the thread
1188  */
1189 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1190 {
1191         int threadid = id_to_int(tid);
1192         if (threadid < (int)thrd_last_action.size())
1193                 return thrd_last_action[id_to_int(tid)];
1194         else
1195                 return NULL;
1196 }
1197
1198 /**
1199  * @brief Get the last fence release performed by a particular Thread
1200  * @param tid The thread ID of the Thread in question
1201  * @return The last fence release in the thread, if one exists; NULL otherwise
1202  */
1203 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1204 {
1205         int threadid = id_to_int(tid);
1206         if (threadid < (int)thrd_last_fence_release.size())
1207                 return thrd_last_fence_release[id_to_int(tid)];
1208         else
1209                 return NULL;
1210 }
1211
1212 /**
1213  * Gets the last memory_order_seq_cst write (in the total global sequence)
1214  * performed on a particular object (i.e., memory location), not including the
1215  * current action.
1216  * @param curr The current ModelAction; also denotes the object location to
1217  * check
1218  * @return The last seq_cst write
1219  */
1220 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1221 {
1222         void *location = curr->get_location();
1223         action_list_t *list = obj_map.get(location);
1224         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1225         action_list_t::reverse_iterator rit;
1226         for (rit = list->rbegin();(*rit) != curr;rit++)
1227                 ;
1228         rit++;  /* Skip past curr */
1229         for ( ;rit != list->rend();rit++)
1230                 if ((*rit)->is_write() && (*rit)->is_seqcst())
1231                         return *rit;
1232         return NULL;
1233 }
1234
1235 /**
1236  * Gets the last memory_order_seq_cst fence (in the total global sequence)
1237  * performed in a particular thread, prior to a particular fence.
1238  * @param tid The ID of the thread to check
1239  * @param before_fence The fence from which to begin the search; if NULL, then
1240  * search for the most recent fence in the thread.
1241  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
1242  */
1243 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1244 {
1245         /* All fences should have location FENCE_LOCATION */
1246         action_list_t *list = obj_map.get(FENCE_LOCATION);
1247
1248         if (!list)
1249                 return NULL;
1250
1251         action_list_t::reverse_iterator rit = list->rbegin();
1252
1253         if (before_fence) {
1254                 for (;rit != list->rend();rit++)
1255                         if (*rit == before_fence)
1256                                 break;
1257
1258                 ASSERT(*rit == before_fence);
1259                 rit++;
1260         }
1261
1262         for (;rit != list->rend();rit++)
1263                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1264                         return *rit;
1265         return NULL;
1266 }
1267
1268 /**
1269  * Gets the last unlock operation performed on a particular mutex (i.e., memory
1270  * location). This function identifies the mutex according to the current
1271  * action, which is presumed to operate on the same mutex.
1272  * @param curr The current ModelAction; also denotes the object location to
1273  * check
1274  * @return The last unlock operation
1275  */
1276 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1277 {
1278         void *location = curr->get_location();
1279
1280         action_list_t *list = obj_map.get(location);
1281         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1282         action_list_t::reverse_iterator rit;
1283         for (rit = list->rbegin();rit != list->rend();rit++)
1284                 if ((*rit)->is_unlock() || (*rit)->is_wait())
1285                         return *rit;
1286         return NULL;
1287 }
1288
1289 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1290 {
1291         ModelAction *parent = get_last_action(tid);
1292         if (!parent)
1293                 parent = get_thread(tid)->get_creation();
1294         return parent;
1295 }
1296
1297 /**
1298  * Returns the clock vector for a given thread.
1299  * @param tid The thread whose clock vector we want
1300  * @return Desired clock vector
1301  */
1302 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1303 {
1304         ModelAction *firstaction=get_parent_action(tid);
1305         return firstaction != NULL ? firstaction->get_cv() : NULL;
1306 }
1307
1308 bool valequals(uint64_t val1, uint64_t val2, int size) {
1309         switch(size) {
1310         case 1:
1311                 return ((uint8_t)val1) == ((uint8_t)val2);
1312         case 2:
1313                 return ((uint16_t)val1) == ((uint16_t)val2);
1314         case 4:
1315                 return ((uint32_t)val1) == ((uint32_t)val2);
1316         case 8:
1317                 return val1==val2;
1318         default:
1319                 ASSERT(0);
1320                 return false;
1321         }
1322 }
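/*
 * Quick sketch of valequals() semantics (the example values are made up):
 * only the low `size` bytes take part in the comparison, so narrower
 * accesses ignore the high-order bytes.
 *
 *   valequals(0x11223344, 0xff223344, 4);  // false -- 32-bit values differ
 *   valequals(0x11223344, 0xff223344, 2);  // true  -- low 16 bits (0x3344) match
 *   valequals(0x11223344, 0xff223344, 1);  // true  -- low byte (0x44) matches
 */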
1323
1324 /**
1325  * Build up an initial set of all past writes that this 'read' action may read
1326  * from.
1327  *
1328  * @param curr The current ModelAction being explored; it must be a 'read' operation.
1329  * @return A vector of the feasible writes (the may-read-from set)
1330  */
1331 SnapVector<const ModelAction *> *  ModelExecution::build_may_read_from(ModelAction *curr)
1332 {
1333         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1334         unsigned int i;
1335         ASSERT(curr->is_read());
1336
1337         ModelAction *last_sc_write = NULL;
1338
1339         if (curr->is_seqcst())
1340                 last_sc_write = get_last_seq_cst_write(curr);
1341
1342         SnapVector<const ModelAction *> * rf_set = new SnapVector<const ModelAction *>();
1343
1344         /* Iterate over all threads */
1345         for (i = 0;i < thrd_lists->size();i++) {
1346                 /* Iterate over actions in thread, starting from most recent */
1347                 action_list_t *list = &(*thrd_lists)[i];
1348                 action_list_t::reverse_iterator rit;
1349                 for (rit = list->rbegin();rit != list->rend();rit++) {
1350                         const ModelAction *act = *rit;
1351
1352                         /* Only consider 'write' actions */
1353                         if (!act->is_write()) {
1354                                 if (act != curr && act->is_read() && act->happens_before(curr)) {
1355                                         const ModelAction *tmp = act->get_reads_from();
1356                                         if (((unsigned int) id_to_int(tmp->get_tid()))==i)
1357                                                 act = tmp;
1358                                         else
1359                                                 break;
1360                                 } else
1361                                         continue;
1362                         }
1363
1364                         if (act == curr)
1365                                 continue;
1366
1367                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1368                         bool allow_read = true;
1369
1370                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1371                                 allow_read = false;
1372
1373                         /* Need to check whether we would have two RMWs reading from the same write */
1374                         if (curr->is_rmwr()) {
1375                                 /* It is okay if we have a failing CAS */
1376                                 if (!curr->is_rmwrcas() ||
1377                                                 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1378                                         //Need to make sure we aren't the second RMW
1379                                         CycleNode * node = mo_graph->getNode_noCreate(act);
1380                                         if (node != NULL && node->getRMW() != NULL) {
1381                                                 //we are the second RMW
1382                                                 allow_read = false;
1383                                         }
1384                                 }
1385                         }
1386
1387                         if (allow_read) {
1388                                 /* Only add feasible reads-from candidates */
1389                                 rf_set->push_back(act);
1390                         }
1391
1392                         /* Include at most one act per-thread that "happens before" curr */
1393                         if (act->happens_before(curr))
1394                                 break;
1395                 }
1396         }
1397
1398         if (DBG_ENABLED()) {
1399                 model_print("Reached read action:\n");
1400                 curr->print();
1401                 model_print("End printing may_read_from\n");
1402         }
1403         return rf_set;
1404 }
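/*
 * Illustrative sketch (the program fragment is made up) of what the scan
 * above produces:
 *
 *   Thread 1: atomic_store_explicit(&x, 1, memory_order_relaxed);   // W1
 *             atomic_store_explicit(&x, 2, memory_order_relaxed);   // W2
 *   Thread 2: r = atomic_load_explicit(&x, memory_order_relaxed);   // R
 *
 * If the threads are unsynchronized, neither write happens before R, so the
 * per-thread reverse scan adds both W2 and W1 to rf_set.  If Thread 1 is
 * joined before R executes, W2 happens before R: the scan adds W2 and then
 * stops for that thread, so W1 (which is hidden by W2) is excluded.
 */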
1405
1406 /**
1407  * @brief Get an action representing an uninitialized atomic
1408  *
1409  * This function may create a new one or try to retrieve one from the NodeStack
1410  *
1411  * @param curr The current action, which prompts the creation of an UNINIT action
1412  * @return A pointer to the UNINIT ModelAction
1413  */
1414 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
1415 {
1416         Node *node = curr->get_node();
1417         ModelAction *act = node->get_uninit_action();
1418         if (!act) {
1419                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1420                 node->set_uninit_action(act);
1421         }
1422         act->create_cv(NULL);
1423         return act;
1424 }
1425
1426 static void print_list(const action_list_t *list)
1427 {
1428         action_list_t::const_iterator it;
1429
1430         model_print("------------------------------------------------------------------------------------\n");
1431         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1432         model_print("------------------------------------------------------------------------------------\n");
1433
1434         unsigned int hash = 0;
1435
1436         for (it = list->begin();it != list->end();it++) {
1437                 const ModelAction *act = *it;
1438                 if (act->get_seq_number() > 0)
1439                         act->print();
1440                 hash = hash^(hash<<3)^((*it)->hash());
1441         }
1442         model_print("HASH %u\n", hash);
1443         model_print("------------------------------------------------------------------------------------\n");
1444 }
1445
1446 #if SUPPORT_MOD_ORDER_DUMP
1447 void ModelExecution::dumpGraph(char *filename) const
1448 {
1449         char buffer[200];
1450         sprintf(buffer, "%s.dot", filename);
1451         FILE *file = fopen(buffer, "w");
1452         fprintf(file, "digraph %s {\n", filename);
1453         mo_graph->dumpNodes(file);
1454         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1455
1456         for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
1457                 ModelAction *act = *it;
1458                 if (act->is_read()) {
1459                         mo_graph->dot_print_node(file, act);
1460                         mo_graph->dot_print_edge(file,
1461                                                                                                                          act->get_reads_from(),
1462                                                                                                                          act,
1463                                                                                                                          "label=\"rf\", color=red, weight=2");
1464                 }
1465                 if (thread_array[id_to_int(act->get_tid())]) {
1466                         mo_graph->dot_print_edge(file,
1467                                                                                                                          thread_array[id_to_int(act->get_tid())],
1468                                                                                                                          act,
1469                                                                                                                          "label=\"sb\", color=blue, weight=400");
1470                 }
1471
1472                 thread_array[id_to_int(act->get_tid())] = act;
1473         }
1474         fprintf(file, "}\n");
1475         model_free(thread_array);
1476         fclose(file);
1477 }
1478 #endif
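/*
 * The dumped graph can be rendered with Graphviz, e.g. (the file name follows
 * the graph%04u pattern used by print_summary() below; "0001" is illustrative):
 *
 *   dot -Tpdf graph0001.dot -o graph0001.pdf
 *
 * Reads-from edges appear in red and sequenced-before edges in blue, per the
 * attributes passed to dot_print_edge() above.
 */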
1479
1480 /** @brief Prints an execution trace summary. */
1481 void ModelExecution::print_summary() const
1482 {
1483 #if SUPPORT_MOD_ORDER_DUMP
1484         char buffername[100];
1485         sprintf(buffername, "exec%04d", get_execution_number());
1486         mo_graph->dumpGraphToFile(buffername);
1487         sprintf(buffername, "graph%04d", get_execution_number());
1488         dumpGraph(buffername);
1489 #endif
1490
1491         model_print("Execution trace %d:", get_execution_number());
1492         if (isfeasibleprefix()) {
1493                 if (scheduler->all_threads_sleeping())
1494                         model_print(" SLEEP-SET REDUNDANT");
1495                 if (have_bug_reports())
1496                         model_print(" DETECTED BUG(S)");
1497         } else
1498                 print_infeasibility(" INFEASIBLE");
1499         model_print("\n");
1500
1501         print_list(&action_trace);
1502         model_print("\n");
1503
1504 }
1505
1506 /**
1507  * Add a Thread to the system for the first time. Should only be called once
1508  * per thread.
1509  * @param t The Thread to add
1510  */
1511 void ModelExecution::add_thread(Thread *t)
1512 {
1513         unsigned int i = id_to_int(t->get_id());
1514         if (i >= thread_map.size())
1515                 thread_map.resize(i + 1);
1516         thread_map[i] = t;
1517         if (!t->is_model_thread())
1518                 scheduler->add_thread(t);
1519 }
1520
1521 /**
1522  * @brief Get a Thread reference by its ID
1523  * @param tid The Thread's ID
1524  * @return A Thread reference
1525  */
1526 Thread * ModelExecution::get_thread(thread_id_t tid) const
1527 {
1528         unsigned int i = id_to_int(tid);
1529         if (i < thread_map.size())
1530                 return thread_map[i];
1531         return NULL;
1532 }
1533
1534 /**
1535  * @brief Get a reference to the Thread in which a ModelAction was executed
1536  * @param act The ModelAction
1537  * @return A Thread reference
1538  */
1539 Thread * ModelExecution::get_thread(const ModelAction *act) const
1540 {
1541         return get_thread(act->get_tid());
1542 }
1543
1544 /**
1545  * @brief Get a Thread reference by its pthread ID
1546  * @param pid The pthread's ID
1547  * @return A Thread reference
1548  */
1549 Thread * ModelExecution::get_pthread(pthread_t pid) {
1550         union {
1551                 pthread_t p;
1552                 uint32_t v;
1553         } x;
1554         x.p = pid;
1555         uint32_t thread_id = x.v;
1556         if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1557         else return NULL;
1558 }
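/*
 * Sketch of how get_pthread() might be used (illustrative; `execution` stands
 * for some ModelExecution instance and `pid` for a pthread_t handed out by
 * the interposed pthread_create()):
 *
 *   Thread *thr = execution->get_pthread(pid);
 *   if (thr != NULL && execution->is_enabled(thr))
 *           ;  // the pthread is known to the model and currently enabled
 *
 * The union-based conversion above relies on the model checker assigning its
 * own small integer pthread_t values (tracked by pthread_counter), so the low
 * 32 bits form a valid index into pthread_map.
 */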
1559
1560 /**
1561  * @brief Check if a Thread is currently enabled
1562  * @param t The Thread to check
1563  * @return True if the Thread is currently enabled
1564  */
1565 bool ModelExecution::is_enabled(Thread *t) const
1566 {
1567         return scheduler->is_enabled(t);
1568 }
1569
1570 /**
1571  * @brief Check if a Thread is currently enabled
1572  * @param tid The ID of the Thread to check
1573  * @return True if the Thread is currently enabled
1574  */
1575 bool ModelExecution::is_enabled(thread_id_t tid) const
1576 {
1577         return scheduler->is_enabled(tid);
1578 }
1579
1580 /**
1581  * @brief Select the next thread to execute based on the current action
1582  *
1583  * RMW actions occur in two parts, and we cannot split them. Likewise, THREAD_CREATE
1584  * actions should be followed by the execution of their child thread. In either
1585  * case, the current action should determine the next thread schedule.
1586  *
1587  * @param curr The current action
1588  * @return The next thread to run, if the current action will determine this
1589  * selection; otherwise NULL
1590  */
1591 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1592 {
1593         /* Do not split atomic RMW */
1594         if (curr->is_rmwr())
1595                 return get_thread(curr);
1596         /* Follow CREATE with the created thread */
1597         /* (not strictly needed here, since model.cc also handles this) */
1598         if (curr->get_type() == THREAD_CREATE)
1599                 return curr->get_thread_operand();
1600         if (curr->get_type() == PTHREAD_CREATE) {
1601                 return curr->get_thread_operand();
1602         }
1603         return NULL;
1604 }
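/*
 * Illustrative example (made up) of the RMW case handled above: an
 * atomic_fetch_add() is modeled as a read part followed by a write part in
 * the same thread.  Letting another thread run in between could slip a
 * conflicting write between the two halves and break atomicity, so the read
 * part pins its own thread as the next one to run:
 *
 *   atomic_fetch_add_explicit(&x, 1, memory_order_relaxed);
 *     // read part  -> curr->is_rmwr() is true; same thread is returned
 *     // write part -> completes the RMW before any other thread runs
 */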
1605
1606 /**
1607  * Takes the next step in the execution, if possible.
1608  * @param curr The current action to execute
1609  * @return The next Thread to run, if any; NULL if this execution
1610  * should terminate
1611  */
1612 Thread * ModelExecution::take_step(ModelAction *curr)
1613 {
1614         Thread *curr_thrd = get_thread(curr);
1615         ASSERT(curr_thrd->get_state() == THREAD_READY);
1616
1617         ASSERT(check_action_enabled(curr));     /* May have side effects? */
1618         curr = check_current_action(curr);
1619         ASSERT(curr);
1620
1621         // model_print("pointer loc: %p, thread: %d, type: %d, order: %d, position: %s\n", curr, curr->get_tid(), curr->get_type(), curr->get_mo(), curr->get_position() );
1622         model->get_history()->add_func_atomic( curr, curr_thrd->get_id() );
1623
1624         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1625                 scheduler->remove_thread(curr_thrd);
1626
1627         return action_select_next_thread(curr);
1628 }
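/*
 * Sketch of how take_step() fits into the outer driver loop (simplified and
 * illustrative; the real loop lives in model.cc):
 *
 *   Thread *next = execution->take_step(curr);
 *   // A non-NULL result (an unfinished RMW or a freshly created thread)
 *   // forces that thread to run next; otherwise the caller decides how to
 *   // continue or whether the execution is finished.
 */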
1629
1630 Fuzzer * ModelExecution::getFuzzer() {
1631         return fuzzer;
1632 }