[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
17
18 #define INITIAL_THREAD_ID       0
19
20 /**
21  * Structure for holding small ModelChecker members that should be snapshotted
22  */
23 struct model_snapshot_members {
24         model_snapshot_members() :
25                 /* First thread created will have id INITIAL_THREAD_ID */
26                 next_thread_id(INITIAL_THREAD_ID),
27                 used_sequence_numbers(0),
28                 next_backtrack(NULL),
29                 bugs(),
30                 too_many_reads(false),
31                 no_valid_reads(false),
32                 bad_synchronization(false),
33                 bad_sc_read(false),
34                 asserted(false)
35         { }
36
37         ~model_snapshot_members() {
38                 for (unsigned int i = 0; i < bugs.size(); i++)
39                         delete bugs[i];
40                 bugs.clear();
41         }
42
43         unsigned int next_thread_id;
44         modelclock_t used_sequence_numbers;
45         ModelAction *next_backtrack;
46         SnapVector<bug_message *> bugs;
47         bool too_many_reads;
48         bool no_valid_reads;
49         /** @brief Incorrectly-ordered synchronization was made */
50         bool bad_synchronization;
51         bool bad_sc_read;
52         bool asserted;
53
54         SNAPSHOTALLOC
55 };
56
57 /** @brief Constructor */
58 ModelExecution::ModelExecution(ModelChecker *m,
59                 Scheduler *scheduler,
60                 NodeStack *node_stack) :
61         model(m),
62         params(NULL),
63         scheduler(scheduler),
64         action_trace(),
65         thread_map(2), /* We'll always need at least 2 threads */
66         pthread_map(0),
67         pthread_counter(0),
68         obj_map(),
69         condvar_waiters_map(),
70         obj_thrd_map(),
71         mutex_map(),
72         thrd_last_action(1),
73         thrd_last_fence_release(),
74         node_stack(node_stack),
75         priv(new struct model_snapshot_members()),
76         mo_graph(new CycleGraph())
77 {
78         /* Initialize a model-checker thread, for special ModelActions */
79         model_thread = new Thread(get_next_id());
80         add_thread(model_thread);
81         scheduler->register_engine(this);
82         node_stack->register_engine(this);
83 }
84
85 /** @brief Destructor */
86 ModelExecution::~ModelExecution()
87 {
88         for (unsigned int i = 0; i < get_num_threads(); i++)
89                 delete get_thread(int_to_id(i));
90
91         delete mo_graph;
92         delete priv;
93 }
94
95 int ModelExecution::get_execution_number() const
96 {
97         return model->get_execution_number();
98 }
99
100 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
101 {
102         action_list_t *tmp = hash->get(ptr);
103         if (tmp == NULL) {
104                 tmp = new action_list_t();
105                 hash->put(ptr, tmp);
106         }
107         return tmp;
108 }
109
110 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
111 {
112         SnapVector<action_list_t> *tmp = hash->get(ptr);
113         if (tmp == NULL) {
114                 tmp = new SnapVector<action_list_t>();
115                 hash->put(ptr, tmp);
116         }
117         return tmp;
118 }
119
120 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
121 {
122         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
123         if (wrv==NULL)
124                 return NULL;
125         unsigned int thread=id_to_int(tid);
126         if (thread < wrv->size())
127                 return &(*wrv)[thread];
128         else
129                 return NULL;
130 }
131
132 /** @return a thread ID for a new Thread */
133 thread_id_t ModelExecution::get_next_id()
134 {
135         return priv->next_thread_id++;
136 }
137
138 /** @return the number of user threads created during this execution */
139 unsigned int ModelExecution::get_num_threads() const
140 {
141         return priv->next_thread_id;
142 }
143
144 /** @return a sequence number for a new ModelAction */
145 modelclock_t ModelExecution::get_next_seq_num()
146 {
147         return ++priv->used_sequence_numbers;
148 }
149
150 /**
151  * @brief Should the current action wake up a given thread?
152  *
153  * @param curr The current action
154  * @param thread The thread that we might wake up
155  * @return True, if we should wake up the sleeping thread; false otherwise
156  */
157 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
158 {
159         const ModelAction *asleep = thread->get_pending();
160         /* Don't allow partial RMW to wake anyone up */
161         if (curr->is_rmwr())
162                 return false;
163         /* Synchronizing actions may have been backtracked */
164         if (asleep->could_synchronize_with(curr))
165                 return true;
166         /* A sleeping fence-acquire can be woken by any release operation */
167         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
168                 return true;
169         /* Fence-release + store can awake load-acquire on the same location */
170         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
171                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
172                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
173                         return true;
174         }
175         return false;
176 }
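/*
 * Illustrative sketch (plain C++11 only, not part of c11tester's API): the
 * fence-release + store rule above matches user code such as
 *
 *   std::atomic<int> flag{0};
 *
 *   void producer() {
 *           std::atomic_thread_fence(std::memory_order_release);
 *           flag.store(1, std::memory_order_relaxed);
 *   }
 *
 *   void consumer() {
 *           // Once this acquire load observes the store, it synchronizes with
 *           // the producer's release fence, so a thread sleeping on such a
 *           // load-acquire should be woken when the store is executed.
 *           while (flag.load(std::memory_order_acquire) != 1) {}
 *   }
 */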
177
178 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
179 {
180         for (unsigned int i = 0; i < get_num_threads(); i++) {
181                 Thread *thr = get_thread(int_to_id(i));
182                 if (scheduler->is_sleep_set(thr)) {
183                         if (should_wake_up(curr, thr))
184                                 /* Remove this thread from sleep set */
185                                 scheduler->remove_sleep(thr);
186                 }
187         }
188 }
189
190 /** @brief Alert the model-checker that an incorrectly-ordered
191  * synchronization was made */
192 void ModelExecution::set_bad_synchronization()
193 {
194         priv->bad_synchronization = true;
195 }
196
197 /** @brief Alert the model-checker that an invalid sequentially
198  * consistent read was observed */
199 void ModelExecution::set_bad_sc_read()
200 {
201         priv->bad_sc_read = true;
202 }
203
204 bool ModelExecution::assert_bug(const char *msg)
205 {
206         priv->bugs.push_back(new bug_message(msg));
207
208         if (isfeasibleprefix()) {
209                 set_assert();
210                 return true;
211         }
212         return false;
213 }
214
215 /** @return True, if any bugs have been reported for this execution */
216 bool ModelExecution::have_bug_reports() const
217 {
218         return priv->bugs.size() != 0;
219 }
220
221 SnapVector<bug_message *> * ModelExecution::get_bugs() const
222 {
223         return &priv->bugs;
224 }
225
226 /**
227  * Check whether the current trace has triggered an assertion which should halt
228  * its execution.
229  *
230  * @return True, if the execution should be aborted; false otherwise
231  */
232 bool ModelExecution::has_asserted() const
233 {
234         return priv->asserted;
235 }
236
237 /**
238  * Trigger a trace assertion which should cause this execution to be halted.
239  * This can be due to a detected bug or due to an infeasibility that should
240  * halt ASAP.
241  */
242 void ModelExecution::set_assert()
243 {
244         priv->asserted = true;
245 }
246
247 /**
248  * Check if we are in a deadlock. Should only be called at the end of an
249  * execution, although it should not give false positives in the middle of an
250  * execution (there should be some ENABLED thread).
251  *
252  * @return True if program is in a deadlock; false otherwise
253  */
254 bool ModelExecution::is_deadlocked() const
255 {
256         bool blocking_threads = false;
257         for (unsigned int i = 0; i < get_num_threads(); i++) {
258                 thread_id_t tid = int_to_id(i);
259                 if (is_enabled(tid))
260                         return false;
261                 Thread *t = get_thread(tid);
262                 if (!t->is_model_thread() && t->get_pending())
263                         blocking_threads = true;
264         }
265         return blocking_threads;
266 }
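/*
 * Illustrative sketch (plain C++11, hypothetical user code): a classic
 * lock-order inversion that this check reports as a deadlock.
 *
 *   std::mutex a, b;
 *   void t1() { a.lock(); b.lock(); b.unlock(); a.unlock(); }
 *   void t2() { b.lock(); a.lock(); a.unlock(); b.unlock(); }
 *
 * Under the interleaving t1:lock(a), t2:lock(b), both threads block with a
 * pending lock action, no thread is enabled, and is_deadlocked() returns true.
 */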
267
268 /**
269  * Check if this is a complete execution. That is, have all threads completed
270  * execution (rather than exiting because sleep sets have forced a redundant
271  * execution).
272  *
273  * @return True if the execution is complete.
274  */
275 bool ModelExecution::is_complete_execution() const
276 {
277         for (unsigned int i = 0; i < get_num_threads(); i++)
278                 if (is_enabled(int_to_id(i)))
279                         return false;
280         return true;
281 }
282
283
284 /**
285  * Processes a read model action.
286  * @param curr is the read model action to process.
287  * @param rf_set is the set of model actions we can possibly read from
288  * @return True if processing this read updates the mo_graph.
289  */
290 bool ModelExecution::process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set)
291 {
292         int random_index = random() % rf_set->size();
293         bool updated = false;
294                 
295         const ModelAction *rf = (*rf_set)[random_index];
296         ASSERT(rf);
297         
298         mo_graph->startChanges();
299         
300         updated = r_modification_order(curr, rf);
301         read_from(curr, rf);
302         mo_graph->commitChanges();
303         get_thread(curr)->set_return_value(curr->get_return_value());
304         return updated;
305 }
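/*
 * Illustrative sketch (plain C++11, hypothetical user code) of the random
 * reads-from choice made above:
 *
 *   std::atomic<int> x{0};
 *   // T1: x.store(1, std::memory_order_relaxed);      // W1
 *   // T2: x.store(2, std::memory_order_relaxed);      // W2
 *   // T3: int r = x.load(std::memory_order_relaxed);  // curr
 *
 * With no synchronization between the threads, rf_set for T3's load can
 * contain the initial value, W1 and W2; process_read() picks one candidate
 * uniformly at random and commits the resulting modification-order edges.
 */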
306
307 /**
308  * Processes a lock, trylock, or unlock model action.  @param curr is
309  * Processes a lock, trylock, or unlock model action.
310  * @param curr is the mutex model action to process.
311  *
312  * The trylock operation checks whether the lock is already taken.  If not,
313  * it falls through to the normal lock case.  If so, it fails and
314  * returns 0.
315  * The lock operation has already been checked that it is enabled, so
316  * it just grabs the lock and synchronizes with the previous unlock.
317  *
318  * The unlock operation has to re-enable all of the threads that are
319  * waiting on the lock.
320  *
321  * @return True if synchronization was updated; false otherwise
322  */
323 bool ModelExecution::process_mutex(ModelAction *curr)
324 {
325         cdsc::mutex *mutex = curr->get_mutex();
326         struct cdsc::mutex_state *state = NULL;
327
328         if (mutex)
329                 state = mutex->get_state();
330
331         switch (curr->get_type()) {
332         case ATOMIC_TRYLOCK: {
333                 bool success = !state->locked;
334                 curr->set_try_lock(success);
335                 if (!success) {
336                         get_thread(curr)->set_return_value(0);
337                         break;
338                 }
339                 get_thread(curr)->set_return_value(1);
340         }
341                 //otherwise fall through to the lock case
342         case ATOMIC_LOCK: {
343                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
344                         assert_bug("Lock access before initialization");
345                 state->locked = get_thread(curr);
346                 ModelAction *unlock = get_last_unlock(curr);
347                 //synchronize with the previous unlock statement
348                 if (unlock != NULL) {
349                         synchronize(unlock, curr);
350                         return true;
351                 }
352                 break;
353         }
354         case ATOMIC_WAIT:
355         case ATOMIC_UNLOCK: {
356                 /* wake up the other threads */
357                 for (unsigned int i = 0; i < get_num_threads(); i++) {
358                         Thread *t = get_thread(int_to_id(i));
359                         Thread *curr_thrd = get_thread(curr);
360                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
361                                 scheduler->wake(t);
362                 }
363
364                 /* unlock the lock - after checking who was waiting on it */
365                 state->locked = NULL;
366
367                 if (!curr->is_wait())
368                         break; /* The rest is only for ATOMIC_WAIT */
369
370                 break;
371         }
372         case ATOMIC_NOTIFY_ALL: {
373                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
374                 //activate all the waiting threads
375                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
376                         scheduler->wake(get_thread(*rit));
377                 }
378                 waiters->clear();
379                 break;
380         }
381         case ATOMIC_NOTIFY_ONE: {
382                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
383
384                 //BCD -- TOFIX FUZZER
385                 //THIS SHOULD BE A RANDOM CHOICE
386                 //              int wakeupthread = curr->get_node()->get_misc();
387                 int wakeupthread = 0;
388                 action_list_t::iterator it = waiters->begin();
389
390                 // WL
391                 if (it == waiters->end())
392                         break;
393
394                 advance(it, wakeupthread);
395                 scheduler->wake(get_thread(*it));
396                 waiters->erase(it);
397                 break;
398         }
399
400         default:
401                 ASSERT(0);
402         }
403         return false;
404 }
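/*
 * Illustrative sketch (plain C++11, hypothetical user code) of the trylock,
 * lock, and unlock cases handled above:
 *
 *   std::mutex m;
 *   if (m.try_lock()) {
 *           // ATOMIC_TRYLOCK succeeded and fell through to the lock case,
 *           // synchronizing with the previous unlock (if any).
 *           m.unlock();  // ATOMIC_UNLOCK wakes threads blocked on the lock
 *   } else {
 *           // ATOMIC_TRYLOCK failed; the model returns 0 to the caller.
 *   }
 */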
405
406 /**
407  * Process a write ModelAction
408  * @param curr The ModelAction to process
409  * @return True if the mo_graph was updated or promises were resolved
410  */
411 bool ModelExecution::process_write(ModelAction *curr)
412 {
413
414         bool updated_mod_order = w_modification_order(curr);
415
416         mo_graph->commitChanges();
417
418         get_thread(curr)->set_return_value(VALUE_NONE);
419         return updated_mod_order;
420 }
421
422 /**
423  * Process a fence ModelAction
424  * @param curr The ModelAction to process
425  * @return True if synchronization was updated
426  */
427 bool ModelExecution::process_fence(ModelAction *curr)
428 {
429         /*
430          * fence-relaxed: no-op
431          * fence-release: only log the occurrence (not in this function), for
432          *   use in later synchronization
433          * fence-acquire (this function): search for hypothetical release
434          *   sequences
435          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
436          */
437         bool updated = false;
438         if (curr->is_acquire()) {
439                 action_list_t *list = &action_trace;
440                 action_list_t::reverse_iterator rit;
441                 /* Find X : is_read(X) && X --sb-> curr */
442                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
443                         ModelAction *act = *rit;
444                         if (act == curr)
445                                 continue;
446                         if (act->get_tid() != curr->get_tid())
447                                 continue;
448                         /* Stop at the beginning of the thread */
449                         if (act->is_thread_start())
450                                 break;
451                         /* Stop once we reach a prior fence-acquire */
452                         if (act->is_fence() && act->is_acquire())
453                                 break;
454                         if (!act->is_read())
455                                 continue;
456                         /* read-acquire will find its own release sequences */
457                         if (act->is_acquire())
458                                 continue;
459
460                         /* Establish hypothetical release sequences */
461                         rel_heads_list_t release_heads;
462                         get_release_seq_heads(curr, act, &release_heads);
463                         for (unsigned int i = 0; i < release_heads.size(); i++)
464                                 synchronize(release_heads[i], curr);
465                         if (release_heads.size() != 0)
466                                 updated = true;
467                 }
468         }
469         return updated;
470 }
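/*
 * Illustrative sketch (plain C++11, hypothetical user code) of the
 * fence-acquire search above:
 *
 *   std::atomic<int> ready{0};
 *
 *   void writer() {
 *           ready.store(1, std::memory_order_release);
 *   }
 *
 *   void reader() {
 *           int r = ready.load(std::memory_order_relaxed);         // X
 *           std::atomic_thread_fence(std::memory_order_acquire);   // curr
 *           // If r == 1, the acquire fence synchronizes with the release
 *           // store through the relaxed read X sequenced before it, which is
 *           // exactly the "X --sb-> curr" case searched for above.
 *   }
 */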
471
472 /**
473  * @brief Process the current action for thread-related activity
474  *
475  * Performs current-action processing for a THREAD_* ModelAction. Processing
476  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
477  * synchronization, etc.  This function is a no-op for non-THREAD actions
478  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
479  *
480  * @param curr The current action
481  * @return True if synchronization was updated or a thread completed
482  */
483 bool ModelExecution::process_thread_action(ModelAction *curr)
484 {
485         bool updated = false;
486
487         switch (curr->get_type()) {
488         case THREAD_CREATE: {
489                 thrd_t *thrd = (thrd_t *)curr->get_location();
490                 struct thread_params *params = (struct thread_params *)curr->get_value();
491                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
492                 curr->set_thread_operand(th);
493                 add_thread(th);
494                 th->set_creation(curr);
495                 break;
496         }
497         case PTHREAD_CREATE: {
498                 (*(uint32_t *)curr->get_location()) = pthread_counter++;
499
500                 struct pthread_params *params = (struct pthread_params *)curr->get_value();
501                 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
502                 curr->set_thread_operand(th);
503                 add_thread(th);
504                 th->set_creation(curr);
505
506                 if ( pthread_map.size() < pthread_counter )
507                         pthread_map.resize( pthread_counter );
508                 pthread_map[ pthread_counter-1 ] = th;
509
510                 break;
511         }
512         case THREAD_JOIN: {
513                 Thread *blocking = curr->get_thread_operand();
514                 ModelAction *act = get_last_action(blocking->get_id());
515                 synchronize(act, curr);
516                 updated = true; /* trigger rel-seq checks */
517                 break;
518         }
519         case PTHREAD_JOIN: {
520                 Thread *blocking = curr->get_thread_operand();
521                 ModelAction *act = get_last_action(blocking->get_id());
522                 synchronize(act, curr);
523                 updated = true; /* trigger rel-seq checks */
524                 break; // WL: to be added (modified)
525         }
526
527         case THREAD_FINISH: {
528                 Thread *th = get_thread(curr);
529                 /* Wake up any joining threads */
530                 for (unsigned int i = 0; i < get_num_threads(); i++) {
531                         Thread *waiting = get_thread(int_to_id(i));
532                         if (waiting->waiting_on() == th &&
533                                         waiting->get_pending()->is_thread_join())
534                                 scheduler->wake(waiting);
535                 }
536                 th->complete();
537                 updated = true; /* trigger rel-seq checks */
538                 break;
539         }
540         case THREAD_START: {
541                 break;
542         }
543         default:
544                 break;
545         }
546
547         return updated;
548 }
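/*
 * Rough correspondence to hypothetical user code (not c11tester internals):
 *
 *   pthread_t tid;
 *   void *worker(void *arg) { return arg; }
 *   ...
 *   pthread_create(&tid, NULL, worker, NULL); // PTHREAD_CREATE: a Thread is
 *                                             // created and an index is stored
 *                                             // through the pthread_t handle
 *   pthread_join(tid, NULL);                  // PTHREAD_JOIN: synchronizes with
 *                                             // the joined thread's last action
 */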
549
550 /**
551  * Initialize the current action by performing one or more of the following
552  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
553  * in the NodeStack, manipulating backtracking sets, allocating and
554  * initializing clock vectors, and computing the promises to fulfill.
555  *
556  * @param curr The current action, as passed from the user context; may be
557  * freed/invalidated after the execution of this function, with a different
558  * action "returned" in its place (pass-by-reference)
559  * @return True if curr is a newly-explored action; false otherwise
560  */
561 bool ModelExecution::initialize_curr_action(ModelAction **curr)
562 {
563         ModelAction *newcurr;
564
565         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
566                 newcurr = process_rmw(*curr);
567                 delete *curr;
568
569                 *curr = newcurr;
570                 return false;
571         }
572
573         (*curr)->set_seq_number(get_next_seq_num());
574
575         newcurr = node_stack->explore_action(*curr);
576         if (newcurr) {
577                 /* First restore type and order in case of RMW operation */
578                 if ((*curr)->is_rmwr())
579                         newcurr->copy_typeandorder(*curr);
580
581                 ASSERT((*curr)->get_location() == newcurr->get_location());
582                 newcurr->copy_from_new(*curr);
583
584                 /* Discard duplicate ModelAction; use action from NodeStack */
585                 delete *curr;
586
587                 /* Always compute new clock vector */
588                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
589
590                 *curr = newcurr;
591                 return false; /* Action was explored previously */
592         } else {
593                 newcurr = *curr;
594
595                 /* Always compute new clock vector */
596                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
597
598                 /* Assign most recent release fence */
599                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
600
601                 return true; /* This was a new ModelAction */
602         }
603 }
604
605 /**
606  * @brief Establish reads-from relation between two actions
607  *
608  * Perform basic operations involved with establishing a concrete rf relation,
609  * including setting the ModelAction data and checking for release sequences.
610  *
611  * @param act The action that is reading (must be a read)
612  * @param rf The action from which we are reading (must be a write)
613  *
614  * @return True if this read established synchronization
615  */
616
617 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
618 {
619         ASSERT(rf);
620         ASSERT(rf->is_write());
621
622         act->set_read_from(rf);
623         if (act->is_acquire()) {
624                 rel_heads_list_t release_heads;
625                 get_release_seq_heads(act, act, &release_heads);
626                 int num_heads = release_heads.size();
627                 for (unsigned int i = 0; i < release_heads.size(); i++)
628                         if (!synchronize(release_heads[i], act))
629                                 num_heads--;
630                 return num_heads > 0;
631         }
632         return false;
633 }
634
635 /**
636  * @brief Synchronizes two actions
637  *
638  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
639  * This function performs the synchronization as well as providing other hooks
640  * for other checks along with synchronization.
641  *
642  * @param first The left-hand side of the synchronizes-with relation
643  * @param second The right-hand side of the synchronizes-with relation
644  * @return True if the synchronization was successful (i.e., was consistent
645  * with the execution order); false otherwise
646  */
647 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
648 {
649         if (*second < *first) {
650                 set_bad_synchronization();
651                 return false;
652         }
653         return second->synchronize_with(first);
654 }
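/*
 * Conceptual sketch of the clock-vector inheritance performed by
 * synchronize_with() (a simplified stand-in, not the ClockVector API): when
 * A --sw-> B, B's vector becomes the pointwise maximum of its own and A's.
 *
 *   void merge_cv(std::vector<modelclock_t> &dst,
 *                 const std::vector<modelclock_t> &src) {
 *           if (src.size() > dst.size())
 *                   dst.resize(src.size(), 0);
 *           for (size_t i = 0; i < src.size(); i++)
 *                   dst[i] = std::max(dst[i], src[i]);
 *   }
 */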
655
656 /**
657  * @brief Check whether a model action is enabled.
658  *
659  * Checks whether an operation would succeed (i.e., whether a lock is not
660  * already held, or whether the thread being joined has already completed).
661  *
662  * For yield-blocking, yields are never enabled.
663  *
664  * @param curr is the ModelAction to check whether it is enabled.
665  * @return a bool that indicates whether the action is enabled.
666  */
667 bool ModelExecution::check_action_enabled(ModelAction *curr) {
668         if (curr->is_lock()) {
669                 cdsc::mutex *lock = curr->get_mutex();
670                 struct cdsc::mutex_state *state = lock->get_state();
671                 if (state->locked)
672                         return false;
673         } else if (curr->is_thread_join()) {
674                 Thread *blocking = curr->get_thread_operand();
675                 if (!blocking->is_complete()) {
676                         return false;
677                 }
678         }
679
680         return true;
681 }
682
683 /**
684  * This is the heart of the model checker routine. It performs model-checking
685  * actions corresponding to a given "current action." Among other processes, it
686  * calculates reads-from relationships, updates synchronization clock vectors,
687  * forms a memory_order constraints graph, and handles replay/backtrack
688  * execution when running permutations of previously-observed executions.
689  *
690  * @param curr The current action to process
691  * @return The ModelAction that is actually executed; may be different than
692  * curr
693  */
694 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
695 {
696         ASSERT(curr);
697         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
698         bool newly_explored = initialize_curr_action(&curr);
699
700         DBG();
701
702         wake_up_sleeping_actions(curr);
703
704         /* Add the action to lists before any other model-checking tasks */
705         if (!second_part_of_rmw)
706                 add_action_to_lists(curr);
707
708         ModelVector<ModelAction *> * rf_set = NULL;
709         /* Build may_read_from set for newly-created actions */
710         if (newly_explored && curr->is_read())
711                 rf_set = build_may_read_from(curr);
712
713         process_thread_action(curr);
714
715         if (curr->is_read() && !second_part_of_rmw) {
716                 process_read(curr, rf_set);
717                 delete rf_set;
718         }
719
720         if (curr->is_write())
721                 process_write(curr);
722
723         if (curr->is_fence())
724                 process_fence(curr);
725
726         if (curr->is_mutex_op())
727                 process_mutex(curr);
728
729         return curr;
730 }
731
732 /**
733  * This is the strongest feasibility check available.
734  * @return whether the current trace (partial or complete) must be a prefix of
735  * a feasible trace.
736  */
737 bool ModelExecution::isfeasibleprefix() const
738 {
739         return !is_infeasible();
740 }
741
742 /**
743  * Print diagnostic information about an infeasible execution
744  * @param prefix A string to prefix the output with; if NULL, then a default
745  * message prefix will be provided
746  */
747 void ModelExecution::print_infeasibility(const char *prefix) const
748 {
749         char buf[100];
750         char *ptr = buf;
751         if (mo_graph->checkForCycles())
752                 ptr += sprintf(ptr, "[mo cycle]");
753         if (priv->too_many_reads)
754                 ptr += sprintf(ptr, "[too many reads]");
755         if (priv->no_valid_reads)
756                 ptr += sprintf(ptr, "[no valid reads-from]");
757         if (priv->bad_synchronization)
758                 ptr += sprintf(ptr, "[bad sw ordering]");
759         if (priv->bad_sc_read)
760                 ptr += sprintf(ptr, "[bad sc read]");
761         if (ptr != buf)
762                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
763 }
764
765 /**
766  * Check if the current partial trace is infeasible. Does not check any
767  * end-of-execution flags, which might rule out the execution. Thus, this is
768  * useful only for ruling an execution as infeasible.
769  * @return whether the current partial trace is infeasible.
770  */
771 bool ModelExecution::is_infeasible() const
772 {
773         return mo_graph->checkForCycles() ||
774                 priv->no_valid_reads ||
775                 priv->too_many_reads ||
776                 priv->bad_synchronization ||
777                 priv->bad_sc_read;
778 }
779
780 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
781 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
782         ModelAction *lastread = get_last_action(act->get_tid());
783         lastread->process_rmw(act);
784         if (act->is_rmw()) {
785                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
786                 mo_graph->commitChanges();
787         }
788         return lastread;
789 }
790
791 /**
792  * @brief Updates the mo_graph with the constraints imposed from the current
793  * read.
794  *
795  * Basic idea is the following: Go through each other thread and find
796  * the last action that happened before our read.  Two cases:
797  *
798  * -# The action is a write: that write must either occur before
799  * the write we read from or be the write we read from.
800  * -# The action is a read: the write that that action read from
801  * must occur before the write we read from or be the same write.
802  *
803  * @param curr The current action. Must be a read.
804  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
805  * @return True if modification order edges were added; false otherwise
806  */
807 template <typename rf_type>
808 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
809 {
810         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
811         unsigned int i;
812         bool added = false;
813         ASSERT(curr->is_read());
814
815         /* Last SC fence in the current thread */
816         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
817         ModelAction *last_sc_write = NULL;
818         if (curr->is_seqcst())
819                 last_sc_write = get_last_seq_cst_write(curr);
820
821         /* Iterate over all threads */
822         for (i = 0; i < thrd_lists->size(); i++) {
823                 /* Last SC fence in thread i */
824                 ModelAction *last_sc_fence_thread_local = NULL;
825                 if (int_to_id((int)i) != curr->get_tid())
826                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
827
828                 /* Last SC fence in thread i, before last SC fence in current thread */
829                 ModelAction *last_sc_fence_thread_before = NULL;
830                 if (last_sc_fence_local)
831                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
832
833                 /* Iterate over actions in thread, starting from most recent */
834                 action_list_t *list = &(*thrd_lists)[i];
835                 action_list_t::reverse_iterator rit;
836                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
837                         ModelAction *act = *rit;
838
839                         /* Skip curr */
840                         if (act == curr)
841                                 continue;
842                         /* Don't want to add reflexive edges on 'rf' */
843                         if (act->equals(rf)) {
844                                 if (act->happens_before(curr))
845                                         break;
846                                 else
847                                         continue;
848                         }
849
850                         if (act->is_write()) {
851                                 /* C++, Section 29.3 statement 5 */
852                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
853                                                 *act < *last_sc_fence_thread_local) {
854                                         added = mo_graph->addEdge(act, rf) || added;
855                                         break;
856                                 }
857                                 /* C++, Section 29.3 statement 4 */
858                                 else if (act->is_seqcst() && last_sc_fence_local &&
859                                                 *act < *last_sc_fence_local) {
860                                         added = mo_graph->addEdge(act, rf) || added;
861                                         break;
862                                 }
863                                 /* C++, Section 29.3 statement 6 */
864                                 else if (last_sc_fence_thread_before &&
865                                                 *act < *last_sc_fence_thread_before) {
866                                         added = mo_graph->addEdge(act, rf) || added;
867                                         break;
868                                 }
869                         }
870
871                         /*
872                          * Include at most one act per-thread that "happens
873                          * before" curr
874                          */
875                         if (act->happens_before(curr)) {
876                                 if (act->is_write()) {
877                                         added = mo_graph->addEdge(act, rf) || added;
878                                 } else {
879                                         const ModelAction *prevrf = act->get_reads_from();
880                                         if (!prevrf->equals(rf))
881                                                 added = mo_graph->addEdge(prevrf, rf) || added;
882                                 }
883                                 break;
884                         }
885                 }
886         }
887
888         return added;
889 }
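/*
 * Illustrative sketch (plain C++11, hypothetical user code) of case 1 above
 * (write-read coherence):
 *
 *   std::atomic<int> x{0};
 *   // T1: x.store(1, std::memory_order_relaxed);        // W1
 *   //     int r = x.load(std::memory_order_relaxed);    // curr, reads from W2
 *   // T2: x.store(2, std::memory_order_relaxed);        // W2 = rf
 *
 * W1 happens before the read (sequenced-before), so the loop above adds the
 * edge W1 --mo--> W2: a write that happens before a read cannot come after
 * the write that the read observes in the modification order.
 */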
890
891 /**
892  * Updates the mo_graph with the constraints imposed from the current write.
893  *
894  * Basic idea is the following: Go through each other thread and find
895  * the latest action that happened before our write.  Two cases:
896  *
897  * (1) The action is a write => that write must occur before
898  * the current write
899  *
900  * (2) The action is a read => the write that that action read from
901  * must occur before the current write.
902  *
903  * This method also handles two other issues:
904  *
905  * (I) Sequential Consistency: Making sure that if the current write is
906  * seq_cst, that it occurs after the previous seq_cst write.
907  *
908  * (II) Sending the write back to non-synchronizing reads.
909  *
910  * @param curr The current action. Must be a write.
913  * @return True if modification order edges were added; false otherwise
914  */
915 bool ModelExecution::w_modification_order(ModelAction *curr)
916 {
917         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
918         unsigned int i;
919         bool added = false;
920         ASSERT(curr->is_write());
921
922         if (curr->is_seqcst()) {
923                 /* We have to at least see the last sequentially consistent write,
924                          so we are initialized. */
925                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
926                 if (last_seq_cst != NULL) {
927                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
928                 }
929         }
930
931         /* Last SC fence in the current thread */
932         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
933
934         /* Iterate over all threads */
935         for (i = 0; i < thrd_lists->size(); i++) {
936                 /* Last SC fence in thread i, before last SC fence in current thread */
937                 ModelAction *last_sc_fence_thread_before = NULL;
938                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
939                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
940
941                 /* Iterate over actions in thread, starting from most recent */
942                 action_list_t *list = &(*thrd_lists)[i];
943                 action_list_t::reverse_iterator rit;
944                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
945                         ModelAction *act = *rit;
946                         if (act == curr) {
947                                 /*
948                                  * 1) If RMW and it actually read from something, then we
949                                  * already have all relevant edges, so just skip to next
950                                  * thread.
951                                  *
952                                  * 2) If RMW and it didn't read from anything, we should add
953                                  * whatever edge we can to speed up convergence.
954                                  *
955                                  * 3) If normal write, we need to look at earlier actions, so
956                                  * continue processing list.
957                                  */
958                                 if (curr->is_rmw()) {
959                                         if (curr->get_reads_from() != NULL)
960                                                 break;
961                                         else
962                                                 continue;
963                                 } else
964                                         continue;
965                         }
966
967                         /* C++, Section 29.3 statement 7 */
968                         if (last_sc_fence_thread_before && act->is_write() &&
969                                         *act < *last_sc_fence_thread_before) {
970                                 added = mo_graph->addEdge(act, curr) || added;
971                                 break;
972                         }
973
974                         /*
975                          * Include at most one act per-thread that "happens
976                          * before" curr
977                          */
978                         if (act->happens_before(curr)) {
979                                 /*
980                                  * Note: if act is RMW, just add edge:
981                                  *   act --mo--> curr
982                                  * The following edge should be handled elsewhere:
983                                  *   readfrom(act) --mo--> act
984                                  */
985                                 if (act->is_write())
986                                         added = mo_graph->addEdge(act, curr) || added;
987                                 else if (act->is_read()) {
988                                         //if previous read accessed a null, just keep going
989                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
990                                 }
991                                 break;
992                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
993                                                      !act->same_thread(curr)) {
994                                 /* We have an action that:
995                                    (1) did not happen before us
996                                    (2) is a read and we are a write
997                                    (3) cannot synchronize with us
998                                    (4) is in a different thread
999                                    =>
1000                                    that read could potentially read from our write.  Note that
1001                                    these checks are overly conservative at this point, we'll
1002                                    do more checks before actually removing the
1003                                    pendingfuturevalue.
1004
1005                                  */
1006
1007                         }
1008                 }
1009         }
1010
1011         return added;
1012 }
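/*
 * Illustrative sketch (plain C++11, hypothetical user code) of cases (2) and
 * (I) above:
 *
 *   std::atomic<int> x{0};
 *   // T1: x.store(1, std::memory_order_seq_cst);        // W1
 *   // T2: int r = x.load(std::memory_order_relaxed);    // R, reads from W1
 *   //     x.store(2, std::memory_order_seq_cst);        // W2 = curr
 *
 * Case (2): R is sequenced before W2 and read from W1, so W1 --mo--> W2.
 * Case (I): both stores are seq_cst, so the previous seq_cst write W1 is also
 * ordered before W2.
 */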
1013
1014 /**
1015  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1016  * some constraints. This method checks one of the following constraints (others
1017  * require compiler support):
1018  *
1019  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1020  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1021  */
1022 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1023 {
1024         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1025         unsigned int i;
1026         /* Iterate over all threads */
1027         for (i = 0; i < thrd_lists->size(); i++) {
1028                 const ModelAction *write_after_read = NULL;
1029
1030                 /* Iterate over actions in thread, starting from most recent */
1031                 action_list_t *list = &(*thrd_lists)[i];
1032                 action_list_t::reverse_iterator rit;
1033                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1034                         ModelAction *act = *rit;
1035
1036                         /* Don't disallow due to act == reader */
1037                         if (!reader->happens_before(act) || reader == act)
1038                                 break;
1039                         else if (act->is_write())
1040                                 write_after_read = act;
1041                         else if (act->is_read() && act->get_reads_from() != NULL)
1042                                 write_after_read = act->get_reads_from();
1043                 }
1044
1045                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1046                         return false;
1047         }
1048         return true;
1049 }
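/*
 * Illustrative sketch (plain C++11, hypothetical user code) of the first
 * constraint above:
 *
 *   std::atomic<int> x{0};
 *   // T1: int r = x.load(std::memory_order_relaxed);    // X (reader)
 *   //     x.store(1, std::memory_order_relaxed);        // Y, with X --hb-> Y
 *   // T2: x.store(2, std::memory_order_relaxed);        // Z, with Y --mo-> Z
 *
 * Reading r == 2 would require X to read from Z even though X happens before
 * a write that is modification-ordered before Z; mo_may_allow(Z, X) therefore
 * returns false for that candidate.
 */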
1050
1051 /**
1052  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1053  * The ModelAction under consideration is expected to be taking part in
1054  * release/acquire synchronization as an object of the "reads from" relation.
1055  * Note that this can only provide release sequence support for RMW chains
1056  * which do not read from the future, as those actions cannot be traced until
1057  * their "promise" is fulfilled. Similarly, we may not even establish the
1058  * presence of a release sequence with certainty, as some modification order
1059  * constraints may be decided further in the future. Thus, this function
1060  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1061  * and a boolean representing certainty.
1062  *
1063  * @param rf The action that might be part of a release sequence. Must be a
1064  * write.
1065  * @param release_heads A pass-by-reference style return parameter. After
1066  * execution of this function, release_heads will contain the heads of all the
1067  * relevant release sequences, if any exist with certainty
1068  * @return true, if the ModelExecution is certain that release_heads is complete;
1069  * false otherwise
1070  */
1071 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1072                 rel_heads_list_t *release_heads) const
1073 {
1074         /* Only check for release sequences if there are no cycles */
1075         if (mo_graph->checkForCycles())
1076                 return false;
1077
1078         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1079                 ASSERT(rf->is_write());
1080
1081                 if (rf->is_release())
1082                         release_heads->push_back(rf);
1083                 else if (rf->get_last_fence_release())
1084                         release_heads->push_back(rf->get_last_fence_release());
1085                 if (!rf->is_rmw())
1086                         break; /* End of RMW chain */
1087
1088                 /** @todo Need to be smarter here...  In the linux lock
1089                  * example, this will run to the beginning of the program for
1090                  * every acquire. */
1091                 /** @todo The way to be smarter here is to keep going until 1
1092                  * thread has a release preceded by an acquire and you've seen
1093                  * both. */
1094
1095                 /* acq_rel RMW is a sufficient stopping condition */
1096                 if (rf->is_acquire() && rf->is_release())
1097                         return true; /* complete */
1098         }
1099         ASSERT(rf); // Needs to be real write
1100
1101         if (rf->is_release())
1102                 return true; /* complete */
1103
1104         /* else relaxed write
1105          * - check for fence-release in the same thread (29.8, stmt. 3)
1106          * - check modification order for contiguous subsequence
1107          *   -> rf must be same thread as release */
1108
1109         const ModelAction *fence_release = rf->get_last_fence_release();
1110         /* Synchronize with a fence-release unconditionally; we don't need to
1111          * find any more "contiguous subsequence..." for it */
1112         if (fence_release)
1113                 release_heads->push_back(fence_release);
1114
1115         return true; /* complete */
1116 }
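/*
 * Illustrative sketch (plain C++11, hypothetical user code) of the RMW-chain
 * walk above:
 *
 *   std::atomic<int> x{0};
 *   // T1: x.store(1, std::memory_order_release);        // release head
 *   // T2: x.fetch_add(1, std::memory_order_relaxed);    // RMW continues the
 *   //                                                   // release sequence
 *   // T3: int r = x.load(std::memory_order_acquire);    // reads from the RMW
 *
 * Following the reads-from chain backwards through the RMW reaches T1's
 * release store, which is pushed onto release_heads, so T3's acquire load
 * synchronizes with T1.
 */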
1117
1118 /**
1119  * An interface for getting the release sequence head(s) with which a
1120  * given ModelAction must synchronize. This function only returns a non-empty
1121  * result when it can locate a release sequence head with certainty. Otherwise,
1122  * it may mark the internal state of the ModelExecution so that it will handle
1123  * the release sequence at a later time, causing @a acquire to update its
1124  * synchronization at some later point in execution.
1125  *
1126  * @param acquire The 'acquire' action that may synchronize with a release
1127  * sequence
1128  * @param read The read action that may read from a release sequence; this may
1129  * be the same as acquire, or else an earlier action in the same thread (i.e.,
1130  * when 'acquire' is a fence-acquire)
1131  * @param release_heads A pass-by-reference return parameter. Will be filled
1132  * with the head(s) of the release sequence(s), if they exist with certainty.
1133  * @see ModelExecution::release_seq_heads
1134  */
1135 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
1136                 ModelAction *read, rel_heads_list_t *release_heads)
1137 {
1138         const ModelAction *rf = read->get_reads_from();
1139
1140         release_seq_heads(rf, release_heads);
1141 }
1142
1143 /**
1144  * Performs various bookkeeping operations for the current ModelAction. For
1145  * instance, adds action to the per-object, per-thread action vector and to the
1146  * action trace list of all thread actions.
1147  *
1148  * @param act is the ModelAction to add.
1149  */
1150 void ModelExecution::add_action_to_lists(ModelAction *act)
1151 {
1152         int tid = id_to_int(act->get_tid());
1153         ModelAction *uninit = NULL;
1154         int uninit_id = -1;
1155         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1156         if (list->empty() && act->is_atomic_var()) {
1157                 uninit = get_uninitialized_action(act);
1158                 uninit_id = id_to_int(uninit->get_tid());
1159                 list->push_front(uninit);
1160         }
1161         list->push_back(act);
1162
1163         action_trace.push_back(act);
1164         if (uninit)
1165                 action_trace.push_front(uninit);
1166
1167         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1168         if (tid >= (int)vec->size())
1169                 vec->resize(priv->next_thread_id);
1170         (*vec)[tid].push_back(act);
1171         if (uninit)
1172                 (*vec)[uninit_id].push_front(uninit);
1173
1174         if ((int)thrd_last_action.size() <= tid)
1175                 thrd_last_action.resize(get_num_threads());
1176         thrd_last_action[tid] = act;
1177         if (uninit)
1178                 thrd_last_action[uninit_id] = uninit;
1179
1180         if (act->is_fence() && act->is_release()) {
1181                 if ((int)thrd_last_fence_release.size() <= tid)
1182                         thrd_last_fence_release.resize(get_num_threads());
1183                 thrd_last_fence_release[tid] = act;
1184         }
1185
1186         if (act->is_wait()) {
1187                 void *mutex_loc = (void *) act->get_value();
1188                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1189
1190                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1191                 if (tid >= (int)vec->size())
1192                         vec->resize(priv->next_thread_id);
1193                 (*vec)[tid].push_back(act);
1194         }
1195 }
1196
1197 /**
1198  * @brief Get the last action performed by a particular Thread
1199  * @param tid The thread ID of the Thread in question
1200  * @return The last action in the thread
1201  */
1202 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1203 {
1204         int threadid = id_to_int(tid);
1205         if (threadid < (int)thrd_last_action.size())
1206                 return thrd_last_action[id_to_int(tid)];
1207         else
1208                 return NULL;
1209 }
1210
1211 /**
1212  * @brief Get the last fence release performed by a particular Thread
1213  * @param tid The thread ID of the Thread in question
1214  * @return The last fence release in the thread, if one exists; NULL otherwise
1215  */
1216 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1217 {
1218         int threadid = id_to_int(tid);
1219         if (threadid < (int)thrd_last_fence_release.size())
1220                 return thrd_last_fence_release[id_to_int(tid)];
1221         else
1222                 return NULL;
1223 }
1224
1225 /**
1226  * Gets the last memory_order_seq_cst write (in the total global sequence)
1227  * performed on a particular object (i.e., memory location), not including the
1228  * current action.
1229  * @param curr The current ModelAction; also denotes the object location to
1230  * check
1231  * @return The last seq_cst write
1232  */
1233 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1234 {
1235         void *location = curr->get_location();
1236         action_list_t *list = obj_map.get(location);
1237         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1238         action_list_t::reverse_iterator rit;
1239         for (rit = list->rbegin(); (*rit) != curr; rit++)
1240                 ;
1241         rit++; /* Skip past curr */
1242         for ( ; rit != list->rend(); rit++)
1243                 if ((*rit)->is_write() && (*rit)->is_seqcst())
1244                         return *rit;
1245         return NULL;
1246 }
1247
1248 /**
1249  * Gets the last memory_order_seq_cst fence (in the total global sequence)
1250  * performed in a particular thread, prior to a particular fence.
1251  * @param tid The ID of the thread to check
1252  * @param before_fence The fence from which to begin the search; if NULL, then
1253  * search for the most recent fence in the thread.
1254  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1255  */
1256 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1257 {
1258         /* All fences should have location FENCE_LOCATION */
1259         action_list_t *list = obj_map.get(FENCE_LOCATION);
1260
1261         if (!list)
1262                 return NULL;
1263
1264         action_list_t::reverse_iterator rit = list->rbegin();
1265
1266         if (before_fence) {
1267                 for (; rit != list->rend(); rit++)
1268                         if (*rit == before_fence)
1269                                 break;
1270
1271                 ASSERT(*rit == before_fence);
1272                 rit++;
1273         }
1274
1275         for (; rit != list->rend(); rit++)
1276                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1277                         return *rit;
1278         return NULL;
1279 }
1280
1281 /**
1282  * Gets the last unlock operation performed on a particular mutex (i.e., memory
1283  * location). This function identifies the mutex according to the current
1284  * action, which is presumed to perform on the same mutex.
1285  * @param curr The current ModelAction; also denotes the object location to
1286  * check
1287  * @return The last unlock operation
1288  */
1289 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1290 {
1291         void *location = curr->get_location();
1292
1293         action_list_t *list = obj_map.get(location);
1294         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1295         action_list_t::reverse_iterator rit;
1296         for (rit = list->rbegin(); rit != list->rend(); rit++)
1297                 if ((*rit)->is_unlock() || (*rit)->is_wait())
1298                         return *rit;
1299         return NULL;
1300 }
1301
1302 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1303 {
1304         ModelAction *parent = get_last_action(tid);
1305         if (!parent)
1306                 parent = get_thread(tid)->get_creation();
1307         return parent;
1308 }
1309
1310 /**
1311  * Returns the clock vector for a given thread.
1312  * @param tid The thread whose clock vector we want
1313  * @return Desired clock vector
1314  */
1315 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1316 {
1317         return get_parent_action(tid)->get_cv();
1318 }
1319
1320 /**
1321  * Build up an initial set of all past writes that this 'read' action may read
1322  * from, as well as any previously-observed future values that must still be valid.
1323  *
1324  * @param curr is the current ModelAction that we are exploring; it must be a
1325  * 'read' operation.
1326  */
1327 ModelVector<ModelAction *> *  ModelExecution::build_may_read_from(ModelAction *curr)
1328 {
1329         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1330         unsigned int i;
1331         ASSERT(curr->is_read());
1332
1333         ModelAction *last_sc_write = NULL;
1334
1335         if (curr->is_seqcst())
1336                 last_sc_write = get_last_seq_cst_write(curr);
1337
1338         ModelVector<ModelAction *> * rf_set = new ModelVector<ModelAction *>();
1339         
1340         /* Iterate over all threads */
1341         for (i = 0; i < thrd_lists->size(); i++) {
1342                 /* Iterate over actions in thread, starting from most recent */
1343                 action_list_t *list = &(*thrd_lists)[i];
1344                 action_list_t::reverse_iterator rit;
1345                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1346                         ModelAction *act = *rit;
1347
1348                         /* Only consider 'write' actions */
1349                         if (!act->is_write() || act == curr)
1350                                 continue;
1351
1352                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1353                         bool allow_read = true;
1354
1355                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1356                                 allow_read = false;
1357
1358                         if (allow_read) {
1359                                 /* Only add feasible reads */
1360                                 mo_graph->startChanges();
1361                                 r_modification_order(curr, act);
1362                                 if (!is_infeasible())
1363                                         rf_set->push_back(act);
1364                                 mo_graph->rollbackChanges();
1365                         }
1366
1367                         /* Include at most one act per-thread that "happens before" curr */
1368                         if (act->happens_before(curr))
1369                                 break;
1370                 }
1371         }
1372
1373         if (DBG_ENABLED()) {
1374                 model_print("Building may_read_from set for read action:\n");
1375                 curr->print();
1376                 model_print("End printing may_read_from\n");
1377         }
1378         return rf_set;
1379 }
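/* Illustrative sketch (added annotation, not part of the original source): a
 * caller would typically build the may-read-from set and then pick one of the
 * candidate writes to establish the reads-from relation for the read. The
 * member name example_pick_rf() and the helper choose_candidate() are
 * hypothetical; the sketch is disabled so it does not affect compilation. */
#if 0
void ModelExecution::example_pick_rf(ModelAction *curr)
{
        ModelVector<ModelAction *> *rf_set = build_may_read_from(curr);
        if (!rf_set->empty()) {
                /* choose_candidate() stands in for whatever selection policy the caller uses */
                ModelAction *rf = choose_candidate(rf_set);
                curr->set_read_from(rf);        /* assumed ModelAction API */
        }
        delete rf_set;
}
#endif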
1380
1381 /**
1382  * @brief Get an action representing an uninitialized atomic
1383  *
1384  * This function may create a new one or try to retrieve one from the NodeStack
1385  *
1386  * @param curr The current action, which prompts the creation of an UNINIT action
1387  * @return A pointer to the UNINIT ModelAction
1388  */
1389 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
1390 {
1391         Node *node = curr->get_node();
1392         ModelAction *act = node->get_uninit_action();
1393         if (!act) {
1394                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1395                 node->set_uninit_action(act);
1396         }
1397         act->create_cv(NULL);
1398         return act;
1399 }
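/* Note (added annotation): the UNINIT action models the state of an atomic
 * location before any write to it, so a read that is not preceded by any
 * store can still observe a value (params->uninitvalue). */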
1400
1401 static void print_list(const action_list_t *list)
1402 {
1403         action_list_t::const_iterator it;
1404
1405         model_print("------------------------------------------------------------------------------------\n");
1406         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1407         model_print("------------------------------------------------------------------------------------\n");
1408
1409         unsigned int hash = 0;
1410
1411         for (it = list->begin(); it != list->end(); it++) {
1412                 const ModelAction *act = *it;
1413                 if (act->get_seq_number() > 0)
1414                         act->print();
1415                 hash = hash^(hash<<3)^((*it)->hash());
1416         }
1417         model_print("HASH %u\n", hash);
1418         model_print("------------------------------------------------------------------------------------\n");
1419 }
1420
1421 #if SUPPORT_MOD_ORDER_DUMP
1422 void ModelExecution::dumpGraph(char *filename) const
1423 {
1424         char buffer[200];
1425         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
1426         FILE *file = fopen(buffer, "w");
1427         fprintf(file, "digraph %s {\n", filename);
1428         mo_graph->dumpNodes(file);
1429         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1430
1431         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
1432                 ModelAction *act = *it;
1433                 if (act->is_read()) {
1434                         mo_graph->dot_print_node(file, act);
1435                         mo_graph->dot_print_edge(file,
1436                                                 act->get_reads_from(),
1437                                                 act,
1438                                                 "label=\"rf\", color=red, weight=2");
1439                 }
1440                 if (thread_array[id_to_int(act->get_tid())]) {
1441                         mo_graph->dot_print_edge(file,
1442                                         thread_array[id_to_int(act->get_tid())],
1443                                         act,
1444                                         "label=\"sb\", color=blue, weight=400");
1445                 }
1446
1447                 thread_array[id_to_int(act->get_tid())] = act;
1448         }
1449         fprintf(file, "}\n");
1450         model_free(thread_array);
1451         fclose(file);
1452 }
1453 #endif
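/* Note (added annotation): the .dot files written by dumpGraph() can be
 * rendered with Graphviz, e.g.
 *
 *   dot -Tpng graph0001.dot -o graph0001.png
 *
 * where the file name follows the "graph%04d" pattern used by print_summary()
 * below. */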
1454
1455 /** @brief Prints an execution trace summary. */
1456 void ModelExecution::print_summary() const
1457 {
1458 #if SUPPORT_MOD_ORDER_DUMP
1459         char buffername[100];
1460         snprintf(buffername, sizeof(buffername), "exec%04d", get_execution_number());
1461         mo_graph->dumpGraphToFile(buffername);
1462         snprintf(buffername, sizeof(buffername), "graph%04d", get_execution_number());
1463         dumpGraph(buffername);
1464 #endif
1465
1466         model_print("Execution trace %d:", get_execution_number());
1467         if (isfeasibleprefix()) {
1468                 if (scheduler->all_threads_sleeping())
1469                         model_print(" SLEEP-SET REDUNDANT");
1470                 if (have_bug_reports())
1471                         model_print(" DETECTED BUG(S)");
1472         } else
1473                 print_infeasibility(" INFEASIBLE");
1474         model_print("\n");
1475
1476         print_list(&action_trace);
1477         model_print("\n");
1478
1479 }
1480
1481 /**
1482  * Add a Thread to the system for the first time. Should only be called once
1483  * per thread.
1484  * @param t The Thread to add
1485  */
1486 void ModelExecution::add_thread(Thread *t)
1487 {
1488         unsigned int i = id_to_int(t->get_id());
1489         if (i >= thread_map.size())
1490                 thread_map.resize(i + 1);
1491         thread_map[i] = t;
1492         if (!t->is_model_thread())
1493                 scheduler->add_thread(t);
1494 }
1495
1496 /**
1497  * @brief Get a Thread reference by its ID
1498  * @param tid The Thread's ID
1499  * @return A Thread reference
1500  */
1501 Thread * ModelExecution::get_thread(thread_id_t tid) const
1502 {
1503         unsigned int i = id_to_int(tid);
1504         if (i < thread_map.size())
1505                 return thread_map[i];
1506         return NULL;
1507 }
1508
1509 /**
1510  * @brief Get a reference to the Thread in which a ModelAction was executed
1511  * @param act The ModelAction
1512  * @return A Thread reference
1513  */
1514 Thread * ModelExecution::get_thread(const ModelAction *act) const
1515 {
1516         return get_thread(act->get_tid());
1517 }
1518
1519 /**
1520  * @brief Get a Thread reference by its pthread ID
1521  * @param pid The pthread's ID
1522  * @return A Thread reference, or NULL if no Thread has that ID
1523  */
1524 Thread * ModelExecution::get_pthread(pthread_t pid) {
1525         union {
1526                 pthread_t p;
1527                 uint32_t v;
1528         } x;
1529         x.p = pid;
1530         uint32_t thread_id = x.v;
1531         if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1532         return NULL;
1533 }
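/* Note (added annotation): the union above reinterprets the pthread_t handle
 * as a 32-bit index. This assumes the model hands out small consecutive IDs
 * (bounded by pthread_counter) in place of real OS pthread_t values when a
 * thread is created through its pthread_create wrapper. A compile-time check
 * of that assumption could look like the disabled sketch below. */
#if 0
static_assert(sizeof(pthread_t) >= sizeof(uint32_t),
              "pthread_t must be wide enough to carry the model's thread id");
#endif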
1534
1535 /**
1536  * @brief Check if a Thread is currently enabled
1537  * @param t The Thread to check
1538  * @return True if the Thread is currently enabled
1539  */
1540 bool ModelExecution::is_enabled(Thread *t) const
1541 {
1542         return scheduler->is_enabled(t);
1543 }
1544
1545 /**
1546  * @brief Check if a Thread is currently enabled
1547  * @param tid The ID of the Thread to check
1548  * @return True if the Thread is currently enabled
1549  */
1550 bool ModelExecution::is_enabled(thread_id_t tid) const
1551 {
1552         return scheduler->is_enabled(tid);
1553 }
1554
1555 /**
1556  * @brief Select the next thread to execute based on the current action
1557  *
1558  * RMW actions occur in two parts and must not be split; THREAD_CREATE actions
1559  * should be followed immediately by the execution of their child thread. In
1560  * either case, the current action determines the next thread to schedule.
1561  *
1562  * @param curr The current action
1563  * @return The next thread to run, if the current action will determine this
1564  * selection; otherwise NULL
1565  */
1566 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1567 {
1568         /* Do not split atomic RMW */
1569         if (curr->is_rmwr())
1570                 return get_thread(curr);
1571         if (curr->is_write()) {
1572                 std::memory_order order = curr->get_mo();
1573                 switch(order) {
1574                         case std::memory_order_relaxed:
1575                         case std::memory_order_release:
1576                                 /* Keep running the writing thread after relaxed/release stores */
1577                                 return get_thread(curr);
1578                         default:
1579                                 return NULL;
1580                 }
1581         }
1582
1583         /* Follow CREATE with the created thread (strictly not needed here,
1584          * because model.cc already takes care of this). */
1585         if (curr->get_type() == THREAD_CREATE)
1586                 return curr->get_thread_operand();
1587         if (curr->get_type() == PTHREAD_CREATE)
1588                 return curr->get_thread_operand();
1589
1590         return NULL;
1591 }
1592
1593 /**
1594  * Takes the next step in the execution, if possible.
1595  * @param curr The current step to take
1596  * @return The next Thread that must run, if the current action dictates the
1597  * choice (see action_select_next_thread()); otherwise NULL
1598  */
1599 Thread * ModelExecution::take_step(ModelAction *curr)
1600 {
1601         Thread *curr_thrd = get_thread(curr);
1602         ASSERT(curr_thrd->get_state() == THREAD_READY);
1603
1604         ASSERT(check_action_enabled(curr)); /* May have side effects? */
1605         curr = check_current_action(curr);
1606         ASSERT(curr);
1607
1608         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1609                 scheduler->remove_thread(curr_thrd);
1610
1611         return action_select_next_thread(curr);
1612 }
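/* Illustrative sketch (added annotation, not part of the original source):
 * the driver in model.cc repeatedly hands the next pending action to
 * take_step() and honors the returned thread, if any; a NULL result leaves
 * the choice of the next thread to the Scheduler. next_pending_action() is
 * hypothetical, and the sketch is disabled so it does not affect compilation. */
#if 0
static void example_driver(ModelExecution *exec)
{
        Thread *forced = NULL;
        ModelAction *act;
        while ((act = next_pending_action(forced)) != NULL) {
                forced = exec->take_step(act);
        }
}
#endif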
1613