Revert "delete plugins"
[c11tester.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <new>
4 #include <stdarg.h>
5
6 #include "model.h"
7 #include "execution.h"
8 #include "action.h"
9 #include "nodestack.h"
10 #include "schedule.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "datarace.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
17
18 #define INITIAL_THREAD_ID       0
19
20 /**
21  * Structure for holding small ModelChecker members that should be snapshotted
22  */
23 struct model_snapshot_members {
24         model_snapshot_members() :
25                 /* First thread created will have id INITIAL_THREAD_ID */
26                 next_thread_id(INITIAL_THREAD_ID),
27                 used_sequence_numbers(0),
28                 next_backtrack(NULL),
29                 bugs(),
30                 too_many_reads(false),
31                 no_valid_reads(false),
32                 bad_synchronization(false),
33                 bad_sc_read(false),
34                 asserted(false)
35         { }
36
37         ~model_snapshot_members() {
38                 for (unsigned int i = 0; i < bugs.size(); i++)
39                         delete bugs[i];
40                 bugs.clear();
41         }
42
43         unsigned int next_thread_id;
44         modelclock_t used_sequence_numbers;
45         ModelAction *next_backtrack;
46         SnapVector<bug_message *> bugs;
47         bool too_many_reads;
48         bool no_valid_reads;
49         /** @brief Incorrectly-ordered synchronization was made */
50         bool bad_synchronization;
51         bool bad_sc_read;
52         bool asserted;
53
54         SNAPSHOTALLOC
55 };
56
57 /** @brief Constructor */
58 ModelExecution::ModelExecution(ModelChecker *m,
59                 const struct model_params *params,
60                 Scheduler *scheduler,
61                 NodeStack *node_stack) :
62         model(m),
63         params(params),
64         scheduler(scheduler),
65         action_trace(),
66         thread_map(2), /* We'll always need at least 2 threads */
67         pthread_map(0),
68         pthread_counter(0),
69         obj_map(),
70         condvar_waiters_map(),
71         obj_thrd_map(),
72         mutex_map(),
73         thrd_last_action(1),
74         thrd_last_fence_release(),
75         node_stack(node_stack),
76         priv(new struct model_snapshot_members()),
77         mo_graph(new CycleGraph())
78 {
79         /* Initialize a model-checker thread, for special ModelActions */
80         model_thread = new Thread(get_next_id());
81         add_thread(model_thread);
82         scheduler->register_engine(this);
83         node_stack->register_engine(this);
84 }
85
86 /** @brief Destructor */
87 ModelExecution::~ModelExecution()
88 {
89         for (unsigned int i = 0; i < get_num_threads(); i++)
90                 delete get_thread(int_to_id(i));
91
92         delete mo_graph;
93         delete priv;
94 }
95
96 int ModelExecution::get_execution_number() const
97 {
98         return model->get_execution_number();
99 }
100
101 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
102 {
103         action_list_t *tmp = hash->get(ptr);
104         if (tmp == NULL) {
105                 tmp = new action_list_t();
106                 hash->put(ptr, tmp);
107         }
108         return tmp;
109 }
110
111 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
112 {
113         SnapVector<action_list_t> *tmp = hash->get(ptr);
114         if (tmp == NULL) {
115                 tmp = new SnapVector<action_list_t>();
116                 hash->put(ptr, tmp);
117         }
118         return tmp;
119 }
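/*
 * Illustrative sketch (editorial note, not part of the original source):
 * both helpers implement a get-or-create pattern, so callers never see a
 * NULL list for a location.  A typical use, as in add_action_to_lists():
 *
 *   action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
 *   list->push_back(act);   // list is guaranteed to be non-NULL here
 */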
120
121 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
122 {
123         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
124         if (wrv==NULL)
125                 return NULL;
126         unsigned int thread=id_to_int(tid);
127         if (thread < wrv->size())
128                 return &(*wrv)[thread];
129         else
130                 return NULL;
131 }
132
133 /** @return a thread ID for a new Thread */
134 thread_id_t ModelExecution::get_next_id()
135 {
136         return priv->next_thread_id++;
137 }
138
139 /** @return the number of user threads created during this execution */
140 unsigned int ModelExecution::get_num_threads() const
141 {
142         return priv->next_thread_id;
143 }
144
145 /** @return a sequence number for a new ModelAction */
146 modelclock_t ModelExecution::get_next_seq_num()
147 {
148         return ++priv->used_sequence_numbers;
149 }
150
151 /**
152  * @brief Should the current action wake up a given thread?
153  *
154  * @param curr The current action
155  * @param thread The thread that we might wake up
156  * @return True, if we should wake up the sleeping thread; false otherwise
157  */
158 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
159 {
160         const ModelAction *asleep = thread->get_pending();
161         /* Don't allow partial RMW to wake anyone up */
162         if (curr->is_rmwr())
163                 return false;
164         /* Synchronizing actions may have been backtracked */
165         if (asleep->could_synchronize_with(curr))
166                 return true;
167         /* A release operation may allow a sleeping acquire fence to synchronize */
168         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
169                 return true;
170         /* Fence-release + store can wake a load-acquire on the same location */
171         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
172                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
173                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
174                         return true;
175         }
176         return false;
177 }
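/*
 * Illustrative example of the last rule above (editorial sketch; C++11 atomic
 * syntax is used for exposition only): if the sleeping thread is blocked on
 *   r = x.load(memory_order_acquire);
 * and the current thread has already executed
 *   atomic_thread_fence(memory_order_release); ... x.store(1, memory_order_relaxed);
 * then the relaxed store (curr) may let the blocked load synchronize through
 * the earlier fence-release, so the sleeping thread is worth waking up.
 */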
178
179 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
180 {
181         for (unsigned int i = 0; i < get_num_threads(); i++) {
182                 Thread *thr = get_thread(int_to_id(i));
183                 if (scheduler->is_sleep_set(thr)) {
184                         if (should_wake_up(curr, thr))
185                                 /* Remove this thread from sleep set */
186                                 scheduler->remove_sleep(thr);
187                 }
188         }
189 }
190
191 /** @brief Alert the model-checker that an incorrectly-ordered
192  * synchronization was made */
193 void ModelExecution::set_bad_synchronization()
194 {
195         priv->bad_synchronization = true;
196 }
197
198 /** @brief Alert the model-checker that an invalid (incorrectly-ordered)
199  * seq_cst read was made */
200 void ModelExecution::set_bad_sc_read()
201 {
202         priv->bad_sc_read = true;
203 }
204
205 bool ModelExecution::assert_bug(const char *msg)
206 {
207         priv->bugs.push_back(new bug_message(msg));
208
209         if (isfeasibleprefix()) {
210                 set_assert();
211                 return true;
212         }
213         return false;
214 }
215
216 /** @return True, if any bugs have been reported for this execution */
217 bool ModelExecution::have_bug_reports() const
218 {
219         return priv->bugs.size() != 0;
220 }
221
222 SnapVector<bug_message *> * ModelExecution::get_bugs() const
223 {
224         return &priv->bugs;
225 }
226
227 /**
228  * Check whether the current trace has triggered an assertion which should halt
229  * its execution.
230  *
231  * @return True, if the execution should be aborted; false otherwise
232  */
233 bool ModelExecution::has_asserted() const
234 {
235         return priv->asserted;
236 }
237
238 /**
239  * Trigger a trace assertion which should cause this execution to be halted.
240  * This can be due to a detected bug or due to an infeasibility that should
241  * halt ASAP.
242  */
243 void ModelExecution::set_assert()
244 {
245         priv->asserted = true;
246 }
247
248 /**
249  * Check if we are in a deadlock. Should only be called at the end of an
250  * execution, although it should not give false positives in the middle of an
251  * execution (there should be some ENABLED thread).
252  *
253  * @return True if program is in a deadlock; false otherwise
254  */
255 bool ModelExecution::is_deadlocked() const
256 {
257         bool blocking_threads = false;
258         for (unsigned int i = 0; i < get_num_threads(); i++) {
259                 thread_id_t tid = int_to_id(i);
260                 if (is_enabled(tid))
261                         return false;
262                 Thread *t = get_thread(tid);
263                 if (!t->is_model_thread() && t->get_pending())
264                         blocking_threads = true;
265         }
266         return blocking_threads;
267 }
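/*
 * Illustrative example (editorial sketch): the classic ABBA deadlock
 *   thread 1: m1.lock(); m2.lock();
 *   thread 2: m2.lock(); m1.lock();
 * can reach a state in which no thread is enabled while both threads still
 * have a pending lock action; is_deadlocked() then returns true when checked
 * at the end of the execution.
 */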
268
269 /**
270  * Check if this is a complete execution. That is, have all threads completed
271  * execution (rather than exiting because sleep sets have forced a redundant
272  * execution).
273  *
274  * @return True if the execution is complete.
275  */
276 bool ModelExecution::is_complete_execution() const
277 {
278         for (unsigned int i = 0; i < get_num_threads(); i++)
279                 if (is_enabled(int_to_id(i)))
280                         return false;
281         return true;
282 }
283
284
285 /**
286  * Processes a read model action.
287  * @param curr is the read model action to process.
288  * @param rf_set is the set of model actions we can possibly read from
289  * @return True if processing this read updates the mo_graph.
290  */
291 bool ModelExecution::process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set)
292 {
293         int random_index = random() % rf_set->size();
294         bool updated = false;
295                 
296         const ModelAction *rf = (*rf_set)[random_index];
297         ASSERT(rf);
298         
299         mo_graph->startChanges();
300         
301         updated = r_modification_order(curr, rf);
302         read_from(curr, rf);
303         mo_graph->commitChanges();
304         get_thread(curr)->set_return_value(curr->get_return_value());
305         return updated;
306 }
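/*
 * Editorial note (illustrative): the reads-from write is chosen uniformly at
 * random from rf_set, which is the random-testing strategy of this tester.
 * For example, a relaxed load with candidate writes {W_uninit, W1, W2} reads
 * from each candidate with probability 1/3 on any given execution; rf_set is
 * assumed to be non-empty here, since random_index is computed modulo its size.
 */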
307
308 /**
309  * Processes a lock, trylock, unlock, wait, or notify model action.
310  * @param curr is the mutex operation to process.
311  *
312  * The trylock operation checks whether the lock is taken.  If not,
313  * it falls through to the normal lock case.  If so, it reports
314  * failure to the caller.
315  *
316  * The lock operation has already been checked that it is enabled, so
317  * it just grabs the lock and synchronizes with the previous unlock.
318  *
319  * The unlock operation has to re-enable all of the threads that are
320  * waiting on the lock.
321  *
322  * @return True if synchronization was updated; false otherwise
323  */
324 bool ModelExecution::process_mutex(ModelAction *curr)
325 {
326         cdsc::mutex *mutex = curr->get_mutex();
327         struct cdsc::mutex_state *state = NULL;
328
329         if (mutex)
330                 state = mutex->get_state();
331
332         switch (curr->get_type()) {
333         case ATOMIC_TRYLOCK: {
334                 bool success = !state->locked;
335                 curr->set_try_lock(success);
336                 if (!success) {
337                         get_thread(curr)->set_return_value(0);
338                         break;
339                 }
340                 get_thread(curr)->set_return_value(1);
341         }
342                 //otherwise fall into the lock case
343         case ATOMIC_LOCK: {
344                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
345                         assert_bug("Lock access before initialization");
346                 state->locked = get_thread(curr);
347                 ModelAction *unlock = get_last_unlock(curr);
348                 //synchronize with the previous unlock statement
349                 if (unlock != NULL) {
350                         synchronize(unlock, curr);
351                         return true;
352                 }
353                 break;
354         }
355         case ATOMIC_WAIT:
356         case ATOMIC_UNLOCK: {
357                 /* wake up the other threads */
358                 for (unsigned int i = 0; i < get_num_threads(); i++) {
359                         Thread *t = get_thread(int_to_id(i));
360                         Thread *curr_thrd = get_thread(curr);
361                         if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
362                                 scheduler->wake(t);
363                 }
364
365                 /* unlock the lock - after checking who was waiting on it */
366                 state->locked = NULL;
367
368                 if (!curr->is_wait())
369                         break; /* The rest is only for ATOMIC_WAIT */
370
371                 break;
372         }
373         case ATOMIC_NOTIFY_ALL: {
374                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
375                 //activate all the waiting threads
376                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
377                         scheduler->wake(get_thread(*rit));
378                 }
379                 waiters->clear();
380                 break;
381         }
382         case ATOMIC_NOTIFY_ONE: {
383                 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
384
385                 //BCD -- TOFIX FUZZER
386                 //THIS SHOULD BE A RANDOM CHOICE
387                 //              int wakeupthread = curr->get_node()->get_misc();
388                 int wakeupthread = 0;
389                 action_list_t::iterator it = waiters->begin();
390
391                 // WL
392                 if (it == waiters->end())
393                         break;
394
395                 advance(it, wakeupthread);
396                 scheduler->wake(get_thread(*it));
397                 waiters->erase(it);
398                 break;
399         }
400
401         default:
402                 ASSERT(0);
403         }
404         return false;
405 }
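/*
 * Illustrative example (editorial sketch, assuming the usual std::mutex-style
 * interface of cdsc::mutex): a user-level critical section such as
 *   cdsc::mutex m;
 *   m.lock();      // ATOMIC_LOCK   - synchronizes with the previous unlock
 *   ...            // critical section
 *   m.unlock();    // ATOMIC_UNLOCK - re-enables threads blocked on the lock
 * drives the ATOMIC_LOCK / ATOMIC_UNLOCK cases above, while m.try_lock()
 * maps to ATOMIC_TRYLOCK and falls through to the lock case only on success.
 */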
406
407 /**
408  * Process a write ModelAction
409  * @param curr The ModelAction to process
410  * @return True if the mo_graph was updated or promises were resolved
411  */
412 bool ModelExecution::process_write(ModelAction *curr)
413 {
414
415         bool updated_mod_order = w_modification_order(curr);
416
417         mo_graph->commitChanges();
418
419         get_thread(curr)->set_return_value(VALUE_NONE);
420         return updated_mod_order;
421 }
422
423 /**
424  * Process a fence ModelAction
425  * @param curr The ModelAction to process
426  * @return True if synchronization was updated
427  */
428 bool ModelExecution::process_fence(ModelAction *curr)
429 {
430         /*
431          * fence-relaxed: no-op
432          * fence-release: only log the occurrence (not in this function), for
433          *   use in later synchronization
434          * fence-acquire (this function): search for hypothetical release
435          *   sequences
436          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
437          */
438         bool updated = false;
439         if (curr->is_acquire()) {
440                 action_list_t *list = &action_trace;
441                 action_list_t::reverse_iterator rit;
442                 /* Find X : is_read(X) && X --sb-> curr */
443                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
444                         ModelAction *act = *rit;
445                         if (act == curr)
446                                 continue;
447                         if (act->get_tid() != curr->get_tid())
448                                 continue;
449                         /* Stop at the beginning of the thread */
450                         if (act->is_thread_start())
451                                 break;
452                         /* Stop once we reach a prior fence-acquire */
453                         if (act->is_fence() && act->is_acquire())
454                                 break;
455                         if (!act->is_read())
456                                 continue;
457                         /* read-acquire will find its own release sequences */
458                         if (act->is_acquire())
459                                 continue;
460
461                         /* Establish hypothetical release sequences */
462                         rel_heads_list_t release_heads;
463                         get_release_seq_heads(curr, act, &release_heads);
464                         for (unsigned int i = 0; i < release_heads.size(); i++)
465                                 synchronize(release_heads[i], curr);
466                         if (release_heads.size() != 0)
467                                 updated = true;
468                 }
469         }
470         return updated;
471 }
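/*
 * Illustrative example of the fence-acquire rule (editorial sketch; C++11
 * atomic syntax used for exposition):
 *   thread 1: data = 42; x.store(1, memory_order_release);
 *   thread 2: r = x.load(memory_order_relaxed);
 *             atomic_thread_fence(memory_order_acquire);   // curr
 *             use(data);
 * The acquire fence scans backwards for the earlier relaxed load of x, finds
 * the release sequence headed by the release store in thread 1, and
 * synchronizes with it, so the write to data becomes visible to thread 2.
 */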
472
473 /**
474  * @brief Process the current action for thread-related activity
475  *
476  * Performs current-action processing for a THREAD_* ModelAction. Processing
477  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
478  * synchronization, etc.  This function is a no-op for non-THREAD actions
479  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
480  *
481  * @param curr The current action
482  * @return True if synchronization was updated or a thread completed
483  */
484 bool ModelExecution::process_thread_action(ModelAction *curr)
485 {
486         bool updated = false;
487
488         switch (curr->get_type()) {
489         case THREAD_CREATE: {
490                 thrd_t *thrd = (thrd_t *)curr->get_location();
491                 struct thread_params *params = (struct thread_params *)curr->get_value();
492                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
493                 curr->set_thread_operand(th);
494                 add_thread(th);
495                 th->set_creation(curr);
496                 break;
497         }
498         case PTHREAD_CREATE: {
499                 (*(pthread_t *)curr->get_location()) = pthread_counter++;       
500
501                 struct pthread_params *params = (struct pthread_params *)curr->get_value();
502                 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
503                 curr->set_thread_operand(th);
504                 add_thread(th);
505                 th->set_creation(curr);
506
507                 if ( pthread_map.size() < pthread_counter )
508                         pthread_map.resize( pthread_counter );
509                 pthread_map[ pthread_counter-1 ] = th;
510
511                 break;
512         }
513         case THREAD_JOIN: {
514                 Thread *blocking = curr->get_thread_operand();
515                 ModelAction *act = get_last_action(blocking->get_id());
516                 synchronize(act, curr);
517                 updated = true; /* trigger rel-seq checks */
518                 break;
519         }
520         case PTHREAD_JOIN: {
521                 Thread *blocking = curr->get_thread_operand();
522                 ModelAction *act = get_last_action(blocking->get_id());
523                 synchronize(act, curr);
524                 updated = true; /* trigger rel-seq checks */
525                 break; // WL: to be added (modified)
526         }
527
528         case THREAD_FINISH: {
529                 Thread *th = get_thread(curr);
530                 /* Wake up any joining threads */
531                 for (unsigned int i = 0; i < get_num_threads(); i++) {
532                         Thread *waiting = get_thread(int_to_id(i));
533                         if (waiting->waiting_on() == th &&
534                                         waiting->get_pending()->is_thread_join())
535                                 scheduler->wake(waiting);
536                 }
537                 th->complete();
538                 updated = true; /* trigger rel-seq checks */
539                 break;
540         }
541         case THREAD_START: {
542                 break;
543         }
544         default:
545                 break;
546         }
547
548         return updated;
549 }
550
551 /**
552  * Initialize the current action by performing one or more of the following
553  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
554  * in the NodeStack, and allocating and initializing clock vectors.
556  *
557  * @param curr The current action, as passed from the user context; may be
558  * freed/invalidated after the execution of this function, with a different
559  * action "returned" in its place (pass-by-reference)
560  * @return True if curr is a newly-explored action; false otherwise
561  */
562 bool ModelExecution::initialize_curr_action(ModelAction **curr)
563 {
564         ModelAction *newcurr;
565
566         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
567                 newcurr = process_rmw(*curr);
568                 delete *curr;
569
570                 *curr = newcurr;
571                 return false;
572         }
573
574         (*curr)->set_seq_number(get_next_seq_num());
575
576         newcurr = node_stack->explore_action(*curr);
577         if (newcurr) {
578                 /* First restore type and order in case of RMW operation */
579                 if ((*curr)->is_rmwr())
580                         newcurr->copy_typeandorder(*curr);
581
582                 ASSERT((*curr)->get_location() == newcurr->get_location());
583                 newcurr->copy_from_new(*curr);
584
585                 /* Discard duplicate ModelAction; use action from NodeStack */
586                 delete *curr;
587
588                 /* Always compute new clock vector */
589                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
590
591                 *curr = newcurr;
592                 return false; /* Action was explored previously */
593         } else {
594                 newcurr = *curr;
595
596                 /* Always compute new clock vector */
597                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
598
599                 /* Assign most recent release fence */
600                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
601
602                 return true; /* This was a new ModelAction */
603         }
604 }
605
606 /**
607  * @brief Establish reads-from relation between two actions
608  *
609  * Perform basic operations involved with establishing a concrete rf relation,
610  * including setting the ModelAction data and checking for release sequences.
611  *
612  * @param act The action that is reading (must be a read)
613  * @param rf The action from which we are reading (must be a write)
614  *
615  * @return True if this read established synchronization
616  */
617
618 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
619 {
620         ASSERT(rf);
621         ASSERT(rf->is_write());
622
623         act->set_read_from(rf);
624         if (act->is_acquire()) {
625                 rel_heads_list_t release_heads;
626                 get_release_seq_heads(act, act, &release_heads);
627                 int num_heads = release_heads.size();
628                 for (unsigned int i = 0; i < release_heads.size(); i++)
629                         if (!synchronize(release_heads[i], act))
630                                 num_heads--;
631                 return num_heads > 0;
632         }
633         return false;
634 }
635
636 /**
637  * @brief Synchronizes two actions
638  *
639  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
640  * This function performs the synchronization as well as providing other hooks
641  * for other checks along with synchronization.
642  *
643  * @param first The left-hand side of the synchronizes-with relation
644  * @param second The right-hand side of the synchronizes-with relation
645  * @return True if the synchronization was successful (i.e., was consistent
646  * with the execution order); false otherwise
647  */
648 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
649 {
650         if (*second < *first) {
651                 set_bad_synchronization();
652                 return false;
653         }
654         return second->synchronize_with(first);
655 }
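/*
 * Illustrative example (editorial sketch): for an unlock U in thread 1 and a
 * later lock L in thread 2, process_mutex() calls synchronize(U, L); on
 * success L inherits U's clock vector, so everything ordered before U is now
 * also ordered before L.  If L actually precedes U in the execution order,
 * the call records bad synchronization and returns false instead.
 */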
656
657 /**
658  * @brief Check whether a model action is enabled.
659  *
660  * Checks whether an operation would be successful (i.e., is a lock already
661  * locked, or is the joined thread already complete).
662  *
663  * For yield-blocking, yields are never enabled.
664  *
665  * @param curr is the ModelAction to check whether it is enabled.
666  * @return a bool that indicates whether the action is enabled.
667  */
668 bool ModelExecution::check_action_enabled(ModelAction *curr) {
669         if (curr->is_lock()) {
670                 cdsc::mutex *lock = curr->get_mutex();
671                 struct cdsc::mutex_state *state = lock->get_state();
672                 if (state->locked)
673                         return false;
674         } else if (curr->is_thread_join()) {
675                 Thread *blocking = curr->get_thread_operand();
676                 if (!blocking->is_complete()) {
677                         return false;
678                 }
679         }
680
681         return true;
682 }
683
684 /**
685  * This is the heart of the model checker routine. It performs model-checking
686  * actions corresponding to a given "current action." Among other processes, it
687  * calculates reads-from relationships, updates synchronization clock vectors,
688  * forms a memory_order constraints graph, and handles replay/backtrack
689  * execution when running permutations of previously-observed executions.
690  *
691  * @param curr The current action to process
692  * @return The ModelAction that is actually executed; may be different than
693  * curr
694  */
695 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
696 {
697         ASSERT(curr);
698         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
699         bool newly_explored = initialize_curr_action(&curr);
700
701         DBG();
702
703         wake_up_sleeping_actions(curr);
704
705         /* Add the action to lists before any other model-checking tasks */
706         if (!second_part_of_rmw)
707                 add_action_to_lists(curr);
708
709         ModelVector<ModelAction *> * rf_set = NULL;
710         /* Build may_read_from set for newly-created actions */
711         if (newly_explored && curr->is_read())
712                 rf_set = build_may_read_from(curr);
713
714         process_thread_action(curr);
715
716         if (curr->is_read() && !second_part_of_rmw) {
717                 process_read(curr, rf_set);
718                 delete rf_set;
719         }
720
721         if (curr->is_write())
722                 process_write(curr);
723
724         if (curr->is_fence())
725                 process_fence(curr);
726
727         if (curr->is_mutex_op())
728                 process_mutex(curr);
729
730         return curr;
731 }
732
733 /**
734  * This is the strongest feasibility check available.
735  * @return whether the current trace (partial or complete) must be a prefix of
736  * a feasible trace.
737  */
738 bool ModelExecution::isfeasibleprefix() const
739 {
740         return !is_infeasible();
741 }
742
743 /**
744  * Print diagnostic information about an infeasible execution
745  * @param prefix A string to prefix the output with; if NULL, then a default
746  * message prefix will be provided
747  */
748 void ModelExecution::print_infeasibility(const char *prefix) const
749 {
750         char buf[100];
751         char *ptr = buf;
752         if (mo_graph->checkForCycles())
753                 ptr += sprintf(ptr, "[mo cycle]");
754         if (priv->too_many_reads)
755                 ptr += sprintf(ptr, "[too many reads]");
756         if (priv->no_valid_reads)
757                 ptr += sprintf(ptr, "[no valid reads-from]");
758         if (priv->bad_synchronization)
759                 ptr += sprintf(ptr, "[bad sw ordering]");
760         if (priv->bad_sc_read)
761                 ptr += sprintf(ptr, "[bad sc read]");
762         if (ptr != buf)
763                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
764 }
765
766 /**
767  * Check if the current partial trace is infeasible. Does not check any
768  * end-of-execution flags, which might rule out the execution. Thus, this is
769  * useful only for ruling an execution as infeasible.
770  * @return whether the current partial trace is infeasible.
771  */
772 bool ModelExecution::is_infeasible() const
773 {
774         return mo_graph->checkForCycles() ||
775                 priv->no_valid_reads ||
776                 priv->too_many_reads ||
777                 priv->bad_synchronization ||
778                 priv->bad_sc_read;
779 }
780
781 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
782 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
783         ModelAction *lastread = get_last_action(act->get_tid());
784         lastread->process_rmw(act);
785         if (act->is_rmw()) {
786                 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
787                 mo_graph->commitChanges();
788         }
789         return lastread;
790 }
791
792 /**
793  * @brief Updates the mo_graph with the constraints imposed from the current
794  * read.
795  *
796  * Basic idea is the following: Go through each other thread and find
797  * the last action that happened before our read.  Two cases:
798  *
799  * -# The action is a write: that write must either occur before
800  * the write we read from or be the write we read from.
801  * -# The action is a read: the write that that action read from
802  * must occur before the write we read from or be the same write.
803  *
804  * @param curr The current action. Must be a read.
805  * @param rf The ModelAction that curr reads from. Must be a write.
806  * @return True if modification order edges were added; false otherwise
807  */
808 template <typename rf_type>
809 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
810 {
811         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
812         unsigned int i;
813         bool added = false;
814         ASSERT(curr->is_read());
815
816         /* Last SC fence in the current thread */
817         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
818         ModelAction *last_sc_write = NULL;
819         if (curr->is_seqcst())
820                 last_sc_write = get_last_seq_cst_write(curr);
821
822         /* Iterate over all threads */
823         for (i = 0; i < thrd_lists->size(); i++) {
824                 /* Last SC fence in thread i */
825                 ModelAction *last_sc_fence_thread_local = NULL;
826                 if (int_to_id((int)i) != curr->get_tid())
827                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
828
829                 /* Last SC fence in thread i, before last SC fence in current thread */
830                 ModelAction *last_sc_fence_thread_before = NULL;
831                 if (last_sc_fence_local)
832                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
833
834                 /* Iterate over actions in thread, starting from most recent */
835                 action_list_t *list = &(*thrd_lists)[i];
836                 action_list_t::reverse_iterator rit;
837                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
838                         ModelAction *act = *rit;
839
840                         /* Skip curr */
841                         if (act == curr)
842                                 continue;
843                         /* Don't want to add reflexive edges on 'rf' */
844                         if (act->equals(rf)) {
845                                 if (act->happens_before(curr))
846                                         break;
847                                 else
848                                         continue;
849                         }
850
851                         if (act->is_write()) {
852                                 /* C++, Section 29.3 statement 5 */
853                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
854                                                 *act < *last_sc_fence_thread_local) {
855                                         added = mo_graph->addEdge(act, rf) || added;
856                                         break;
857                                 }
858                                 /* C++, Section 29.3 statement 4 */
859                                 else if (act->is_seqcst() && last_sc_fence_local &&
860                                                 *act < *last_sc_fence_local) {
861                                         added = mo_graph->addEdge(act, rf) || added;
862                                         break;
863                                 }
864                                 /* C++, Section 29.3 statement 6 */
865                                 else if (last_sc_fence_thread_before &&
866                                                 *act < *last_sc_fence_thread_before) {
867                                         added = mo_graph->addEdge(act, rf) || added;
868                                         break;
869                                 }
870                         }
871
872                         /*
873                          * Include at most one act per-thread that "happens
874                          * before" curr
875                          */
876                         if (act->happens_before(curr)) {
877                                 if (act->is_write()) {
878                                         added = mo_graph->addEdge(act, rf) || added;
879                                 } else {
880                                         const ModelAction *prevrf = act->get_reads_from();
881                                         if (!prevrf->equals(rf))
882                                                 added = mo_graph->addEdge(prevrf, rf) || added;
883                                 }
884                                 break;
885                         }
886                 }
887         }
888
889         return added;
890 }
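/*
 * Worked example (editorial sketch): suppose thread 1 performs
 *   W1: x.store(1, memory_order_relaxed);
 * and thread 2 performs
 *   R : x.load(memory_order_relaxed);    // curr, reading from some write W2
 * with W1 --hb--> R.  Case (1) above adds the edge W1 --mo--> W2.  If the
 * last action of thread 1 that happens before R were instead a read R1 that
 * read from W0, case (2) adds W0 --mo--> W2 (unless W0 is W2 itself).
 */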
891
892 /**
893  * Updates the mo_graph with the constraints imposed from the current write.
894  *
895  * Basic idea is the following: Go through each other thread and find
896  * the latest action that happened before our write.  Two cases:
897  *
898  * (1) The action is a write => that write must occur before
899  * the current write
900  *
901  * (2) The action is a read => the write that that action read from
902  * must occur before the current write.
903  *
904  * This method also handles two other issues:
905  *
906  * (I) Sequential Consistency: Making sure that if the current write is
907  * seq_cst, that it occurs after the previous seq_cst write.
908  *
909  * (II) Sending the write back to non-synchronizing reads.
910  *
911  * @param curr The current action. Must be a write.
914  * @return True if modification order edges were added; false otherwise
915  */
916 bool ModelExecution::w_modification_order(ModelAction *curr)
917 {
918         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
919         unsigned int i;
920         bool added = false;
921         ASSERT(curr->is_write());
922
923         if (curr->is_seqcst()) {
924                 /* We have to at least see the last sequentially consistent write,
925                          so we are initialized. */
926                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
927                 if (last_seq_cst != NULL) {
928                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
929                 }
930         }
931
932         /* Last SC fence in the current thread */
933         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
934
935         /* Iterate over all threads */
936         for (i = 0; i < thrd_lists->size(); i++) {
937                 /* Last SC fence in thread i, before last SC fence in current thread */
938                 ModelAction *last_sc_fence_thread_before = NULL;
939                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
940                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
941
942                 /* Iterate over actions in thread, starting from most recent */
943                 action_list_t *list = &(*thrd_lists)[i];
944                 action_list_t::reverse_iterator rit;
945                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
946                         ModelAction *act = *rit;
947                         if (act == curr) {
948                                 /*
949                                  * 1) If RMW and it actually read from something, then we
950                                  * already have all relevant edges, so just skip to next
951                                  * thread.
952                                  *
953                                  * 2) If RMW and it didn't read from anything, we should add
954                                  * whatever edge we can get to speed up convergence.
955                                  *
956                                  * 3) If normal write, we need to look at earlier actions, so
957                                  * continue processing list.
958                                  */
959                                 if (curr->is_rmw()) {
960                                         if (curr->get_reads_from() != NULL)
961                                                 break;
962                                         else
963                                                 continue;
964                                 } else
965                                         continue;
966                         }
967
968                         /* C++, Section 29.3 statement 7 */
969                         if (last_sc_fence_thread_before && act->is_write() &&
970                                         *act < *last_sc_fence_thread_before) {
971                                 added = mo_graph->addEdge(act, curr) || added;
972                                 break;
973                         }
974
975                         /*
976                          * Include at most one act per-thread that "happens
977                          * before" curr
978                          */
979                         if (act->happens_before(curr)) {
980                                 /*
981                                  * Note: if act is RMW, just add edge:
982                                  *   act --mo--> curr
983                                  * The following edge should be handled elsewhere:
984                                  *   readfrom(act) --mo--> act
985                                  */
986                                 if (act->is_write())
987                                         added = mo_graph->addEdge(act, curr) || added;
988                                 else if (act->is_read()) {
989                                         //if previous read accessed a null, just keep going
990                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
991                                 }
992                                 break;
993                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
994                                                      !act->same_thread(curr)) {
995                                 /* We have an action that:
996                                    (1) did not happen before us
997                                    (2) is a read and we are a write
998                                    (3) cannot synchronize with us
999                                    (4) is in a different thread
1000                                    =>
1001                                    that read could potentially read from our write.  Note that
1002                                    these checks are overly conservative at this point, we'll
1003                                    do more checks before actually removing the
1004                                    pendingfuturevalue.
1005
1006                                  */
1007
1008                         }
1009                 }
1010         }
1011
1012         return added;
1013 }
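/*
 * Worked example (editorial sketch): if thread 1 performs
 *   W1: x.store(1, memory_order_relaxed);
 * and thread 2 later performs
 *   W2: x.store(2, memory_order_relaxed);    // curr
 * with W1 --hb--> W2 (say via a release/acquire handoff on another variable),
 * case (1) above adds the edge W1 --mo--> W2.  If thread 1's last action that
 * happens before W2 were instead a read that read from W0, case (2) adds
 * W0 --mo--> W2.
 */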
1014
1015 /**
1016  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1017  * some constraints. This method checks one of the following constraints (others
1018  * require compiler support):
1019  *
1020  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1021  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1022  */
1023 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1024 {
1025         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1026         unsigned int i;
1027         /* Iterate over all threads */
1028         for (i = 0; i < thrd_lists->size(); i++) {
1029                 const ModelAction *write_after_read = NULL;
1030
1031                 /* Iterate over actions in thread, starting from most recent */
1032                 action_list_t *list = &(*thrd_lists)[i];
1033                 action_list_t::reverse_iterator rit;
1034                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1035                         ModelAction *act = *rit;
1036
1037                         /* Don't disallow due to act == reader */
1038                         if (!reader->happens_before(act) || reader == act)
1039                                 break;
1040                         else if (act->is_write())
1041                                 write_after_read = act;
1042                         else if (act->is_read() && act->get_reads_from() != NULL)
1043                                 write_after_read = act->get_reads_from();
1044                 }
1045
1046                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1047                         return false;
1048         }
1049         return true;
1050 }
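/*
 * Illustrative example (editorial sketch):
 *   thread 1: X: r1 = x.load(memory_order_relaxed);
 *             Y: x.store(1, memory_order_relaxed);
 *   thread 2: Z: x.store(2, memory_order_relaxed);
 * If the edge Y --mo--> Z is already in the graph, then letting X read from Z
 * would give X --hb--> Y --mo--> Z with X reading from Z, violating the rule
 * above, so mo_may_allow(Z, X) returns false.
 */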
1051
1052 /**
1053  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1054  * The ModelAction under consideration is expected to be taking part in
1055  * release/acquire synchronization as an object of the "reads from" relation.
1056  * Note that this can only provide release sequence support for RMW chains
1057  * which do not read from the future, as those actions cannot be traced until
1058  * their "promise" is fulfilled. Similarly, we may not even establish the
1059  * presence of a release sequence with certainty, as some modification order
1060  * constraints may be decided further in the future. Thus, this function
1061  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1062  * and a boolean representing certainty.
1063  *
1064  * @param rf The action that might be part of a release sequence. Must be a
1065  * write.
1066  * @param release_heads A pass-by-reference style return parameter. After
1067  * execution of this function, release_heads will contain the heads of all the
1068  * relevant release sequences, if any exist with certainty
1069  * @return true, if the ModelExecution is certain that release_heads is complete;
1070  * false otherwise
1071  */
1072 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1073                 rel_heads_list_t *release_heads) const
1074 {
1075         /* Only check for release sequences if there are no cycles */
1076         if (mo_graph->checkForCycles())
1077                 return false;
1078
1079         for ( ; rf != NULL; rf = rf->get_reads_from()) {
1080                 ASSERT(rf->is_write());
1081
1082                 if (rf->is_release())
1083                         release_heads->push_back(rf);
1084                 else if (rf->get_last_fence_release())
1085                         release_heads->push_back(rf->get_last_fence_release());
1086                 if (!rf->is_rmw())
1087                         break; /* End of RMW chain */
1088
1089                 /** @todo Need to be smarter here...  In the linux lock
1090                  * example, this will run to the beginning of the program for
1091                  * every acquire. */
1092                 /** @todo The way to be smarter here is to keep going until 1
1093                  * thread has a release preceded by an acquire and you've seen
1094                  *       both. */
1095
1096                 /* acq_rel RMW is a sufficient stopping condition */
1097                 if (rf->is_acquire() && rf->is_release())
1098                         return true; /* complete */
1099         };
1100         ASSERT(rf); // Needs to be real write
1101
1102         if (rf->is_release())
1103                 return true; /* complete */
1104
1105         /* else relaxed write
1106          * - check for fence-release in the same thread (29.8, stmt. 3)
1107          * - check modification order for contiguous subsequence
1108          *   -> rf must be same thread as release */
1109
1110         const ModelAction *fence_release = rf->get_last_fence_release();
1111         /* Synchronize with a fence-release unconditionally; we don't need to
1112          * find any more "contiguous subsequence..." for it */
1113         if (fence_release)
1114                 release_heads->push_back(fence_release);
1115
1116         return true; /* complete */
1117 }
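/*
 * Illustrative example (editorial sketch; C++11 atomic syntax for exposition):
 *   thread 1: x.store(1, memory_order_release);        // release head
 *   thread 2: x.fetch_add(1, memory_order_relaxed);    // RMW continues the sequence
 *   thread 3: r = x.load(memory_order_acquire);        // reads from the fetch_add
 * Walking backwards through get_reads_from() from the fetch_add reaches the
 * release store, so release_heads receives that store and thread 3's acquire
 * load synchronizes with it.
 */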
1118
1119 /**
1120  * An interface for getting the release sequence head(s) with which a
1121  * given ModelAction must synchronize. This function only returns a non-empty
1122  * result when it can locate a release sequence head with certainty. Otherwise,
1123  * it may mark the internal state of the ModelExecution so that it will handle
1124  * the release sequence at a later time, causing @a acquire to update its
1125  * synchronization at some later point in execution.
1126  *
1127  * @param acquire The 'acquire' action that may synchronize with a release
1128  * sequence
1129  * @param read The read action that may read from a release sequence; this may
1130  * be the same as acquire, or else an earlier action in the same thread (i.e.,
1131  * when 'acquire' is a fence-acquire)
1132  * @param release_heads A pass-by-reference return parameter. Will be filled
1133  * with the head(s) of the release sequence(s), if they exist with certainty.
1134  * @see ModelExecution::release_seq_heads
1135  */
1136 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
1137                 ModelAction *read, rel_heads_list_t *release_heads)
1138 {
1139         const ModelAction *rf = read->get_reads_from();
1140
1141         release_seq_heads(rf, release_heads);
1142 }
1143
1144 /**
1145  * Performs various bookkeeping operations for the current ModelAction. For
1146  * instance, adds action to the per-object, per-thread action vector and to the
1147  * action trace list of all thread actions.
1148  *
1149  * @param act is the ModelAction to add.
1150  */
1151 void ModelExecution::add_action_to_lists(ModelAction *act)
1152 {
1153         int tid = id_to_int(act->get_tid());
1154         ModelAction *uninit = NULL;
1155         int uninit_id = -1;
1156         action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1157         if (list->empty() && act->is_atomic_var()) {
1158                 uninit = get_uninitialized_action(act);
1159                 uninit_id = id_to_int(uninit->get_tid());
1160                 list->push_front(uninit);
1161         }
1162         list->push_back(act);
1163
1164         action_trace.push_back(act);
1165         if (uninit)
1166                 action_trace.push_front(uninit);
1167
1168         SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1169         if (tid >= (int)vec->size())
1170                 vec->resize(priv->next_thread_id);
1171         (*vec)[tid].push_back(act);
1172         if (uninit)
1173                 (*vec)[uninit_id].push_front(uninit);
1174
1175         if ((int)thrd_last_action.size() <= tid)
1176                 thrd_last_action.resize(get_num_threads());
1177         thrd_last_action[tid] = act;
1178         if (uninit)
1179                 thrd_last_action[uninit_id] = uninit;
1180
1181         if (act->is_fence() && act->is_release()) {
1182                 if ((int)thrd_last_fence_release.size() <= tid)
1183                         thrd_last_fence_release.resize(get_num_threads());
1184                 thrd_last_fence_release[tid] = act;
1185         }
1186
1187         if (act->is_wait()) {
1188                 void *mutex_loc = (void *) act->get_value();
1189                 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1190
1191                 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1192                 if (tid >= (int)vec->size())
1193                         vec->resize(priv->next_thread_id);
1194                 (*vec)[tid].push_back(act);
1195         }
1196 }
1197
1198 /**
1199  * @brief Get the last action performed by a particular Thread
1200  * @param tid The thread ID of the Thread in question
1201  * @return The last action in the thread
1202  */
1203 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1204 {
1205         int threadid = id_to_int(tid);
1206         if (threadid < (int)thrd_last_action.size())
1207                 return thrd_last_action[id_to_int(tid)];
1208         else
1209                 return NULL;
1210 }
1211
1212 /**
1213  * @brief Get the last fence release performed by a particular Thread
1214  * @param tid The thread ID of the Thread in question
1215  * @return The last fence release in the thread, if one exists; NULL otherwise
1216  */
1217 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1218 {
1219         int threadid = id_to_int(tid);
1220         if (threadid < (int)thrd_last_fence_release.size())
1221                 return thrd_last_fence_release[id_to_int(tid)];
1222         else
1223                 return NULL;
1224 }
1225
1226 /**
1227  * Gets the last memory_order_seq_cst write (in the total global sequence)
1228  * performed on a particular object (i.e., memory location), not including the
1229  * current action.
1230  * @param curr The current ModelAction; also denotes the object location to
1231  * check
1232  * @return The last seq_cst write
1233  */
1234 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1235 {
1236         void *location = curr->get_location();
1237         action_list_t *list = obj_map.get(location);
1238         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1239         action_list_t::reverse_iterator rit;
1240         for (rit = list->rbegin(); (*rit) != curr; rit++)
1241                 ;
1242         rit++; /* Skip past curr */
1243         for ( ; rit != list->rend(); rit++)
1244                 if ((*rit)->is_write() && (*rit)->is_seqcst())
1245                         return *rit;
1246         return NULL;
1247 }
1248
1249 /**
1250  * Gets the last memory_order_seq_cst fence (in the total global sequence)
1251  * performed in a particular thread, prior to a particular fence.
1252  * @param tid The ID of the thread to check
1253  * @param before_fence The fence from which to begin the search; if NULL, then
1254  * search for the most recent fence in the thread.
1255  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1256  */
1257 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1258 {
1259         /* All fences should have location FENCE_LOCATION */
1260         action_list_t *list = obj_map.get(FENCE_LOCATION);
1261
1262         if (!list)
1263                 return NULL;
1264
1265         action_list_t::reverse_iterator rit = list->rbegin();
1266
1267         if (before_fence) {
1268                 for (; rit != list->rend(); rit++)
1269                         if (*rit == before_fence)
1270                                 break;
1271
1272                 ASSERT(*rit == before_fence);
1273                 rit++;
1274         }
1275
1276         for (; rit != list->rend(); rit++)
1277                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1278                         return *rit;
1279         return NULL;
1280 }
1281
1282 /**
1283  * Gets the last unlock operation performed on a particular mutex (i.e., memory
1284  * location). This function identifies the mutex according to the current
1285  * action, which is presumed to perform on the same mutex.
1286  * action, which is presumed to operate on the same mutex.
1287  * check
1288  * @return The last unlock operation
1289  */
1290 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1291 {
1292         void *location = curr->get_location();
1293
1294         action_list_t *list = obj_map.get(location);
1295         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1296         action_list_t::reverse_iterator rit;
1297         for (rit = list->rbegin(); rit != list->rend(); rit++)
1298                 if ((*rit)->is_unlock() || (*rit)->is_wait())
1299                         return *rit;
1300         return NULL;
1301 }
1302
1303 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1304 {
1305         ModelAction *parent = get_last_action(tid);
1306         if (!parent)
1307                 parent = get_thread(tid)->get_creation();
1308         return parent;
1309 }
1310
1311 /**
1312  * Returns the clock vector for a given thread.
1313  * @param tid The thread whose clock vector we want
1314  * @return Desired clock vector
1315  */
1316 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1317 {
1318         return get_parent_action(tid)->get_cv();
1319 }
1320
1321 /**
1322  * Build up an initial set of all past writes that this 'read' action may read
1323  * from.
1324  *
1325  * @param curr is the current ModelAction that we are exploring; it must be a
1326  * 'read' operation.
1327  */
1328 ModelVector<ModelAction *> *  ModelExecution::build_may_read_from(ModelAction *curr)
1329 {
1330         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1331         unsigned int i;
1332         ASSERT(curr->is_read());
1333
1334         ModelAction *last_sc_write = NULL;
1335
1336         if (curr->is_seqcst())
1337                 last_sc_write = get_last_seq_cst_write(curr);
1338
1339         ModelVector<ModelAction *> * rf_set = new ModelVector<ModelAction *>();
1340         
1341         /* Iterate over all threads */
1342         for (i = 0; i < thrd_lists->size(); i++) {
1343                 /* Iterate over actions in thread, starting from most recent */
1344                 action_list_t *list = &(*thrd_lists)[i];
1345                 action_list_t::reverse_iterator rit;
1346                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1347                         ModelAction *act = *rit;
1348
1349                         /* Only consider 'write' actions */
1350                         if (!act->is_write() || act == curr)
1351                                 continue;
1352
1353                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1354                         bool allow_read = true;
1355
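                        /* A seq_cst read may not read from a seq_cst write other than the
                         * most recent one (last_sc_write), nor from any write that happens
                         * before last_sc_write. */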
1356                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1357                                 allow_read = false;
1358
1359                         if (allow_read) {
1360                                 /* Only add feasible reads */
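                                /* Tentatively record the modification-order constraints implied
                                 * by reading from 'act'; candidates that would make the execution
                                 * infeasible are skipped, and the tentative changes are rolled
                                 * back either way. */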
1361                                 mo_graph->startChanges();
1362                                 r_modification_order(curr, act);
1363                                 if (!is_infeasible())
1364                                         rf_set->push_back(act);
1365                                 mo_graph->rollbackChanges();
1366                         }
1367
1368                         /* Include at most one act per-thread that "happens before" curr */
1369                         if (act->happens_before(curr))
1370                                 break;
1371                 }
1372         }
1373
1374         if (DBG_ENABLED()) {
1375                 model_print("Reached read action:\n");
1376                 curr->print();
1377                 model_print("End printing may_read_from\n");
1378         }
1379         return rf_set;
1380 }
1381
1382 /**
1383  * @brief Get an action representing an uninitialized atomic
1384  *
1385  * This function may create a new one or try to retrieve one from the NodeStack
1386  *
1387  * @param curr The current action, which prompts the creation of an UNINIT action
1388  * @return A pointer to the UNINIT ModelAction
1389  */
1390 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
1391 {
1392         Node *node = curr->get_node();
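        /* The NodeStack node caches the UNINIT action, so repeated visits to
         * this point reuse the same action rather than allocating a new one. */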
1393         ModelAction *act = node->get_uninit_action();
1394         if (!act) {
1395                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1396                 node->set_uninit_action(act);
1397         }
1398         act->create_cv(NULL);
1399         return act;
1400 }
1401
1402 static void print_list(const action_list_t *list)
1403 {
1404         action_list_t::const_iterator it;
1405
1406         model_print("------------------------------------------------------------------------------------\n");
1407         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1408         model_print("------------------------------------------------------------------------------------\n");
1409
1410         unsigned int hash = 0;
1411
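        /* Fold every action's hash into a single fingerprint for the trace; it is
         * printed after the listing so executions can be compared at a glance. */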
1412         for (it = list->begin(); it != list->end(); it++) {
1413                 const ModelAction *act = *it;
1414                 if (act->get_seq_number() > 0)
1415                         act->print();
1416                 hash = hash^(hash<<3)^((*it)->hash());
1417         }
1418         model_print("HASH %u\n", hash);
1419         model_print("------------------------------------------------------------------------------------\n");
1420 }
1421
1422 #if SUPPORT_MOD_ORDER_DUMP
1423 void ModelExecution::dumpGraph(char *filename) const
1424 {
1425         char buffer[200];
1426         snprintf(buffer, sizeof(buffer), "%s.dot", filename);
1427         FILE *file = fopen(buffer, "w");
1428         fprintf(file, "digraph %s {\n", filename);
1429         mo_graph->dumpNodes(file);
1430         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
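        /* thread_array[i] holds the most recent action seen for thread i, so
         * consecutive actions of the same thread can be linked by "sb" edges. */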
1431
1432         for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
1433                 ModelAction *act = *it;
1434                 if (act->is_read()) {
1435                         mo_graph->dot_print_node(file, act);
1436                         mo_graph->dot_print_edge(file,
1437                                                 act->get_reads_from(),
1438                                                 act,
1439                                                 "label=\"rf\", color=red, weight=2");
1440                 }
1441                 if (thread_array[id_to_int(act->get_tid())]) {
1442                         mo_graph->dot_print_edge(file,
1443                                         thread_array[id_to_int(act->get_tid())],
1444                                         act,
1445                                         "label=\"sb\", color=blue, weight=400");
1446                 }
1447
1448                 thread_array[id_to_int(act->get_tid())] = act;
1449         }
1450         fprintf(file, "}\n");
1451         model_free(thread_array);
1452         fclose(file);
1453 }
1454 #endif
1455
1456 /** @brief Prints an execution trace summary. */
1457 void ModelExecution::print_summary() const
1458 {
1459 #if SUPPORT_MOD_ORDER_DUMP
1460         char buffername[100];
1461         snprintf(buffername, sizeof(buffername), "exec%04d", get_execution_number());
1462         mo_graph->dumpGraphToFile(buffername);
1463         snprintf(buffername, sizeof(buffername), "graph%04d", get_execution_number());
1464         dumpGraph(buffername);
1465 #endif
1466
1467         model_print("Execution trace %d:", get_execution_number());
1468         if (isfeasibleprefix()) {
1469                 if (scheduler->all_threads_sleeping())
1470                         model_print(" SLEEP-SET REDUNDANT");
1471                 if (have_bug_reports())
1472                         model_print(" DETECTED BUG(S)");
1473         } else
1474                 print_infeasibility(" INFEASIBLE");
1475         model_print("\n");
1476
1477         print_list(&action_trace);
1478         model_print("\n");
1479
1480 }
1481
1482 /**
1483  * Add a Thread to the system for the first time. Should only be called once
1484  * per thread.
1485  * @param t The Thread to add
1486  */
1487 void ModelExecution::add_thread(Thread *t)
1488 {
1489         unsigned int i = id_to_int(t->get_id());
1490         if (i >= thread_map.size())
1491                 thread_map.resize(i + 1);
1492         thread_map[i] = t;
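        /* Only user threads are handed to the scheduler; the internal
         * model-checker thread is never scheduled. */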
1493         if (!t->is_model_thread())
1494                 scheduler->add_thread(t);
1495 }
1496
1497 /**
1498  * @brief Get a Thread reference by its ID
1499  * @param tid The Thread's ID
1500  * @return A Thread reference
1501  */
1502 Thread * ModelExecution::get_thread(thread_id_t tid) const
1503 {
1504         unsigned int i = id_to_int(tid);
1505         if (i < thread_map.size())
1506                 return thread_map[i];
1507         return NULL;
1508 }
1509
1510 /**
1511  * @brief Get a reference to the Thread in which a ModelAction was executed
1512  * @param act The ModelAction
1513  * @return A Thread reference
1514  */
1515 Thread * ModelExecution::get_thread(const ModelAction *act) const
1516 {
1517         return get_thread(act->get_tid());
1518 }
1519
1520 /**
1521  * @brief Get a Thread reference by its pthread ID
1522  * @param pid The pthread ID
1523  * @return A Thread reference
1524  */
1525 Thread * ModelExecution::get_pthread(pthread_t pid) {
1526         if (pid <= pthread_counter) return pthread_map[pid];
1527         else return NULL;
1528 }
1529
1530 /**
1531  * @brief Check if a Thread is currently enabled
1532  * @param t The Thread to check
1533  * @return True if the Thread is currently enabled
1534  */
1535 bool ModelExecution::is_enabled(Thread *t) const
1536 {
1537         return scheduler->is_enabled(t);
1538 }
1539
1540 /**
1541  * @brief Check if a Thread is currently enabled
1542  * @param tid The ID of the Thread to check
1543  * @return True if the Thread is currently enabled
1544  */
1545 bool ModelExecution::is_enabled(thread_id_t tid) const
1546 {
1547         return scheduler->is_enabled(tid);
1548 }
1549
1550 /**
1551  * @brief Select the next thread to execute based on the current action
1552  *
1553  * RMW actions occur in two parts and cannot be split. Likewise, THREAD_CREATE
1554  * actions should be followed immediately by the execution of their child thread.
1555  * In either case, the current action determines which thread runs next.
1556  *
1557  * @param curr The current action
1558  * @return The next thread to run, if the current action will determine this
1559  * selection; otherwise NULL
1560  */
1561 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1562 {
1563         /* Do not split atomic RMW */
1564         if (curr->is_rmwr())
1565                 return get_thread(curr);
1566         if (curr->is_write()) {
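                /* Keep the current thread running after relaxed and release stores;
                 * presumably a context switch immediately after such a store exposes
                 * no additional behaviors here, while stores with stronger ordering
                 * fall through to the normal scheduling path (return NULL). */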
1567                 std::memory_order order = curr->get_mo();
1568                 switch(order) {
1569                         case std::memory_order_relaxed:
1570                                 return get_thread(curr);
1571                         case std::memory_order_release:
1572                                 return get_thread(curr);
1573                         default:
1574                                 return NULL;
1575                 }
1576         }
1577
1578         /* Follow CREATE with the created thread */
1579         /* (strictly speaking unnecessary, since model.cc already handles this) */
1580         if (curr->get_type() == THREAD_CREATE)
1581                 return curr->get_thread_operand();
1582         if (curr->get_type() == PTHREAD_CREATE) {
1583                 return curr->get_thread_operand();
1584         }
1585         return NULL;
1586 }
1587
1588 /** @return True if the execution has taken too many steps */
1589 bool ModelExecution::too_many_steps() const
1590 {
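        /* A bound of 0 disables the step limit. */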
1591         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
1592 }
1593
1594 /**
1595  * Takes the next step in the execution, if possible.
1596  * @param curr The current step to take
1597  * @return Returns the next Thread to run, if any; NULL if this execution
1598  * should terminate
1599  */
1600 Thread * ModelExecution::take_step(ModelAction *curr)
1601 {
1602         Thread *curr_thrd = get_thread(curr);
1603         ASSERT(curr_thrd->get_state() == THREAD_READY);
1604         bool enabled = check_action_enabled(curr);      /* may have side effects, so call it outside of ASSERT */
1605         ASSERT(enabled);
1606         curr = check_current_action(curr);
1607         ASSERT(curr);
1608
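        /* A blocked or completed thread cannot take another step, so drop it
         * from the scheduler's enabled set. */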
1609         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1610                 scheduler->remove_thread(curr_thrd);
1611
1612         return action_select_next_thread(curr);
1613 }
1614