11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
31 bad_synchronization(false),
35 ~model_snapshot_members() {
36 for (unsigned int i = 0;i < bugs.size();i++)
41 unsigned int next_thread_id;
42 modelclock_t used_sequence_numbers;
43 SnapVector<bug_message *> bugs;
44 /** @brief Incorrectly-ordered synchronization was made */
45 bool bad_synchronization;
51 /** @brief Constructor */
52 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
57 thread_map(2), /* We'll always need at least 2 threads */
61 condvar_waiters_map(),
65 thrd_last_fence_release(),
66 priv(new struct model_snapshot_members ()),
67 mo_graph(new CycleGraph()),
68 fuzzer(new NewFuzzer()),
70 thrd_func_act_lists(),
73 /* Initialize a model-checker thread, for special ModelActions */
74 model_thread = new Thread(get_next_id());
75 add_thread(model_thread);
76 scheduler->register_engine(this);
77 fuzzer->register_engine(m->get_history(), this);
80 /** @brief Destructor */
81 ModelExecution::~ModelExecution()
83 for (unsigned int i = 0;i < get_num_threads();i++)
84 delete get_thread(int_to_id(i));
90 int ModelExecution::get_execution_number() const
92 return model->get_execution_number();
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
97 action_list_t *tmp = hash->get(ptr);
99 tmp = new action_list_t();
105 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
107 SnapVector<action_list_t> *tmp = hash->get(ptr);
109 tmp = new SnapVector<action_list_t>();
115 /** @return a thread ID for a new Thread */
116 thread_id_t ModelExecution::get_next_id()
118 return priv->next_thread_id++;
121 /** @return the number of user threads created during this execution */
122 unsigned int ModelExecution::get_num_threads() const
124 return priv->next_thread_id;
127 /** @return a sequence number for a new ModelAction */
128 modelclock_t ModelExecution::get_next_seq_num()
130 return ++priv->used_sequence_numbers;
133 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
134 void ModelExecution::restore_last_seq_num()
136 priv->used_sequence_numbers--;
140 * @brief Should the current action wake up a given thread?
142 * @param curr The current action
143 * @param thread The thread that we might wake up
144 * @return True, if we should wake up the sleeping thread; false otherwise
146 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
148 const ModelAction *asleep = thread->get_pending();
149 /* Don't allow partial RMW to wake anyone up */
152 /* Synchronizing actions may have been backtracked */
153 if (asleep->could_synchronize_with(curr))
155 /* All acquire/release fences and fence-acquire/store-release */
156 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
158 /* Fence-release + store can awake load-acquire on the same location */
159 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
160 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
161 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
164 if (asleep->is_sleep()) {
165 if (fuzzer->shouldWake(asleep))
172 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
174 for (unsigned int i = 0;i < get_num_threads();i++) {
175 Thread *thr = get_thread(int_to_id(i));
176 if (scheduler->is_sleep_set(thr)) {
177 if (should_wake_up(curr, thr)) {
178 /* Remove this thread from sleep set */
179 scheduler->remove_sleep(thr);
180 if (thr->get_pending()->is_sleep())
181 thr->set_pending(NULL);
187 /** @brief Alert the model-checker that an incorrectly-ordered
188 * synchronization was made */
189 void ModelExecution::set_bad_synchronization()
191 priv->bad_synchronization = true;
194 bool ModelExecution::assert_bug(const char *msg)
196 priv->bugs.push_back(new bug_message(msg));
198 if (isfeasibleprefix()) {
205 /** @return True, if any bugs have been reported for this execution */
206 bool ModelExecution::have_bug_reports() const
208 return priv->bugs.size() != 0;
211 SnapVector<bug_message *> * ModelExecution::get_bugs() const
217 * Check whether the current trace has triggered an assertion which should halt
220 * @return True, if the execution should be aborted; false otherwise
222 bool ModelExecution::has_asserted() const
224 return priv->asserted;
228 * Trigger a trace assertion which should cause this execution to be halted.
229 * This can be due to a detected bug or due to an infeasibility that should
232 void ModelExecution::set_assert()
234 priv->asserted = true;
238 * Check if we are in a deadlock. Should only be called at the end of an
239 * execution, although it should not give false positives in the middle of an
240 * execution (there should be some ENABLED thread).
242 * @return True if program is in a deadlock; false otherwise
244 bool ModelExecution::is_deadlocked() const
246 bool blocking_threads = false;
247 for (unsigned int i = 0;i < get_num_threads();i++) {
248 thread_id_t tid = int_to_id(i);
251 Thread *t = get_thread(tid);
252 if (!t->is_model_thread() && t->get_pending())
253 blocking_threads = true;
255 return blocking_threads;
259 * Check if this is a complete execution. That is, have all thread completed
260 * execution (rather than exiting because sleep sets have forced a redundant
263 * @return True if the execution is complete.
265 bool ModelExecution::is_complete_execution() const
267 for (unsigned int i = 0;i < get_num_threads();i++)
268 if (is_enabled(int_to_id(i)))
273 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
274 uint64_t value = *((const uint64_t *) location);
275 modelclock_t storeclock;
276 thread_id_t storethread;
277 getStoreThreadAndClock(location, &storethread, &storeclock);
278 setAtomicStoreFlag(location);
279 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
280 act->set_seq_number(storeclock);
281 add_normal_write_to_lists(act);
282 add_write_to_lists(act);
283 w_modification_order(act);
284 model->get_history()->process_action(act, act->get_tid());
289 * Processes a read model action.
290 * @param curr is the read model action to process.
291 * @param rf_set is the set of model actions we can possibly read from
292 * @return True if processing this read updates the mo_graph.
294 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
296 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
297 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
298 if (hasnonatomicstore) {
299 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
300 rf_set->push_back(nonatomicstore);
303 // Remove writes that violate read modification order
304 for (uint i = 0; i < rf_set->size(); i++) {
305 ModelAction * rf = (*rf_set)[i];
306 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
307 (*rf_set)[i] = rf_set->back();
313 int index = fuzzer->selectWrite(curr, rf_set);
314 if (index == -1) // no feasible write exists
317 ModelAction *rf = (*rf_set)[index];
320 bool canprune = false;
321 if (r_modification_order(curr, rf, priorset, &canprune)) {
322 for(unsigned int i=0;i<priorset->size();i++) {
323 mo_graph->addEdge((*priorset)[i], rf);
326 get_thread(curr)->set_return_value(curr->get_return_value());
328 if (canprune && curr->get_type() == ATOMIC_READ) {
329 int tid = id_to_int(curr->get_tid());
330 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
335 (*rf_set)[index] = rf_set->back();
341 * Processes a lock, trylock, or unlock model action. @param curr is
342 * the read model action to process.
344 * The try lock operation checks whether the lock is taken. If not,
345 * it falls to the normal lock operation case. If so, it returns
348 * The lock operation has already been checked that it is enabled, so
349 * it just grabs the lock and synchronizes with the previous unlock.
351 * The unlock operation has to re-enable all of the threads that are
352 * waiting on the lock.
354 * @return True if synchronization was updated; false otherwise
356 bool ModelExecution::process_mutex(ModelAction *curr)
358 cdsc::mutex *mutex = curr->get_mutex();
359 struct cdsc::mutex_state *state = NULL;
362 state = mutex->get_state();
364 switch (curr->get_type()) {
365 case ATOMIC_TRYLOCK: {
366 bool success = !state->locked;
367 curr->set_try_lock(success);
369 get_thread(curr)->set_return_value(0);
372 get_thread(curr)->set_return_value(1);
374 //otherwise fall into the lock case
376 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
377 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
378 // assert_bug("Lock access before initialization");
379 state->locked = get_thread(curr);
380 ModelAction *unlock = get_last_unlock(curr);
381 //synchronize with the previous unlock statement
382 if (unlock != NULL) {
383 synchronize(unlock, curr);
389 /* wake up the other threads */
390 for (unsigned int i = 0;i < get_num_threads();i++) {
391 Thread *t = get_thread(int_to_id(i));
392 Thread *curr_thrd = get_thread(curr);
393 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
397 /* unlock the lock - after checking who was waiting on it */
398 state->locked = NULL;
400 if (fuzzer->shouldWait(curr)) {
401 /* disable this thread */
402 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
403 scheduler->sleep(get_thread(curr));
408 case ATOMIC_TIMEDWAIT:
409 case ATOMIC_UNLOCK: {
410 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
412 /* wake up the other threads */
413 for (unsigned int i = 0;i < get_num_threads();i++) {
414 Thread *t = get_thread(int_to_id(i));
415 Thread *curr_thrd = get_thread(curr);
416 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
420 /* unlock the lock - after checking who was waiting on it */
421 state->locked = NULL;
424 case ATOMIC_NOTIFY_ALL: {
425 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
426 //activate all the waiting threads
427 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
428 scheduler->wake(get_thread(rit->getVal()));
433 case ATOMIC_NOTIFY_ONE: {
434 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
435 if (waiters->size() != 0) {
436 Thread * thread = fuzzer->selectNotify(waiters);
437 scheduler->wake(thread);
449 * Process a write ModelAction
450 * @param curr The ModelAction to process
451 * @return True if the mo_graph was updated or promises were resolved
453 void ModelExecution::process_write(ModelAction *curr)
455 w_modification_order(curr);
456 get_thread(curr)->set_return_value(VALUE_NONE);
460 * Process a fence ModelAction
461 * @param curr The ModelAction to process
462 * @return True if synchronization was updated
464 bool ModelExecution::process_fence(ModelAction *curr)
467 * fence-relaxed: no-op
468 * fence-release: only log the occurence (not in this function), for
469 * use in later synchronization
470 * fence-acquire (this function): search for hypothetical release
472 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
474 bool updated = false;
475 if (curr->is_acquire()) {
476 action_list_t *list = &action_trace;
477 sllnode<ModelAction *> * rit;
478 /* Find X : is_read(X) && X --sb-> curr */
479 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
480 ModelAction *act = rit->getVal();
483 if (act->get_tid() != curr->get_tid())
485 /* Stop at the beginning of the thread */
486 if (act->is_thread_start())
488 /* Stop once we reach a prior fence-acquire */
489 if (act->is_fence() && act->is_acquire())
493 /* read-acquire will find its own release sequences */
494 if (act->is_acquire())
497 /* Establish hypothetical release sequences */
498 ClockVector *cv = get_hb_from_write(act->get_reads_from());
499 if (cv != NULL && curr->get_cv()->merge(cv))
507 * @brief Process the current action for thread-related activity
509 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
510 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
511 * synchronization, etc. This function is a no-op for non-THREAD actions
512 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
514 * @param curr The current action
515 * @return True if synchronization was updated or a thread completed
517 void ModelExecution::process_thread_action(ModelAction *curr)
519 switch (curr->get_type()) {
520 case THREAD_CREATE: {
521 thrd_t *thrd = (thrd_t *)curr->get_location();
522 struct thread_params *params = (struct thread_params *)curr->get_value();
523 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
524 curr->set_thread_operand(th);
526 th->set_creation(curr);
529 case PTHREAD_CREATE: {
530 (*(uint32_t *)curr->get_location()) = pthread_counter++;
532 struct pthread_params *params = (struct pthread_params *)curr->get_value();
533 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
534 curr->set_thread_operand(th);
536 th->set_creation(curr);
538 if ( pthread_map.size() < pthread_counter )
539 pthread_map.resize( pthread_counter );
540 pthread_map[ pthread_counter-1 ] = th;
545 Thread *blocking = curr->get_thread_operand();
546 ModelAction *act = get_last_action(blocking->get_id());
547 synchronize(act, curr);
551 Thread *blocking = curr->get_thread_operand();
552 ModelAction *act = get_last_action(blocking->get_id());
553 synchronize(act, curr);
554 break; // WL: to be add (modified)
557 case THREADONLY_FINISH:
558 case THREAD_FINISH: {
559 Thread *th = get_thread(curr);
560 if (curr->get_type() == THREAD_FINISH &&
561 th == model->getInitThread()) {
567 /* Wake up any joining threads */
568 for (unsigned int i = 0;i < get_num_threads();i++) {
569 Thread *waiting = get_thread(int_to_id(i));
570 if (waiting->waiting_on() == th &&
571 waiting->get_pending()->is_thread_join())
572 scheduler->wake(waiting);
581 Thread *th = get_thread(curr);
582 th->set_pending(curr);
583 scheduler->add_sleep(th);
592 * Initialize the current action by performing one or more of the following
593 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
594 * manipulating backtracking sets, allocating and
595 * initializing clock vectors, and computing the promises to fulfill.
597 * @param curr The current action, as passed from the user context; may be
598 * freed/invalidated after the execution of this function, with a different
599 * action "returned" its place (pass-by-reference)
600 * @return True if curr is a newly-explored action; false otherwise
602 bool ModelExecution::initialize_curr_action(ModelAction **curr)
604 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
605 ModelAction *newcurr = process_rmw(*curr);
611 ModelAction *newcurr = *curr;
613 newcurr->set_seq_number(get_next_seq_num());
614 /* Always compute new clock vector */
615 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
617 /* Assign most recent release fence */
618 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
620 return true; /* This was a new ModelAction */
625 * @brief Establish reads-from relation between two actions
627 * Perform basic operations involved with establishing a concrete rf relation,
628 * including setting the ModelAction data and checking for release sequences.
630 * @param act The action that is reading (must be a read)
631 * @param rf The action from which we are reading (must be a write)
633 * @return True if this read established synchronization
636 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
639 ASSERT(rf->is_write());
641 act->set_read_from(rf);
642 if (act->is_acquire()) {
643 ClockVector *cv = get_hb_from_write(rf);
646 act->get_cv()->merge(cv);
651 * @brief Synchronizes two actions
653 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
654 * This function performs the synchronization as well as providing other hooks
655 * for other checks along with synchronization.
657 * @param first The left-hand side of the synchronizes-with relation
658 * @param second The right-hand side of the synchronizes-with relation
659 * @return True if the synchronization was successful (i.e., was consistent
660 * with the execution order); false otherwise
662 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
664 if (*second < *first) {
665 set_bad_synchronization();
668 return second->synchronize_with(first);
672 * @brief Check whether a model action is enabled.
674 * Checks whether an operation would be successful (i.e., is a lock already
675 * locked, or is the joined thread already complete).
677 * For yield-blocking, yields are never enabled.
679 * @param curr is the ModelAction to check whether it is enabled.
680 * @return a bool that indicates whether the action is enabled.
682 bool ModelExecution::check_action_enabled(ModelAction *curr) {
683 if (curr->is_lock()) {
684 cdsc::mutex *lock = curr->get_mutex();
685 struct cdsc::mutex_state *state = lock->get_state();
688 } else if (curr->is_thread_join()) {
689 Thread *blocking = curr->get_thread_operand();
690 if (!blocking->is_complete()) {
693 } else if (curr->is_sleep()) {
694 if (!fuzzer->shouldSleep(curr))
702 * This is the heart of the model checker routine. It performs model-checking
703 * actions corresponding to a given "current action." Among other processes, it
704 * calculates reads-from relationships, updates synchronization clock vectors,
705 * forms a memory_order constraints graph, and handles replay/backtrack
706 * execution when running permutations of previously-observed executions.
708 * @param curr The current action to process
709 * @return The ModelAction that is actually executed; may be different than
712 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
715 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
716 bool newly_explored = initialize_curr_action(&curr);
720 wake_up_sleeping_actions(curr);
722 /* Add uninitialized actions to lists */
723 if (!second_part_of_rmw)
724 add_uninit_action_to_lists(curr);
726 SnapVector<ModelAction *> * rf_set = NULL;
727 /* Build may_read_from set for newly-created actions */
728 if (newly_explored && curr->is_read())
729 rf_set = build_may_read_from(curr);
731 if (curr->is_read() && !second_part_of_rmw) {
732 process_read(curr, rf_set);
735 /* bool success = process_read(curr, rf_set);
738 return curr; // Do not add action to lists
741 ASSERT(rf_set == NULL);
743 /* Add the action to lists */
744 if (!second_part_of_rmw)
745 add_action_to_lists(curr);
747 if (curr->is_write())
748 add_write_to_lists(curr);
750 process_thread_action(curr);
752 if (curr->is_write())
755 if (curr->is_fence())
758 if (curr->is_mutex_op())
765 * This is the strongest feasibility check available.
766 * @return whether the current trace (partial or complete) must be a prefix of
769 bool ModelExecution::isfeasibleprefix() const
771 return !is_infeasible();
775 * Print disagnostic information about an infeasible execution
776 * @param prefix A string to prefix the output with; if NULL, then a default
777 * message prefix will be provided
779 void ModelExecution::print_infeasibility(const char *prefix) const
783 if (priv->bad_synchronization)
784 ptr += sprintf(ptr, "[bad sw ordering]");
786 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
790 * Check if the current partial trace is infeasible. Does not check any
791 * end-of-execution flags, which might rule out the execution. Thus, this is
792 * useful only for ruling an execution as infeasible.
793 * @return whether the current partial trace is infeasible.
795 bool ModelExecution::is_infeasible() const
797 return priv->bad_synchronization;
800 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
801 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
802 ModelAction *lastread = get_last_action(act->get_tid());
803 lastread->process_rmw(act);
805 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
811 * @brief Updates the mo_graph with the constraints imposed from the current
814 * Basic idea is the following: Go through each other thread and find
815 * the last action that happened before our read. Two cases:
817 * -# The action is a write: that write must either occur before
818 * the write we read from or be the write we read from.
819 * -# The action is a read: the write that that action read from
820 * must occur before the write we read from or be the same write.
822 * @param curr The current action. Must be a read.
823 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
824 * @param check_only If true, then only check whether the current action satisfies
825 * read modification order or not, without modifiying priorset and canprune.
827 * @return True if modification order edges were added; false otherwise
830 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune, bool check_only)
832 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
834 ASSERT(curr->is_read());
836 /* Last SC fence in the current thread */
837 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
839 int tid = curr->get_tid();
840 ModelAction *prev_same_thread = NULL;
841 /* Iterate over all threads */
842 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
843 /* Last SC fence in thread tid */
844 ModelAction *last_sc_fence_thread_local = NULL;
846 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
848 /* Last SC fence in thread tid, before last SC fence in current thread */
849 ModelAction *last_sc_fence_thread_before = NULL;
850 if (last_sc_fence_local)
851 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
853 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
854 if (prev_same_thread != NULL &&
855 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
856 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
860 /* Iterate over actions in thread, starting from most recent */
861 action_list_t *list = &(*thrd_lists)[tid];
862 sllnode<ModelAction *> * rit;
863 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
864 ModelAction *act = rit->getVal();
869 /* Don't want to add reflexive edges on 'rf' */
870 if (act->equals(rf)) {
871 if (act->happens_before(curr))
877 if (act->is_write()) {
878 /* C++, Section 29.3 statement 5 */
879 if (curr->is_seqcst() && last_sc_fence_thread_local &&
880 *act < *last_sc_fence_thread_local) {
881 if (mo_graph->checkReachable(rf, act))
884 priorset->push_back(act);
887 /* C++, Section 29.3 statement 4 */
888 else if (act->is_seqcst() && last_sc_fence_local &&
889 *act < *last_sc_fence_local) {
890 if (mo_graph->checkReachable(rf, act))
893 priorset->push_back(act);
896 /* C++, Section 29.3 statement 6 */
897 else if (last_sc_fence_thread_before &&
898 *act < *last_sc_fence_thread_before) {
899 if (mo_graph->checkReachable(rf, act))
902 priorset->push_back(act);
908 * Include at most one act per-thread that "happens
911 if (act->happens_before(curr)) {
913 if (last_sc_fence_local == NULL ||
914 (*last_sc_fence_local < *act)) {
915 prev_same_thread = act;
918 if (act->is_write()) {
919 if (mo_graph->checkReachable(rf, act))
922 priorset->push_back(act);
924 const ModelAction *prevrf = act->get_reads_from();
925 if (!prevrf->equals(rf)) {
926 if (mo_graph->checkReachable(rf, prevrf))
929 priorset->push_back(prevrf);
931 if (act->get_tid() == curr->get_tid()) {
932 //Can prune curr from obj list
946 * Updates the mo_graph with the constraints imposed from the current write.
948 * Basic idea is the following: Go through each other thread and find
949 * the lastest action that happened before our write. Two cases:
951 * (1) The action is a write => that write must occur before
954 * (2) The action is a read => the write that that action read from
955 * must occur before the current write.
957 * This method also handles two other issues:
959 * (I) Sequential Consistency: Making sure that if the current write is
960 * seq_cst, that it occurs after the previous seq_cst write.
962 * (II) Sending the write back to non-synchronizing reads.
964 * @param curr The current action. Must be a write.
965 * @param send_fv A vector for stashing reads to which we may pass our future
966 * value. If NULL, then don't record any future values.
967 * @return True if modification order edges were added; false otherwise
969 void ModelExecution::w_modification_order(ModelAction *curr)
971 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
973 ASSERT(curr->is_write());
975 SnapList<ModelAction *> edgeset;
977 if (curr->is_seqcst()) {
978 /* We have to at least see the last sequentially consistent write,
979 so we are initialized. */
980 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
981 if (last_seq_cst != NULL) {
982 edgeset.push_back(last_seq_cst);
984 //update map for next query
985 obj_last_sc_map.put(curr->get_location(), curr);
988 /* Last SC fence in the current thread */
989 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
991 /* Iterate over all threads */
992 for (i = 0;i < thrd_lists->size();i++) {
993 /* Last SC fence in thread i, before last SC fence in current thread */
994 ModelAction *last_sc_fence_thread_before = NULL;
995 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
996 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
998 /* Iterate over actions in thread, starting from most recent */
999 action_list_t *list = &(*thrd_lists)[i];
1000 sllnode<ModelAction*>* rit;
1001 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1002 ModelAction *act = rit->getVal();
1005 * 1) If RMW and it actually read from something, then we
1006 * already have all relevant edges, so just skip to next
1009 * 2) If RMW and it didn't read from anything, we should
1010 * whatever edge we can get to speed up convergence.
1012 * 3) If normal write, we need to look at earlier actions, so
1013 * continue processing list.
1015 if (curr->is_rmw()) {
1016 if (curr->get_reads_from() != NULL)
1024 /* C++, Section 29.3 statement 7 */
1025 if (last_sc_fence_thread_before && act->is_write() &&
1026 *act < *last_sc_fence_thread_before) {
1027 edgeset.push_back(act);
1032 * Include at most one act per-thread that "happens
1035 if (act->happens_before(curr)) {
1037 * Note: if act is RMW, just add edge:
1039 * The following edge should be handled elsewhere:
1040 * readfrom(act) --mo--> act
1042 if (act->is_write())
1043 edgeset.push_back(act);
1044 else if (act->is_read()) {
1045 //if previous read accessed a null, just keep going
1046 edgeset.push_back(act->get_reads_from());
1052 mo_graph->addEdges(&edgeset, curr);
1057 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1058 * some constraints. This method checks one the following constraint (others
1059 * require compiler support):
1061 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1062 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1064 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1066 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1068 /* Iterate over all threads */
1069 for (i = 0;i < thrd_lists->size();i++) {
1070 const ModelAction *write_after_read = NULL;
1072 /* Iterate over actions in thread, starting from most recent */
1073 action_list_t *list = &(*thrd_lists)[i];
1074 sllnode<ModelAction *>* rit;
1075 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1076 ModelAction *act = rit->getVal();
1078 /* Don't disallow due to act == reader */
1079 if (!reader->happens_before(act) || reader == act)
1081 else if (act->is_write())
1082 write_after_read = act;
1083 else if (act->is_read() && act->get_reads_from() != NULL)
1084 write_after_read = act->get_reads_from();
1087 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1094 * Computes the clock vector that happens before propagates from this write.
1096 * @param rf The action that might be part of a release sequence. Must be a
1098 * @return ClockVector of happens before relation.
1101 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1102 SnapVector<ModelAction *> * processset = NULL;
1103 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1104 ASSERT(rf->is_write());
1105 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1107 if (processset == NULL)
1108 processset = new SnapVector<ModelAction *>();
1109 processset->push_back(rf);
1112 int i = (processset == NULL) ? 0 : processset->size();
1114 ClockVector * vec = NULL;
1116 if (rf->get_rfcv() != NULL) {
1117 vec = rf->get_rfcv();
1118 } else if (rf->is_acquire() && rf->is_release()) {
1120 } else if (rf->is_release() && !rf->is_rmw()) {
1122 } else if (rf->is_release()) {
1123 //have rmw that is release and doesn't have a rfcv
1124 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1127 //operation that isn't release
1128 if (rf->get_last_fence_release()) {
1130 vec = rf->get_last_fence_release()->get_cv();
1132 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1138 rf = (*processset)[i];
1142 if (processset != NULL)
1148 * Performs various bookkeeping operations for the current ModelAction when it is
1149 * the first atomic action that has occurred at its memory location.
1151 * For instance, adds an uninitialized action to the per-object, per-thread action vector
1152 * and to the action trace list of all thread actions.
1154 * @param act is the ModelAction to process.
1156 void ModelExecution::add_uninit_action_to_lists(ModelAction *act)
// NOTE(review): the opening brace and the declaration of the local
// 'uninit_id' (used below) are elided from this view -- confirm against
// the full source.
1158 int tid = id_to_int(act->get_tid());
1159 ModelAction *uninit = NULL;
// Per-location list of every action at this address
1161 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
// Only the very first atomic action at a location gets a placeholder
1162 if (list->empty() && act->is_atomic_var()) {
1163 uninit = get_uninitialized_action(act);
1164 uninit_id = id_to_int(uninit->get_tid());
// The placeholder is ordered before every real action at this location
1165 list->push_front(uninit);
// Also record the placeholder in the per-location, per-thread write map
1166 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1167 if ((int)vec->size() <= uninit_id) {
1168 int oldsize = (int) vec->size();
1169 vec->resize(uninit_id + 1);
// resize() does not run constructors; placement-new each new slot
1170 for(int i = oldsize; i < uninit_id + 1; i++) {
1171 new (&(*vec)[i]) action_list_t();
1174 (*vec)[uninit_id].push_front(uninit);
1177 // Update action trace, a total order of all actions
// NOTE(review): 'uninit' may still be NULL here; the full source presumably
// guards these updates with an elided 'if (uninit)' -- verify.
1179 action_trace.push_front(uninit);
1181 // Update obj_thrd_map, a per location, per thread, order of actions
1182 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1183 if ((int)vec->size() <= tid) {
1184 uint oldsize = vec->size();
1185 vec->resize(priv->next_thread_id);
// resize() does not run constructors; placement-new each new slot
1186 for(uint i = oldsize; i < priv->next_thread_id; i++)
1187 new (&(*vec)[i]) action_list_t();
1190 (*vec)[uninit_id].push_front(uninit);
1192 // Update thrd_last_action, the last action taken by each thread
1193 if ((int)thrd_last_action.size() <= tid)
1194 thrd_last_action.resize(get_num_threads());
1196 thrd_last_action[uninit_id] = uninit;
1201 * Performs various bookkeeping operations for the current ModelAction. For
1202 * instance, adds action to the per-object, per-thread action vector and to the
1203 * action trace list of all thread actions.
1205 * @param act is the ModelAction to add.
1207 void ModelExecution::add_action_to_lists(ModelAction *act)
1209 int tid = id_to_int(act->get_tid());
1210 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1211 list->push_back(act);
1213 // Update action trace, a total order of all actions
1214 action_trace.push_back(act);
1216 // Update obj_thrd_map, a per location, per thread, order of actions
1217 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1218 if ((int)vec->size() <= tid) {
1219 uint oldsize = vec->size();
1220 vec->resize(priv->next_thread_id);
1221 for(uint i = oldsize; i < priv->next_thread_id; i++)
1222 new (&(*vec)[i]) action_list_t();
1224 (*vec)[tid].push_back(act);
1226 // Update thrd_last_action, the last action taken by each thrad
1227 if ((int)thrd_last_action.size() <= tid)
1228 thrd_last_action.resize(get_num_threads());
1229 thrd_last_action[tid] = act;
1231 // Update thrd_last_fence_release, the last release fence taken by each thread
1232 if (act->is_fence() && act->is_release()) {
1233 if ((int)thrd_last_fence_release.size() <= tid)
1234 thrd_last_fence_release.resize(get_num_threads());
1235 thrd_last_fence_release[tid] = act;
1238 if (act->is_wait()) {
1239 void *mutex_loc = (void *) act->get_value();
1240 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1242 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1243 if ((int)vec->size() <= tid) {
1244 uint oldsize = vec->size();
1245 vec->resize(priv->next_thread_id);
1246 for(uint i = oldsize; i < priv->next_thread_id; i++)
1247 new (&(*vec)[i]) action_list_t();
1249 (*vec)[tid].push_back(act);
1253 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1254 sllnode<ModelAction*> * rit = list->end();
1255 modelclock_t next_seq = act->get_seq_number();
1256 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1257 list->push_back(act);
1259 for(;rit != NULL;rit=rit->getPrev()) {
1260 if (rit->getVal()->get_seq_number() == next_seq) {
1261 list->insertAfter(rit, act);
/**
 * Insert an action into a sequence-number-sorted list (as in
 * insertIntoActionList) and additionally derive the action's clock vector
 * from the action it is ordered after.
 */
1268 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1269 sllnode<ModelAction*> * rit = list->end();
1270 modelclock_t next_seq = act->get_seq_number();
// Empty list: no predecessor to seed the clock vector from
// NOTE(review): the 'if (rit == NULL) {' guard opening this branch (and any
// accompanying push_back) is elided from this view -- confirm against the
// full source.
1272 act->create_cv(NULL);
// Tail already has the same sequence number: act belongs at the end
1273 } else if (rit->getVal()->get_seq_number() == next_seq) {
1274 act->create_cv(rit->getVal());
1275 list->push_back(act);
// Otherwise scan backwards for the last node with a matching sequence
// number and insert immediately after it, inheriting its clock vector
1277 for(;rit != NULL;rit=rit->getPrev()) {
1278 if (rit->getVal()->get_seq_number() == next_seq) {
1279 act->create_cv(rit->getVal());
1280 list->insertAfter(rit, act);
1288 * Performs various bookkeeping operations for a normal write. The
1289 * complication is that we are typically inserting a normal write
1290 * lazily, so we need to insert it into the middle of lists.
1292 * @param act is the ModelAction to add.
1295 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1297 int tid = id_to_int(act->get_tid());
1298 insertIntoActionListAndSetCV(&action_trace, act);
1300 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1301 insertIntoActionList(list, act);
1303 // Update obj_thrd_map, a per location, per thread, order of actions
1304 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1305 if (tid >= (int)vec->size()) {
1306 uint oldsize =vec->size();
1307 vec->resize(priv->next_thread_id);
1308 for(uint i=oldsize;i<priv->next_thread_id;i++)
1309 new (&(*vec)[i]) action_list_t();
1311 insertIntoActionList(&(*vec)[tid],act);
1313 // Update thrd_last_action, the last action taken by each thrad
1314 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1315 thrd_last_action[tid] = act;
1319 void ModelExecution::add_write_to_lists(ModelAction *write) {
1320 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1321 int tid = id_to_int(write->get_tid());
1322 if (tid >= (int)vec->size()) {
1323 uint oldsize =vec->size();
1324 vec->resize(priv->next_thread_id);
1325 for(uint i=oldsize;i<priv->next_thread_id;i++)
1326 new (&(*vec)[i]) action_list_t();
1328 (*vec)[tid].push_back(write);
1332 * @brief Get the last action performed by a particular Thread
1333 * @param tid The thread ID of the Thread in question
1334 * @return The last action in the thread
1336 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1338 int threadid = id_to_int(tid);
1339 if (threadid < (int)thrd_last_action.size())
1340 return thrd_last_action[id_to_int(tid)];
1346 * @brief Get the last fence release performed by a particular Thread
1347 * @param tid The thread ID of the Thread in question
1348 * @return The last fence release in the thread, if one exists; NULL otherwise
1350 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1352 int threadid = id_to_int(tid);
1353 if (threadid < (int)thrd_last_fence_release.size())
1354 return thrd_last_fence_release[id_to_int(tid)];
1360 * Gets the last memory_order_seq_cst write (in the total global sequence)
1361 * performed on a particular object (i.e., memory location), not including the
1363 * @param curr The current ModelAction; also denotes the object location to
1365 * @return The last seq_cst write
1367 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1369 void *location = curr->get_location();
1370 return obj_last_sc_map.get(location);
1374 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1375 * performed in a particular thread, prior to a particular fence.
1376 * @param tid The ID of the thread to check
1377 * @param before_fence The fence from which to begin the search; if NULL, then
1378 * search for the most recent fence in the thread.
1379 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1381 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1383 /* All fences should have location FENCE_LOCATION */
1384 action_list_t *list = obj_map.get(FENCE_LOCATION);
// NOTE(review): a NULL-list early return appears to be elided here -- confirm
// against the full source before relying on 'list' being non-NULL.
1389 sllnode<ModelAction*>* rit = list->end();
// Rewind from the tail to 'before_fence' so the search below is strictly
// prior to it.
// NOTE(review): this rewind is presumably guarded by an elided
// 'if (before_fence)' check, and the loop body's 'break' is elided -- verify.
1392 for (;rit != NULL;rit=rit->getPrev())
1393 if (rit->getVal() == before_fence)
1396 ASSERT(rit->getVal() == before_fence);
// (elided: step rit past before_fence, e.g. 'rit = rit->getPrev();')
// Scan backwards for the most recent seq_cst fence in thread 'tid'
1400 for (;rit != NULL;rit=rit->getPrev()) {
1401 ModelAction *act = rit->getVal();
1402 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
// (elided: 'return act;' here and a final 'return NULL;')
1409 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1410 * location). This function identifies the mutex according to the current
1411 * action, which is presumed to perform on the same mutex.
1412 * @param curr The current ModelAction; also denotes the object location to
1414 * @return The last unlock operation
1416 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1418 void *location = curr->get_location();
1420 action_list_t *list = obj_map.get(location);
1421 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1422 sllnode<ModelAction*>* rit;
1423 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1424 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1425 return rit->getVal();
1429 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1431 ModelAction *parent = get_last_action(tid);
1433 parent = get_thread(tid)->get_creation();
1438 * Returns the clock vector for a given thread.
1439 * @param tid The thread whose clock vector we want
1440 * @return Desired clock vector
1442 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1444 ModelAction *firstaction=get_parent_action(tid);
1445 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * Compare two values for equality after truncating both to a given width.
 *
 * @param val1 First value
 * @param val2 Second value
 * @param size Width of the comparison in bytes (expected: 1, 2, 4 or 8)
 * @return true iff the low 'size' bytes of both values are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte (and any unexpected) widths compare the full value, making
		 * the switch exhaustive instead of falling off the end */
		return val1 == val2;
	}
}
1465 * Build up an initial set of all past writes that this 'read' action may read
1466 * from, as well as any previously-observed future values that must still be valid.
1468 * @param curr is the current ModelAction that we are exploring; it must be a
read action
* @return A freshly-allocated vector of candidate writes; caller takes ownership
1471 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
// Per-location, per-thread lists of prior writes
1473 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1475 ASSERT(curr->is_read());
// For a seq_cst read, remember the last seq_cst write at this location; the
// read must not see writes ordered before it (other than that write itself)
1477 ModelAction *last_sc_write = NULL;
1479 if (curr->is_seqcst())
1480 last_sc_write = get_last_seq_cst_write(curr);
1482 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1484 /* Iterate over all threads */
// NOTE(review): the loop counter 'i' is declared in elided code -- confirm.
1485 for (i = 0;i < thrd_lists->size();i++) {
1486 /* Iterate over actions in thread, starting from most recent */
1487 action_list_t *list = &(*thrd_lists)[i];
1488 sllnode<ModelAction *> * rit;
1489 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1490 ModelAction *act = rit->getVal();
// NOTE(review): a filter (e.g. skipping act == curr) appears to be elided
// here -- verify against the full source.
1495 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1496 bool allow_read = true;
1498 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
// (elided: presumably 'allow_read = false;')
1501 /* Need to check whether we will have two RMW reading from the same value */
1502 if (curr->is_rmwr()) {
1503 /* It is okay if we have a failing CAS */
1504 if (!curr->is_rmwrcas() ||
1505 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1506 //Need to make sure we aren't the second RMW
1507 CycleNode * node = mo_graph->getNode_noCreate(act);
1508 if (node != NULL && node->getRMW() != NULL) {
1509 //we are the second RMW
// (elided: presumably 'allow_read = false;')
1516 /* Only add feasible reads */
// NOTE(review): presumably guarded by an elided 'if (allow_read)' -- verify.
1517 rf_set->push_back(act);
1520 /* Include at most one act per-thread that "happens before" curr */
1521 if (act->happens_before(curr))
// (elided: 'break;' out of the per-thread scan)
// Debug dump of the candidate set
1526 if (DBG_ENABLED()) {
1527 model_print("Reached read action:\n");
// (elided: printing of curr and the collected rf_set)
1529 model_print("End printing read_from_past\n");
// (elided: 'return rf_set;')
1535 * @brief Get an action representing an uninitialized atomic
1537 * This function may create a new one.
1539 * @param curr The current action, which prompts the creation of an UNINIT action
1540 * @return A pointer to the UNINIT ModelAction
1542 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1544 ModelAction *act = curr->get_uninit_action();
1546 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1547 curr->set_uninit_action(act);
1549 act->create_cv(NULL);
/**
 * @brief Print a formatted dump of an action list plus a trace hash.
 * Every action is folded into the HASH line; only actions with a positive
 * sequence number are printed in the table.
 */
1553 static void print_list(action_list_t *list)
1555 sllnode<ModelAction*> *it;
1557 model_print("------------------------------------------------------------------------------------\n");
1558 model_print("# t Action type MO Location Value Rf CV\n");
1559 model_print("------------------------------------------------------------------------------------\n");
1561 unsigned int hash = 0;
1563 for (it = list->begin();it != NULL;it=it->getNext()) {
1564 const ModelAction *act = it->getVal();
1565 if (act->get_seq_number() > 0)
// NOTE(review): the body of this 'if' (presumably act->print()) is elided
// from this view -- confirm against the full source.
// Order-sensitive rolling hash over the whole trace
1567 hash = hash^(hash<<3)^(it->getVal()->hash());
1569 model_print("HASH %u\n", hash);
1570 model_print("------------------------------------------------------------------------------------\n");
1573 #if SUPPORT_MOD_ORDER_DUMP
/**
 * Dump the modification-order graph, plus reads-from ("rf") and
 * sequenced-before ("sb") edges, to "<filename>.dot" in Graphviz format.
 * @param filename Base name for the output file (".dot" is appended)
 */
1574 void ModelExecution::dumpGraph(char *filename)
// NOTE(review): the local 'buffer' declaration is elided from this view --
// confirm its size against the full source.
1577 sprintf(buffer, "%s.dot", filename);
1578 FILE *file = fopen(buffer, "w");
1579 fprintf(file, "digraph %s {\n", filename);
// Emit all modification-order nodes/edges first
1580 mo_graph->dumpNodes(file);
// Track the most recent action of each thread so "sb" edges can be drawn
1581 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1583 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1584 ModelAction *act = it->getVal();
1585 if (act->is_read()) {
1586 mo_graph->dot_print_node(file, act);
// Red "rf" edge from the write this read reads from
1587 mo_graph->dot_print_edge(file,
1588 act->get_reads_from(),
1590 "label=\"rf\", color=red, weight=2");
// Blue "sb" edge from the previous action in the same thread
1592 if (thread_array[act->get_tid()]) {
1593 mo_graph->dot_print_edge(file,
1594 thread_array[id_to_int(act->get_tid())],
1596 "label=\"sb\", color=blue, weight=400");
1599 thread_array[act->get_tid()] = act;
1601 fprintf(file, "}\n");
1602 model_free(thread_array);
1607 /** @brief Prints an execution trace summary. */
1608 void ModelExecution::print_summary()
1610 #if SUPPORT_MOD_ORDER_DUMP
// When built with dump support, also write the modification-order graph and
// the combined dot graph for this execution number
1611 char buffername[100];
1612 sprintf(buffername, "exec%04u", get_execution_number());
1613 mo_graph->dumpGraphToFile(buffername);
1614 sprintf(buffername, "graph%04u", get_execution_number());
1615 dumpGraph(buffername);
// (the matching #endif is elided from this view)
1618 model_print("Execution trace %d:", get_execution_number());
// Annotate the header with notable execution properties
1619 if (isfeasibleprefix()) {
1620 if (scheduler->all_threads_sleeping())
1621 model_print(" SLEEP-SET REDUNDANT");
1622 if (have_bug_reports())
1623 model_print(" DETECTED BUG(S)");
// Infeasible executions report the reason instead
1625 print_infeasibility(" INFEASIBLE");
// Dump the full action trace table
1628 print_list(&action_trace);
1634 * Add a Thread to the system for the first time. Should only be called once
1636 * @param t The Thread to add
1638 void ModelExecution::add_thread(Thread *t)
1640 unsigned int i = id_to_int(t->get_id());
1641 if (i >= thread_map.size())
1642 thread_map.resize(i + 1);
1644 if (!t->is_model_thread())
1645 scheduler->add_thread(t);
1649 * @brief Get a Thread reference by its ID
1650 * @param tid The Thread's ID
1651 * @return A Thread reference
1653 Thread * ModelExecution::get_thread(thread_id_t tid) const
1655 unsigned int i = id_to_int(tid);
1656 if (i < thread_map.size())
1657 return thread_map[i];
1662 * @brief Get a reference to the Thread in which a ModelAction was executed
1663 * @param act The ModelAction
1664 * @return A Thread reference
1666 Thread * ModelExecution::get_thread(const ModelAction *act) const
// Delegate to the thread-ID overload
1668 return get_thread(act->get_tid());
1672 * @brief Get a Thread reference by its pthread ID
1673 * @param pid The pthread's ID
1674 * @return A Thread reference
1676 Thread * ModelExecution::get_pthread(pthread_t pid) {
// NOTE(review): the lines converting 'pid' into the integral 'x.v' (a
// pthread_t/uint32_t conversion in the full source) are elided from this
// view -- confirm against the full source.
1682 uint32_t thread_id = x.v;
// Known pthread IDs map directly into pthread_map; the NULL return for
// unknown IDs is elided from this view
1683 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1688 * @brief Check if a Thread is currently enabled
1689 * @param t The Thread to check
1690 * @return True if the Thread is currently enabled
1692 bool ModelExecution::is_enabled(Thread *t) const
// Thin wrapper: the Scheduler owns the enabled/disabled state
1694 return scheduler->is_enabled(t);
1698 * @brief Check if a Thread is currently enabled
1699 * @param tid The ID of the Thread to check
1700 * @return True if the Thread is currently enabled
1702 bool ModelExecution::is_enabled(thread_id_t tid) const
// Thin wrapper: the Scheduler owns the enabled/disabled state
1704 return scheduler->is_enabled(tid);
1708 * @brief Select the next thread to execute based on the curren action
1710 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1711 * actions should be followed by the execution of their child thread. In either
1712 * case, the current action should determine the next thread schedule.
1714 * @param curr The current action
1715 * @return The next thread to run, if the current action will determine this
1716 * selection; otherwise NULL
1718 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1720 /* Do not split atomic RMW */
1721 if (curr->is_rmwr() && !paused_by_fuzzer(curr))
1722 return get_thread(curr);
1723 /* Follow CREATE with the created thread */
1724 /* which is not needed, because model.cc takes care of this */
1725 if (curr->get_type() == THREAD_CREATE)
1726 return curr->get_thread_operand();
1727 if (curr->get_type() == PTHREAD_CREATE) {
1728 return curr->get_thread_operand();
1733 /** @param act A read atomic action */
/** @return True if the fuzzer has paused this read */
1734 bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
// Only reads can be paused by the fuzzer
1736 ASSERT(act->is_read());
1738 // Actions paused by fuzzer have their sequence number reset to 0
1739 return act->get_seq_number() == 0;
1743 * Takes the next step in the execution, if possible.
1744 * @param curr The current step to take
1745 * @return Returns the next Thread to run, if any; NULL if this execution
should terminate
1748 Thread * ModelExecution::take_step(ModelAction *curr)
1750 Thread *curr_thrd = get_thread(curr);
// The scheduler must only dispatch threads that are ready to run
1751 ASSERT(curr_thrd->get_state() == THREAD_READY);
1753 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action() may replace 'curr' with a different ModelAction;
// use its return value from here on
1754 curr = check_current_action(curr);
1757 /* Process this action in ModelHistory for records */
1758 model->get_history()->process_action( curr, curr->get_tid() );
// Blocked or finished threads leave the scheduler's ready set
1760 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1761 scheduler->remove_thread(curr_thrd);
// Let the action itself constrain the next schedule (RMW, CREATE)
1763 return action_select_next_thread(curr);
1766 Fuzzer * ModelExecution::getFuzzer() {