11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
// NOTE(review): this listing is elided (in-content line numbers skip values);
// gaps below are missing source lines, not deleted code.
/* Small snapshotted state of the ModelChecker: thread-id counter, sequence
 * counter, collected bug reports, and the bad-synchronization flag. */
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
31 bad_synchronization(false),
/* Destructor frees the heap-allocated bug_message objects. */
35 ~model_snapshot_members() {
36 for (unsigned int i = 0;i < bugs.size();i++)
/* Next thread id to hand out; doubles as the user-thread count. */
41 unsigned int next_thread_id;
/* Last sequence number handed to a ModelAction. */
42 modelclock_t used_sequence_numbers;
/* Bug reports accumulated during this execution. */
43 SnapVector<bug_message *> bugs;
44 /** @brief Incorrectly-ordered synchronization was made */
45 bool bad_synchronization;
51 /** @brief Constructor */
51 /** @brief Constructor */
/* Builds the per-execution state: thread map, waiter map, snapshot members,
 * the modification-order cycle graph, and the fuzzer. Also creates a special
 * model-checker thread and registers this execution with the scheduler and
 * fuzzer. (Listing elided — some initializer-list entries are missing.) */
52 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
57 thread_map(2), /* We'll always need at least 2 threads */
61 condvar_waiters_map(),
65 thrd_last_fence_release(),
66 priv(new struct model_snapshot_members ()),
67 mo_graph(new CycleGraph()),
68 fuzzer(new NewFuzzer()),
70 thrd_func_act_lists(),
73 /* Initialize a model-checker thread, for special ModelActions */
74 model_thread = new Thread(get_next_id());
75 add_thread(model_thread);
/* Let scheduler and fuzzer call back into this execution. */
76 scheduler->register_engine(this);
77 fuzzer->register_engine(m->get_history(), this);
80 /** @brief Destructor */
/* Frees every Thread created during this execution (including the model
 * thread, id 0). Listing elided — remaining cleanup lines not visible. */
81 ModelExecution::~ModelExecution()
83 for (unsigned int i = 0;i < get_num_threads();i++)
84 delete get_thread(int_to_id(i));
/* @return the execution number, delegated to the owning ModelChecker. */
90 int ModelExecution::get_execution_number() const
92 return model->get_execution_number();
/* Lookup-or-create: return the action list stored for ptr in hash,
 * allocating (and presumably inserting — insertion line elided) an empty
 * list on first use. */
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
97 action_list_t *tmp = hash->get(ptr);
99 tmp = new action_list_t();
/* Same lookup-or-create pattern as get_safe_ptr_action, but for a
 * per-thread vector of action lists keyed by location. */
105 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
107 SnapVector<action_list_t> *tmp = hash->get(ptr);
109 tmp = new SnapVector<action_list_t>();
115 /** @return a thread ID for a new Thread */
/* Hands out monotonically increasing thread ids (post-increment, so the
 * first caller gets INITIAL_THREAD_ID). */
116 thread_id_t ModelExecution::get_next_id()
118 return priv->next_thread_id++;
121 /** @return the number of user threads created during this execution */
/* Thread count == next unissued id, since ids are issued densely from 0. */
122 unsigned int ModelExecution::get_num_threads() const
124 return priv->next_thread_id;
127 /** @return a sequence number for a new ModelAction */
/* Pre-increment: sequence numbers start at 1 (0 is the initial value). */
128 modelclock_t ModelExecution::get_next_seq_num()
130 return ++priv->used_sequence_numbers;
133 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
/* Undo one get_next_seq_num(), used when the fuzzer postpones a thread's
 * action so the sequence number can be reissued. */
134 void ModelExecution::restore_last_seq_num()
136 priv->used_sequence_numbers--;
140 * @brief Should the current action wake up a given thread?
142 * @param curr The current action
143 * @param thread The thread that we might wake up
144 * @return True, if we should wake up the sleeping thread; false otherwise
/* Decide whether curr should wake the sleeping thread, by inspecting the
 * pending action the thread is blocked on. Cases visible below: possible
 * synchronization with curr; acquire fence vs. release action; load-acquire
 * on the same variable as a store (checked against the storing thread's last
 * release fence); and sleep actions, delegated to the fuzzer.
 * NOTE(review): listing elided — the return statements for each case are
 * among the missing lines. */
146 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
148 const ModelAction *asleep = thread->get_pending();
149 /* Don't allow partial RMW to wake anyone up */
152 /* Synchronizing actions may have been backtracked */
153 if (asleep->could_synchronize_with(curr))
155 /* All acquire/release fences and fence-acquire/store-release */
156 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
158 /* Fence-release + store can awake load-acquire on the same location */
159 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
160 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
161 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
164 if (asleep->is_sleep()) {
165 if (fuzzer->shouldWake(asleep))
/* For every thread currently in the scheduler's sleep set, ask
 * should_wake_up(); wake the ones that qualify, and clear the pending
 * action when it was a sleep (so it is not re-executed). */
172 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
174 for (unsigned int i = 0;i < get_num_threads();i++) {
175 Thread *thr = get_thread(int_to_id(i));
176 if (scheduler->is_sleep_set(thr)) {
177 if (should_wake_up(curr, thr)) {
178 /* Remove this thread from sleep set */
179 scheduler->remove_sleep(thr);
180 if (thr->get_pending()->is_sleep())
181 thr->set_pending(NULL);
187 /** @brief Alert the model-checker that an incorrectly-ordered
188 * synchronization was made */
187 /** @brief Alert the model-checker that an incorrectly-ordered
188 * synchronization was made */
/* Marks the execution infeasible; read back by is_infeasible(). */
189 void ModelExecution::set_bad_synchronization()
191 priv->bad_synchronization = true;
/* Record a bug report for this execution; the isfeasibleprefix() branch
 * presumably escalates (body elided — behavior after the check not visible).
 * @param msg human-readable bug description (copied into bug_message). */
194 bool ModelExecution::assert_bug(const char *msg)
196 priv->bugs.push_back(new bug_message(msg));
198 if (isfeasibleprefix()) {
205 /** @return True, if any bugs have been reported for this execution */
205 /** @return True, if any bugs have been reported for this execution */
206 bool ModelExecution::have_bug_reports() const
208 return priv->bugs.size() != 0;
/* Accessor for the collected bug reports (body elided from this listing). */
211 SnapVector<bug_message *> * ModelExecution::get_bugs() const
217 * Check whether the current trace has triggered an assertion which should halt
220 * @return True, if the execution should be aborted; false otherwise
/* Whether a trace assertion has been triggered (see set_assert). */
222 bool ModelExecution::has_asserted() const
224 return priv->asserted;
228 * Trigger a trace assertion which should cause this execution to be halted.
229 * This can be due to a detected bug or due to an infeasibility that should
/* Trigger a trace assertion: request that this execution be halted. */
232 void ModelExecution::set_assert()
234 priv->asserted = true;
238 * Check if we are in a deadlock. Should only be called at the end of an
239 * execution, although it should not give false positives in the middle of an
240 * execution (there should be some ENABLED thread).
242 * @return True if program is in a deadlock; false otherwise
/* Deadlock check: scan all user threads for one that still has a pending
 * action (i.e. is blocked). NOTE(review): the visible code only computes
 * blocking_threads; the "no ENABLED thread" half of the check is among the
 * elided lines, so the full condition cannot be confirmed from this view. */
244 bool ModelExecution::is_deadlocked() const
246 bool blocking_threads = false;
247 for (unsigned int i = 0;i < get_num_threads();i++) {
248 thread_id_t tid = int_to_id(i);
251 Thread *t = get_thread(tid);
/* Model thread is bookkeeping only — never counts as blocked. */
252 if (!t->is_model_thread() && t->get_pending())
253 blocking_threads = true;
255 return blocking_threads;
259 * Check if this is a complete execution. That is, have all thread completed
260 * execution (rather than exiting because sleep sets have forced a redundant
263 * @return True if the execution is complete.
/* Complete iff no thread is still enabled (elided lines presumably return
 * false inside the loop and true after it — TODO confirm). */
265 bool ModelExecution::is_complete_execution() const
267 for (unsigned int i = 0;i < get_num_threads();i++)
268 if (is_enabled(int_to_id(i)))
/* Wrap a plain (non-atomic) store that previously hit `location` in a
 * synthetic NONATOMIC_WRITE ModelAction, stamped with the storing thread
 * and clock recorded by the instrumentation, then thread it into the
 * write lists and the modification order so reads can observe it.
 * (Return of `act` is among the elided lines — TODO confirm.) */
273 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
274 uint64_t value = *((const uint64_t *) location);
275 modelclock_t storeclock;
276 thread_id_t storethread;
277 getStoreThreadAndClock(location, &storethread, &storeclock);
/* Mark location as atomic from now on, so this conversion happens once. */
278 setAtomicStoreFlag(location);
279 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
280 act->set_seq_number(storeclock);
281 add_normal_write_to_lists(act);
282 add_write_to_lists(act);
283 w_modification_order(act);
284 model->get_history()->process_action(act, act->get_tid());
289 * Processes a read model action.
290 * @param curr is the read model action to process.
291 * @param rf_set is the set of model actions we can possibly read from
292 * @return True if processing this read updates the mo_graph.
/* Process a read: (1) fold any pending non-atomic store into rf_set;
 * (2) drop writes that would violate read modification order (swap-with-back
 * removal — note the elided lines likely pop_back/adjust i); (3) let the
 * fuzzer pick a write; (4) on success, add mo edges from the prior set to
 * the chosen write and optionally prune curr from the per-thread obj list.
 * NOTE(review): listing elided — retry/cleanup paths are not visible. */
294 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
296 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
297 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
298 if (hasnonatomicstore) {
299 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
300 rf_set->push_back(nonatomicstore);
303 // Remove writes that violate read modification order
304 for (uint i = 0; i < rf_set->size(); i++) {
305 ModelAction * rf = (*rf_set)[i];
/* check_only=true: just test feasibility, don't touch priorset. */
306 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
307 (*rf_set)[i] = rf_set->back();
313 int index = fuzzer->selectWrite(curr, rf_set);
314 if (index == -1) // no feasible write exists
317 ModelAction *rf = (*rf_set)[index];
320 bool canprune = false;
321 if (r_modification_order(curr, rf, priorset, &canprune)) {
322 for(unsigned int i=0;i<priorset->size();i++) {
323 mo_graph->addEdge((*priorset)[i], rf);
326 get_thread(curr)->set_return_value(curr->get_return_value());
328 if (canprune && curr->get_type() == ATOMIC_READ) {
329 int tid = id_to_int(curr->get_tid());
330 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
/* Chosen write infeasible after all: swap-remove it and (elided) retry. */
335 (*rf_set)[index] = rf_set->back();
341 * Processes a lock, trylock, or unlock model action. @param curr is
342 * the read model action to process.
344 * The try lock operation checks whether the lock is taken. If not,
345 * it falls to the normal lock operation case. If so, it returns
348 * The lock operation has already been checked that it is enabled, so
349 * it just grabs the lock and synchronizes with the previous unlock.
351 * The unlock operation has to re-enable all of the threads that are
352 * waiting on the lock.
354 * @return True if synchronization was updated; false otherwise
/* Process lock / trylock / unlock / wait / notify actions on a mutex or
 * condition variable. Visible cases:
 *  - TRYLOCK: succeed iff not locked; return value 1/0; success falls into
 *    the lock case.
 *  - lock: record owner, synchronize with the previous unlock.
 *  - unlock (and timedwait, which shares the case label): wake threads whose
 *    pending action is a lock waiting on this thread, then clear `locked`.
 *  - wait (presumably, around L196 — case label elided): if the fuzzer says
 *    wait, enqueue curr on the condvar waiter list and put the thread to
 *    sleep.
 *  - NOTIFY_ALL: wake every waiter; NOTIFY_ONE: fuzzer picks one waiter.
 * NOTE(review): listing elided — several case labels, breaks, and returns
 * are missing from this view. */
356 bool ModelExecution::process_mutex(ModelAction *curr)
358 cdsc::mutex *mutex = curr->get_mutex();
359 struct cdsc::mutex_state *state = NULL;
362 state = mutex->get_state();
364 switch (curr->get_type()) {
365 case ATOMIC_TRYLOCK: {
366 bool success = !state->locked;
367 curr->set_try_lock(success);
369 get_thread(curr)->set_return_value(0);
372 get_thread(curr)->set_return_value(1);
374 //otherwise fall into the lock case
376 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
377 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
378 // assert_bug("Lock access before initialization");
379 state->locked = get_thread(curr);
380 ModelAction *unlock = get_last_unlock(curr);
381 //synchronize with the previous unlock statement
382 if (unlock != NULL) {
383 synchronize(unlock, curr);
389 /* wake up the other threads */
390 for (unsigned int i = 0;i < get_num_threads();i++) {
391 Thread *t = get_thread(int_to_id(i));
392 Thread *curr_thrd = get_thread(curr);
393 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
397 /* unlock the lock - after checking who was waiting on it */
398 state->locked = NULL;
400 if (fuzzer->shouldWait(curr)) {
401 /* disable this thread */
402 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
403 scheduler->sleep(get_thread(curr));
408 case ATOMIC_TIMEDWAIT:
409 case ATOMIC_UNLOCK: {
410 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
412 /* wake up the other threads */
413 for (unsigned int i = 0;i < get_num_threads();i++) {
414 Thread *t = get_thread(int_to_id(i));
415 Thread *curr_thrd = get_thread(curr);
416 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
420 /* unlock the lock - after checking who was waiting on it */
421 state->locked = NULL;
424 case ATOMIC_NOTIFY_ALL: {
425 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
426 //activate all the waiting threads
427 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
428 scheduler->wake(get_thread(rit->getVal()));
433 case ATOMIC_NOTIFY_ONE: {
434 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
435 if (waiters->size() != 0) {
/* Fuzzer chooses which single waiter to wake (exploration choice). */
436 Thread * thread = fuzzer->selectNotify(waiters);
437 scheduler->wake(thread);
449 * Process a write ModelAction
450 * @param curr The ModelAction to process
451 * @return True if the mo_graph was updated or promises were resolved
/* Process a write: update modification order for it and set the writing
 * thread's return value. */
453 void ModelExecution::process_write(ModelAction *curr)
455 w_modification_order(curr);
456 get_thread(curr)->set_return_value(VALUE_NONE);
460 * Process a fence ModelAction
461 * @param curr The ModelAction to process
462 * @return True if synchronization was updated
/* Process a fence. Only acquire fences do work here: walk the action trace
 * backwards through this thread's prior actions, and for each plain
 * (non-acquire) read, merge the happens-before clock propagated from its
 * write into the fence's clock vector (hypothetical release sequence).
 * Walk stops at thread start or a prior acquire fence.
 * NOTE(review): elided lines include the `continue`s/`break`s for the
 * filtering conditions and the final return of `updated`. */
464 bool ModelExecution::process_fence(ModelAction *curr)
467 * fence-relaxed: no-op
468 * fence-release: only log the occurence (not in this function), for
469 * use in later synchronization
470 * fence-acquire (this function): search for hypothetical release
472 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
474 bool updated = false;
475 if (curr->is_acquire()) {
476 action_list_t *list = &action_trace;
477 sllnode<ModelAction *> * rit;
478 /* Find X : is_read(X) && X --sb-> curr */
479 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
480 ModelAction *act = rit->getVal();
/* Only same-thread actions are sequenced-before curr. */
483 if (act->get_tid() != curr->get_tid())
485 /* Stop at the beginning of the thread */
486 if (act->is_thread_start())
488 /* Stop once we reach a prior fence-acquire */
489 if (act->is_fence() && act->is_acquire())
493 /* read-acquire will find its own release sequences */
494 if (act->is_acquire())
497 /* Establish hypothetical release sequences */
498 ClockVector *cv = get_hb_from_write(act->get_reads_from());
499 if (cv != NULL && curr->get_cv()->merge(cv))
507 * @brief Process the current action for thread-related activity
509 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
510 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
511 * synchronization, etc. This function is a no-op for non-THREAD actions
512 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
514 * @param curr The current action
515 * @return True if synchronization was updated or a thread completed
/* Handle THREAD_* / PTHREAD_* actions: thread creation (both C11 thrd and
 * pthread flavors), join synchronization, finish (waking joiners), and a
 * final case (around L294) that parks the thread via the scheduler —
 * presumably ATOMIC_SLEEP/start-related; case label elided, TODO confirm.
 * No-op for non-THREAD actions. */
517 void ModelExecution::process_thread_action(ModelAction *curr)
519 switch (curr->get_type()) {
520 case THREAD_CREATE: {
521 thrd_t *thrd = (thrd_t *)curr->get_location();
522 struct thread_params *params = (struct thread_params *)curr->get_value();
523 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
524 curr->set_thread_operand(th);
526 th->set_creation(curr);
529 case PTHREAD_CREATE: {
/* pthread ids are dense integers; store the new id at the user's pthread_t. */
530 (*(uint32_t *)curr->get_location()) = pthread_counter++;
532 struct pthread_params *params = (struct pthread_params *)curr->get_value();
533 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
534 curr->set_thread_operand(th);
536 th->set_creation(curr);
538 if ( pthread_map.size() < pthread_counter )
539 pthread_map.resize( pthread_counter );
540 pthread_map[ pthread_counter-1 ] = th;
/* (join case — label elided) synchronize joiner with the target's last action. */
545 Thread *blocking = curr->get_thread_operand();
546 ModelAction *act = get_last_action(blocking->get_id());
547 synchronize(act, curr);
/* (second join flavor — label elided) same synchronization pattern. */
551 Thread *blocking = curr->get_thread_operand();
552 ModelAction *act = get_last_action(blocking->get_id());
553 synchronize(act, curr);
554 break; // WL: to be add (modified)
557 case THREADONLY_FINISH:
558 case THREAD_FINISH: {
559 Thread *th = get_thread(curr);
/* Init thread finishing a THREAD_FINISH is special-cased (body elided). */
560 if (curr->get_type() == THREAD_FINISH &&
561 th == model->getInitThread()) {
567 /* Wake up any joining threads */
568 for (unsigned int i = 0;i < get_num_threads();i++) {
569 Thread *waiting = get_thread(int_to_id(i));
570 if (waiting->waiting_on() == th &&
571 waiting->get_pending()->is_thread_join())
572 scheduler->wake(waiting);
/* (case label elided) re-queue curr as pending and put the thread to sleep. */
581 Thread *th = get_thread(curr);
582 th->set_pending(curr);
583 scheduler->add_sleep(th);
592 * Initialize the current action by performing one or more of the following
593 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
594 * manipulating backtracking sets, allocating and
595 * initializing clock vectors, and computing the promises to fulfill.
597 * @param curr The current action, as passed from the user context; may be
598 * freed/invalidated after the execution of this function, with a different
599 * action "returned" its place (pass-by-reference)
600 * @return True if curr is a newly-explored action; false otherwise
/* Initialize curr: RMWC/RMW actions are merged into the earlier RMWR via
 * process_rmw (the replacement path after L307 is elided); otherwise stamp
 * a fresh sequence number, compute the clock vector from the parent action,
 * and record the most recent release fence. Returns true for a
 * newly-explored action. */
602 bool ModelExecution::initialize_curr_action(ModelAction **curr)
604 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
605 ModelAction *newcurr = process_rmw(*curr);
611 ModelAction *newcurr = *curr;
613 newcurr->set_seq_number(get_next_seq_num());
614 /* Always compute new clock vector */
615 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
617 /* Assign most recent release fence */
618 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
620 return true; /* This was a new ModelAction */
625 * @brief Establish reads-from relation between two actions
627 * Perform basic operations involved with establishing a concrete rf relation,
628 * including setting the ModelAction data and checking for release sequences.
630 * @param act The action that is reading (must be a read)
631 * @param rf The action from which we are reading (must be a write)
633 * @return True if this read established synchronization
/* Establish act --rf--> rf: record the reads-from link, and for an acquire
 * read merge in the happens-before clock propagated from the write. */
636 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
639 ASSERT(rf->is_write());
641 act->set_read_from(rf);
642 if (act->is_acquire()) {
643 ClockVector *cv = get_hb_from_write(rf);
646 act->get_cv()->merge(cv);
651 * @brief Synchronizes two actions
653 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
654 * This function performs the synchronization as well as providing other hooks
655 * for other checks along with synchronization.
657 * @param first The left-hand side of the synchronizes-with relation
658 * @param second The right-hand side of the synchronizes-with relation
659 * @return True if the synchronization was successful (i.e., was consistent
660 * with the execution order); false otherwise
/* first --sw--> second: second inherits first's clock vector. If second is
 * already ordered before first, the synchronization is inconsistent with
 * the execution order — flag bad synchronization (early-return elided). */
662 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
664 if (*second < *first) {
665 set_bad_synchronization();
668 return second->synchronize_with(first);
672 * @brief Check whether a model action is enabled.
674 * Checks whether an operation would be successful (i.e., is a lock already
675 * locked, or is the joined thread already complete).
677 * For yield-blocking, yields are never enabled.
679 * @param curr is the ModelAction to check whether it is enabled.
680 * @return a bool that indicates whether the action is enabled.
/* Would curr succeed if run now? Locks are disabled while held, joins while
 * the target thread is incomplete, sleeps per the fuzzer's decision.
 * (The disable/return statements inside each branch are elided.) */
682 bool ModelExecution::check_action_enabled(ModelAction *curr) {
683 if (curr->is_lock()) {
684 cdsc::mutex *lock = curr->get_mutex();
685 struct cdsc::mutex_state *state = lock->get_state();
688 } else if (curr->is_thread_join()) {
689 Thread *blocking = curr->get_thread_operand();
690 if (!blocking->is_complete()) {
693 } else if (curr->is_sleep()) {
694 if (!fuzzer->shouldSleep(curr))
702 * This is the heart of the model checker routine. It performs model-checking
703 * actions corresponding to a given "current action." Among other processes, it
704 * calculates reads-from relationships, updates synchronization clock vectors,
705 * forms a memory_order constraints graph, and handles replay/backtrack
706 * execution when running permutations of previously-observed executions.
708 * @param curr The current action to process
709 * @return The ModelAction that is actually executed; may be different than
/* Core per-action driver: initialize curr (merging the second half of an
 * RMW), wake sleepers, add uninit actions, build the may-read-from set for
 * new reads, then dispatch to process_read / thread / write / fence / mutex
 * processing and list bookkeeping. Reads return early (curr not added to
 * lists here — process_read handles it). NOTE(review): listing elided; the
 * process_* calls after L383 and the final return are not visible. */
712 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
715 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
716 bool newly_explored = initialize_curr_action(&curr);
720 wake_up_sleeping_actions(curr);
722 /* Add uninitialized actions to lists */
723 if (!second_part_of_rmw)
724 add_uninit_action_to_lists(curr);
726 SnapVector<ModelAction *> * rf_set = NULL;
727 /* Build may_read_from set for newly-created actions */
728 if (newly_explored && curr->is_read())
729 rf_set = build_may_read_from(curr);
731 if (curr->is_read() && !second_part_of_rmw) {
732 process_read(curr, rf_set);
735 /* bool success = process_read(curr, rf_set);
738 return curr; // Do not add action to lists
/* Non-reads must not have built an rf_set. */
741 ASSERT(rf_set == NULL);
743 /* Add the action to lists */
744 if (!second_part_of_rmw)
745 add_action_to_lists(curr);
747 if (curr->is_write())
748 add_write_to_lists(curr);
750 process_thread_action(curr);
752 if (curr->is_write())
755 if (curr->is_fence())
758 if (curr->is_mutex_op())
765 * This is the strongest feasibility check available.
766 * @return whether the current trace (partial or complete) must be a prefix of
/* Strongest feasibility check available: currently just !is_infeasible(). */
769 bool ModelExecution::isfeasibleprefix() const
771 return !is_infeasible();
775 * Print disagnostic information about an infeasible execution
776 * @param prefix A string to prefix the output with; if NULL, then a default
777 * message prefix will be provided
/* Print why the execution is infeasible; buf/ptr setup is elided from this
 * listing. The only visible cause is bad synchronization ordering. */
779 void ModelExecution::print_infeasibility(const char *prefix) const
783 if (priv->bad_synchronization)
784 ptr += sprintf(ptr, "[bad sw ordering]");
786 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
790 * Check if the current partial trace is infeasible. Does not check any
791 * end-of-execution flags, which might rule out the execution. Thus, this is
792 * useful only for ruling an execution as infeasible.
793 * @return whether the current partial trace is infeasible.
/* Infeasible iff a bad synchronization was recorded (partial-trace check). */
795 bool ModelExecution::is_infeasible() const
797 return priv->bad_synchronization;
800 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
800 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
/* Merge act into the thread's previous read, add the RMW atomicity edge
 * into the mo_graph, and (elided) return the merged action. */
801 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
802 ModelAction *lastread = get_last_action(act->get_tid());
803 lastread->process_rmw(act);
805 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
811 * @brief Updates the mo_graph with the constraints imposed from the current
814 * Basic idea is the following: Go through each other thread and find
815 * the last action that happened before our read. Two cases:
817 * -# The action is a write: that write must either occur before
818 * the write we read from or be the write we read from.
819 * -# The action is a read: the write that that action read from
820 * must occur before the write we read from or be the same write.
822 * @param curr The current action. Must be a read.
823 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
824 * @param check_only Only check if the current action satisfies read
825 * modification order or not, without modifiying priorsetand canprune
826 * @return True if modification order edges were added; false otherwise
/* Build mo constraints implied by curr reading from rf. Per thread, walk
 * actions newest-first; each prior write (or the write a prior read read
 * from) that is ordered before curr must be mo-before rf. The three
 * SC-fence branches implement C++11 §29.3 statements 5, 4 and 6; the
 * checkReachable(rf, act) tests detect would-be mo cycles (failure return
 * elided). priorset collects the edges; canprune is set when curr can be
 * dropped from its per-thread obj list. check_only skips mutation of
 * priorset/canprune. Threads whose clock hasn't advanced since the previous
 * visit (and with no newer SC fence) are skipped entirely.
 * NOTE(review): listing elided — loop-variable declaration, several
 * continue/break/return statements, and the canprune assignment are among
 * the missing lines. */
829 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune, bool check_only)
831 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
833 ASSERT(curr->is_read());
835 /* Last SC fence in the current thread */
836 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
838 int tid = curr->get_tid();
839 ModelAction *prev_same_thread = NULL;
840 /* Iterate over all threads */
841 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
842 /* Last SC fence in thread tid */
843 ModelAction *last_sc_fence_thread_local = NULL;
845 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
847 /* Last SC fence in thread tid, before last SC fence in current thread */
848 ModelAction *last_sc_fence_thread_before = NULL;
849 if (last_sc_fence_local)
850 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
852 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
853 if (prev_same_thread != NULL &&
854 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
855 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
859 /* Iterate over actions in thread, starting from most recent */
860 action_list_t *list = &(*thrd_lists)[tid];
861 sllnode<ModelAction *> * rit;
862 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
863 ModelAction *act = rit->getVal();
868 /* Don't want to add reflexive edges on 'rf' */
869 if (act->equals(rf)) {
870 if (act->happens_before(curr))
876 if (act->is_write()) {
877 /* C++, Section 29.3 statement 5 */
878 if (curr->is_seqcst() && last_sc_fence_thread_local &&
879 *act < *last_sc_fence_thread_local) {
880 if (mo_graph->checkReachable(rf, act))
883 priorset->push_back(act);
886 /* C++, Section 29.3 statement 4 */
887 else if (act->is_seqcst() && last_sc_fence_local &&
888 *act < *last_sc_fence_local) {
889 if (mo_graph->checkReachable(rf, act))
892 priorset->push_back(act);
895 /* C++, Section 29.3 statement 6 */
896 else if (last_sc_fence_thread_before &&
897 *act < *last_sc_fence_thread_before) {
898 if (mo_graph->checkReachable(rf, act))
901 priorset->push_back(act);
907 * Include at most one act per-thread that "happens
910 if (act->happens_before(curr)) {
912 if (last_sc_fence_local == NULL ||
913 (*last_sc_fence_local < *act)) {
914 prev_same_thread = act;
917 if (act->is_write()) {
918 if (mo_graph->checkReachable(rf, act))
921 priorset->push_back(act);
/* act is a read: order the write it read from before rf instead. */
923 const ModelAction *prevrf = act->get_reads_from();
924 if (!prevrf->equals(rf)) {
925 if (mo_graph->checkReachable(rf, prevrf))
928 priorset->push_back(prevrf);
930 if (act->get_tid() == curr->get_tid()) {
931 //Can prune curr from obj list
945 * Updates the mo_graph with the constraints imposed from the current write.
947 * Basic idea is the following: Go through each other thread and find
948 * the lastest action that happened before our write. Two cases:
950 * (1) The action is a write => that write must occur before
953 * (2) The action is a read => the write that that action read from
954 * must occur before the current write.
956 * This method also handles two other issues:
958 * (I) Sequential Consistency: Making sure that if the current write is
959 * seq_cst, that it occurs after the previous seq_cst write.
961 * (II) Sending the write back to non-synchronizing reads.
963 * @param curr The current action. Must be a write.
964 * @param send_fv A vector for stashing reads to which we may pass our future
965 * value. If NULL, then don't record any future values.
966 * @return True if modification order edges were added; false otherwise
/* Build mo constraints implied by the current write: every earlier write
 * (or write read by an earlier read) that happens-before curr, the last SC
 * write when curr is seq_cst, and writes ordered by an SC fence pair
 * (C++11 §29.3 statement 7), must all be mo-before curr. Edges are batched
 * in edgeset and added at the end. RMWs that already read from something
 * rely on the RMW edge and skip most of the scan (control lines elided).
 * NOTE(review): listing elided — loop-variable declaration and several
 * continue/break statements are missing from this view. */
968 void ModelExecution::w_modification_order(ModelAction *curr)
970 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
972 ASSERT(curr->is_write());
974 SnapList<ModelAction *> edgeset;
976 if (curr->is_seqcst()) {
977 /* We have to at least see the last sequentially consistent write,
978 so we are initialized. */
979 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
980 if (last_seq_cst != NULL) {
981 edgeset.push_back(last_seq_cst);
983 //update map for next query
984 obj_last_sc_map.put(curr->get_location(), curr);
987 /* Last SC fence in the current thread */
988 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
990 /* Iterate over all threads */
991 for (i = 0;i < thrd_lists->size();i++) {
992 /* Last SC fence in thread i, before last SC fence in current thread */
993 ModelAction *last_sc_fence_thread_before = NULL;
994 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
995 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
997 /* Iterate over actions in thread, starting from most recent */
998 action_list_t *list = &(*thrd_lists)[i];
999 sllnode<ModelAction*>* rit;
1000 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1001 ModelAction *act = rit->getVal();
1004 * 1) If RMW and it actually read from something, then we
1005 * already have all relevant edges, so just skip to next
1008 * 2) If RMW and it didn't read from anything, we should
1009 * whatever edge we can get to speed up convergence.
1011 * 3) If normal write, we need to look at earlier actions, so
1012 * continue processing list.
1014 if (curr->is_rmw()) {
1015 if (curr->get_reads_from() != NULL)
1023 /* C++, Section 29.3 statement 7 */
1024 if (last_sc_fence_thread_before && act->is_write() &&
1025 *act < *last_sc_fence_thread_before) {
1026 edgeset.push_back(act);
1031 * Include at most one act per-thread that "happens
1034 if (act->happens_before(curr)) {
1036 * Note: if act is RMW, just add edge:
1038 * The following edge should be handled elsewhere:
1039 * readfrom(act) --mo--> act
1041 if (act->is_write())
1042 edgeset.push_back(act);
1043 else if (act->is_read()) {
1044 //if previous read accessed a null, just keep going
1045 edgeset.push_back(act->get_reads_from());
/* Commit all collected constraints in one batch. */
1051 mo_graph->addEdges(&edgeset, curr);
1056 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1057 * some constraints. This method checks one the following constraint (others
1058 * require compiler support):
1060 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1061 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
/* Reads-from-future restriction (C++11 §29.3 p9): reader must not read
 * writer if some write (or write read by a read) that the reader
 * happens-before is mo-before writer. Per thread, find the earliest such
 * "write after read" and test reachability in the mo_graph; the
 * disallowing return after L561 is elided. */
1063 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1065 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1067 /* Iterate over all threads */
1068 for (i = 0;i < thrd_lists->size();i++) {
1069 const ModelAction *write_after_read = NULL;
1071 /* Iterate over actions in thread, starting from most recent */
1072 action_list_t *list = &(*thrd_lists)[i];
1073 sllnode<ModelAction *>* rit;
1074 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1075 ModelAction *act = rit->getVal();
1077 /* Don't disallow due to act == reader */
1078 if (!reader->happens_before(act) || reader == act)
1080 else if (act->is_write())
1081 write_after_read = act;
1082 else if (act->is_read() && act->get_reads_from() != NULL)
1083 write_after_read = act->get_reads_from();
1086 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1093 * Computes the clock vector that happens before propagates from this write.
1095 * @param rf The action that might be part of a release sequence. Must be a
1097 * @return ClockVector of happens before relation.
/* Compute the happens-before clock vector a read inherits from rf. First
 * walk the reads-from chain backwards through relaxed RMWs, collecting
 * those without a cached rfcv; then rebuild forward (loop header around
 * L574 elided), deriving each action's vector from its release status,
 * its last release fence, or the cached rfcv, caching as it goes
 * (cache-store lines elided). Returns the resulting vector; may return
 * NULL (see process_fence's NULL check on the result). */
1100 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1101 SnapVector<ModelAction *> * processset = NULL;
1102 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1103 ASSERT(rf->is_write());
/* Stop once we hit a non-RMW, an acq+rel RMW, or a cached vector. */
1104 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1106 if (processset == NULL)
1107 processset = new SnapVector<ModelAction *>();
1108 processset->push_back(rf);
1111 int i = (processset == NULL) ? 0 : processset->size();
1113 ClockVector * vec = NULL;
1115 if (rf->get_rfcv() != NULL) {
1116 vec = rf->get_rfcv();
1117 } else if (rf->is_acquire() && rf->is_release()) {
1119 } else if (rf->is_release() && !rf->is_rmw()) {
1121 } else if (rf->is_release()) {
1122 //have rmw that is release and doesn't have a rfcv
1123 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1126 //operation that isn't release
1127 if (rf->get_last_fence_release()) {
1129 vec = rf->get_last_fence_release()->get_cv();
1131 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
/* Step to the next collected RMW in the chain. */
1137 rf = (*processset)[i];
1141 if (processset != NULL)
1147 * Performs various bookkeeping operations for the current ModelAction when it is
1148 * the first atomic action occurred at its memory location.
1150 * For instance, adds uninitialized action to the per-object, per-thread action vector
1151 * and to the action trace list of all thread actions.
1153 * @param act is the ModelAction to process.
/* On the first atomic action at a location, synthesize an "uninitialized"
 * write and push it at the front of the per-object list, the per-object
 * per-thread write list, the action trace, and thrd_last_action.
 * NOTE(review): at L617 the thread-map update indexes by uninit_id rather
 * than tid, unlike the parallel code in add_action_to_lists which uses tid
 * — possibly intentional (the uninit action belongs to the uninit thread),
 * but worth confirming against upstream; the resize is sized by
 * next_thread_id while the guard checks tid. Also uninit_id's declaration
 * is among the elided lines. */
1155 void ModelExecution::add_uninit_action_to_lists(ModelAction *act)
1157 int tid = id_to_int(act->get_tid());
1158 ModelAction *uninit = NULL;
1160 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1161 if (list->empty() && act->is_atomic_var()) {
1162 uninit = get_uninitialized_action(act);
1163 uninit_id = id_to_int(uninit->get_tid());
1164 list->push_front(uninit);
1165 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1166 if ((int)vec->size() <= uninit_id) {
1167 int oldsize = (int) vec->size();
1168 vec->resize(uninit_id + 1);
/* Placement-new each new slot: SnapVector resize does not construct. */
1169 for(int i = oldsize; i < uninit_id + 1; i++) {
1170 new (&(*vec)[i]) action_list_t();
1173 (*vec)[uninit_id].push_front(uninit);
1176 // Update action trace, a total order of all actions
1178 action_trace.push_front(uninit);
1180 // Update obj_thrd_map, a per location, per thread, order of actions
1181 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1182 if ((int)vec->size() <= tid) {
1183 uint oldsize = vec->size();
1184 vec->resize(priv->next_thread_id);
1185 for(uint i = oldsize; i < priv->next_thread_id; i++)
1186 new (&(*vec)[i]) action_list_t();
1189 (*vec)[uninit_id].push_front(uninit);
1191 // Update thrd_last_action, the last action taken by each thrad
1192 if ((int)thrd_last_action.size() <= tid)
1193 thrd_last_action.resize(get_num_threads());
1195 thrd_last_action[uninit_id] = uninit;
1200 * Performs various bookkeeping operations for the current ModelAction. For
1201 * instance, adds action to the per-object, per-thread action vector and to the
1202 * action trace list of all thread actions.
1204 * @param act is the ModelAction to add.
1206 void ModelExecution::add_action_to_lists(ModelAction *act)
1208 int tid = id_to_int(act->get_tid());
1209 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1210 list->push_back(act);
1212 // Update action trace, a total order of all actions
1213 action_trace.push_back(act);
1215 // Update obj_thrd_map, a per location, per thread, order of actions
1216 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1217 if ((int)vec->size() <= tid) {
1218 uint oldsize = vec->size();
1219 vec->resize(priv->next_thread_id);
1220 for(uint i = oldsize; i < priv->next_thread_id; i++)
1221 new (&(*vec)[i]) action_list_t();
1223 (*vec)[tid].push_back(act);
1225 // Update thrd_last_action, the last action taken by each thrad
1226 if ((int)thrd_last_action.size() <= tid)
1227 thrd_last_action.resize(get_num_threads());
1228 thrd_last_action[tid] = act;
1230 // Update thrd_last_fence_release, the last release fence taken by each thread
1231 if (act->is_fence() && act->is_release()) {
1232 if ((int)thrd_last_fence_release.size() <= tid)
1233 thrd_last_fence_release.resize(get_num_threads());
1234 thrd_last_fence_release[tid] = act;
1237 if (act->is_wait()) {
1238 void *mutex_loc = (void *) act->get_value();
1239 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1241 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1242 if ((int)vec->size() <= tid) {
1243 uint oldsize = vec->size();
1244 vec->resize(priv->next_thread_id);
1245 for(uint i = oldsize; i < priv->next_thread_id; i++)
1246 new (&(*vec)[i]) action_list_t();
1248 (*vec)[tid].push_back(act);
1252 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1253 sllnode<ModelAction*> * rit = list->end();
1254 modelclock_t next_seq = act->get_seq_number();
1255 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1256 list->push_back(act);
1258 for(;rit != NULL;rit=rit->getPrev()) {
1259 if (rit->getVal()->get_seq_number() == next_seq) {
1260 list->insertAfter(rit, act);
1267 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1268 sllnode<ModelAction*> * rit = list->end();
1269 modelclock_t next_seq = act->get_seq_number();
1271 act->create_cv(NULL);
1272 } else if (rit->getVal()->get_seq_number() == next_seq) {
1273 act->create_cv(rit->getVal());
1274 list->push_back(act);
1276 for(;rit != NULL;rit=rit->getPrev()) {
1277 if (rit->getVal()->get_seq_number() == next_seq) {
1278 act->create_cv(rit->getVal());
1279 list->insertAfter(rit, act);
1287 * Performs various bookkeeping operations for a normal write. The
1288 * complication is that we are typically inserting a normal write
1289 * lazily, so we need to insert it into the middle of lists.
1291 * @param act is the ModelAction to add.
1294 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1296 int tid = id_to_int(act->get_tid());
1297 insertIntoActionListAndSetCV(&action_trace, act);
1299 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1300 insertIntoActionList(list, act);
1302 // Update obj_thrd_map, a per location, per thread, order of actions
1303 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1304 if (tid >= (int)vec->size()) {
1305 uint oldsize =vec->size();
1306 vec->resize(priv->next_thread_id);
1307 for(uint i=oldsize;i<priv->next_thread_id;i++)
1308 new (&(*vec)[i]) action_list_t();
1310 insertIntoActionList(&(*vec)[tid],act);
1312 // Update thrd_last_action, the last action taken by each thrad
1313 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1314 thrd_last_action[tid] = act;
1318 void ModelExecution::add_write_to_lists(ModelAction *write) {
1319 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1320 int tid = id_to_int(write->get_tid());
1321 if (tid >= (int)vec->size()) {
1322 uint oldsize =vec->size();
1323 vec->resize(priv->next_thread_id);
1324 for(uint i=oldsize;i<priv->next_thread_id;i++)
1325 new (&(*vec)[i]) action_list_t();
1327 (*vec)[tid].push_back(write);
1331 * @brief Get the last action performed by a particular Thread
1332 * @param tid The thread ID of the Thread in question
1333 * @return The last action in the thread
1335 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1337 int threadid = id_to_int(tid);
1338 if (threadid < (int)thrd_last_action.size())
1339 return thrd_last_action[id_to_int(tid)];
1345 * @brief Get the last fence release performed by a particular Thread
1346 * @param tid The thread ID of the Thread in question
1347 * @return The last fence release in the thread, if one exists; NULL otherwise
1349 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1351 int threadid = id_to_int(tid);
1352 if (threadid < (int)thrd_last_fence_release.size())
1353 return thrd_last_fence_release[id_to_int(tid)];
1359 * Gets the last memory_order_seq_cst write (in the total global sequence)
1360 * performed on a particular object (i.e., memory location), not including the
1362 * @param curr The current ModelAction; also denotes the object location to
1364 * @return The last seq_cst write
1366 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1368 void *location = curr->get_location();
1369 return obj_last_sc_map.get(location);
1373 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1374 * performed in a particular thread, prior to a particular fence.
1375 * @param tid The ID of the thread to check
1376 * @param before_fence The fence from which to begin the search; if NULL, then
1377 * search for the most recent fence in the thread.
1378 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);
	/* Start the cursor at the most recent fence action */
	sllnode<ModelAction*>* rit = list->end();
	/* When a starting fence is given, rewind the cursor back to it first so
	 * the search only considers actions strictly before it */
	for (;rit != NULL;rit=rit->getPrev())
		if (rit->getVal() == before_fence)
	/* before_fence must be present in the fence list */
	ASSERT(rit->getVal() == before_fence);
	/* Scan backwards for the nearest seq_cst fence executed by this thread */
	for (;rit != NULL;rit=rit->getPrev()) {
		ModelAction *act = rit->getVal();
		if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1408 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1409 * location). This function identifies the mutex according to the current
1410 * action, which is presumed to perform on the same mutex.
1411 * @param curr The current ModelAction; also denotes the object location to
1413 * @return The last unlock operation
1415 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1417 void *location = curr->get_location();
1419 action_list_t *list = obj_map.get(location);
1420 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1421 sllnode<ModelAction*>* rit;
1422 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1423 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1424 return rit->getVal();
1428 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1430 ModelAction *parent = get_last_action(tid);
1432 parent = get_thread(tid)->get_creation();
1437 * Returns the clock vector for a given thread.
1438 * @param tid The thread whose clock vector we want
1439 * @return Desired clock vector
1441 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1443 ModelAction *firstaction=get_parent_action(tid);
1444 return firstaction != NULL ? firstaction->get_cv() : NULL;
1447 bool valequals(uint64_t val1, uint64_t val2, int size) {
1450 return ((uint8_t)val1) == ((uint8_t)val2);
1452 return ((uint16_t)val1) == ((uint16_t)val2);
1454 return ((uint32_t)val1) == ((uint32_t)val2);
1464 * Build up an initial set of all past writes that this 'read' action may read
1465 * from, as well as any previously-observed future values that must still be valid.
1467 * @param curr is the current ModelAction that we are exploring; it must be a
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
	/* Per-thread lists of writes at this location (NULL entries elided here) */
	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	ModelAction *last_sc_write = NULL;
	/* An SC read must not skip past the last SC write to this location */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);
	/* Caller owns the returned vector */
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction *> * rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;
			/* An SC (or HB-before-last-SC-write) write other than the last SC
			 * write itself is not a legal source for an SC read */
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			/* Need to check whether we will have two RMW reading from the same value */
			if (curr->is_rmwr()) {
				/* It is okay if we have a failing CAS */
				if (!curr->is_rmwrcas() ||
				valequals(curr->get_value(), act->get_value(), curr->getSize())) {
					//Need to make sure we aren't the second RMW
					CycleNode * node = mo_graph->getNode_noCreate(act);
					if (node != NULL && node->getRMW() != NULL) {
						//we are the second RMW
			/* Only add feasible reads */
			rf_set->push_back(act);
			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
1534 * @brief Get an action representing an uninitialized atomic
1536 * This function may create a new one.
1538 * @param curr The current action, which prompts the creation of an UNINIT action
1539 * @return A pointer to the UNINIT ModelAction
1541 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1543 ModelAction *act = curr->get_uninit_action();
1545 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1546 curr->set_uninit_action(act);
1548 act->create_cv(NULL);
/* Print each action in @a list plus an order-sensitive hash of the whole
 * list, so two execution logs can be compared at a glance. */
static void print_list(action_list_t *list)
	sllnode<ModelAction*> *it;
	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");
	unsigned int hash = 0;
	for (it = list->begin();it != NULL;it=it->getNext()) {
		const ModelAction *act = it->getVal();
		/* seq number 0 marks actions not (yet) part of the numbered trace */
		if (act->get_seq_number() > 0)
		/* fold every action into the hash, order-sensitively */
		hash = hash^(hash<<3)^(it->getVal()->hash());
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/* Dump the modification-order graph to <filename>.dot, adding reads-from
 * ("rf", red) and sequenced-before ("sb", blue) edges for every action. */
void ModelExecution::dumpGraph(char *filename)
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* thread_array[t] remembers thread t's previous action so an sb edge can
	 * be drawn to its next one */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
	for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* red "rf" edge between the read and its reads-from write */
			mo_graph->dot_print_edge(file,
			act->get_reads_from(),
			"label=\"rf\", color=red, weight=2");
		if (thread_array[act->get_tid()]) {
			/* blue "sb" edge from the thread's previous action */
			mo_graph->dot_print_edge(file,
			thread_array[id_to_int(act->get_tid())],
			"label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary()
#if SUPPORT_MOD_ORDER_DUMP
	char buffername[100];
	/* Filenames are exec%04u / graph%04u, numbered by execution */
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Annotate feasible executions with noteworthy conditions */
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	print_infeasibility(" INFEASIBLE");
	print_list(&action_trace);
1633 * Add a Thread to the system for the first time. Should only be called once
1635 * @param t The Thread to add
1637 void ModelExecution::add_thread(Thread *t)
1639 unsigned int i = id_to_int(t->get_id());
1640 if (i >= thread_map.size())
1641 thread_map.resize(i + 1);
1643 if (!t->is_model_thread())
1644 scheduler->add_thread(t);
1648 * @brief Get a Thread reference by its ID
1649 * @param tid The Thread's ID
1650 * @return A Thread reference
1652 Thread * ModelExecution::get_thread(thread_id_t tid) const
1654 unsigned int i = id_to_int(tid);
1655 if (i < thread_map.size())
1656 return thread_map[i];
1661 * @brief Get a reference to the Thread in which a ModelAction was executed
1662 * @param act The ModelAction
1663 * @return A Thread reference
1665 Thread * ModelExecution::get_thread(const ModelAction *act) const
1667 return get_thread(act->get_tid());
1671 * @brief Get a Thread reference by its pthread ID
1672 * @param index The pthread's ID
1673 * @return A Thread reference
Thread * ModelExecution::get_pthread(pthread_t pid) {
	/* NOTE(review): the conversion of 'pid' into 'x' is elided in this view;
	 * x.v appears to reinterpret the pthread_t as a 32-bit id — confirm
	 * against the full source. */
	uint32_t thread_id = x.v;
	/* Bounds-check against the number of pthreads created so far */
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1687 * @brief Check if a Thread is currently enabled
1688 * @param t The Thread to check
1689 * @return True if the Thread is currently enabled
1691 bool ModelExecution::is_enabled(Thread *t) const
1693 return scheduler->is_enabled(t);
1697 * @brief Check if a Thread is currently enabled
1698 * @param tid The ID of the Thread to check
1699 * @return True if the Thread is currently enabled
1701 bool ModelExecution::is_enabled(thread_id_t tid) const
1703 return scheduler->is_enabled(tid);
* @brief Select the next thread to execute based on the current action
1709 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1710 * actions should be followed by the execution of their child thread. In either
1711 * case, the current action should determine the next thread schedule.
1713 * @param curr The current action
1714 * @return The next thread to run, if the current action will determine this
1715 * selection; otherwise NULL
1717 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1719 /* Do not split atomic RMW */
1720 if (curr->is_rmwr() && !paused_by_fuzzer(curr))
1721 return get_thread(curr);
1722 /* Follow CREATE with the created thread */
1723 /* which is not needed, because model.cc takes care of this */
1724 if (curr->get_type() == THREAD_CREATE)
1725 return curr->get_thread_operand();
1726 if (curr->get_type() == PTHREAD_CREATE) {
1727 return curr->get_thread_operand();
1732 /** @param act A read atomic action */
1733 bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
1735 ASSERT(act->is_read());
1737 // Actions paused by fuzzer have their sequence number reset to 0
1738 return act->get_seq_number() == 0;
1742 * Takes the next step in the execution, if possible.
1743 * @param curr The current step to take
1744 * @return Returns the next Thread to run, if any; NULL if this execution
Thread * ModelExecution::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);
	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* check_current_action() returns the action to continue with, which may
	 * differ from the one passed in */
	curr = check_current_action(curr);
	/* Process this action in ModelHistory for records */
	model->get_history()->process_action( curr, curr->get_tid() );
	/* Threads that blocked or finished on this step leave the scheduler */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);
	/* RMW / thread-create may force the next scheduling choice */
	return action_select_next_thread(curr);
1765 Fuzzer * ModelExecution::getFuzzer() {