11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
31 bad_synchronization(false),
// Destructor frees every bug_message heap-allocated via assert_bug().
35 ~model_snapshot_members() {
36 for (unsigned int i = 0;i < bugs.size();i++)
// ID that will be handed out to the next newly-created thread.
41 unsigned int next_thread_id;
// Highest sequence number assigned to any ModelAction so far.
42 modelclock_t used_sequence_numbers;
43 SnapVector<bug_message *> bugs;
44 /** @brief Incorrectly-ordered synchronization was made */
45 bool bad_synchronization;
51 /** @brief Constructor */
52 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
57 thread_map(2), /* We'll always need at least 2 threads */
61 condvar_waiters_map(),
65 thrd_last_fence_release(),
66 priv(new struct model_snapshot_members ()),
67 mo_graph(new CycleGraph()),
68 fuzzer(new NewFuzzer()),
70 thrd_func_act_lists(),
73 /* Initialize a model-checker thread, for special ModelActions */
74 model_thread = new Thread(get_next_id());
75 add_thread(model_thread);
// Register this execution object so the scheduler and fuzzer can query it back.
76 scheduler->register_engine(this);
77 fuzzer->register_engine(m->get_history(), this);
80 /** @brief Destructor */
81 ModelExecution::~ModelExecution()
// Reclaim every Thread object created during this execution (incl. model_thread).
83 for (unsigned int i = 0;i < get_num_threads();i++)
84 delete get_thread(int_to_id(i));
90 int ModelExecution::get_execution_number() const
92 return model->get_execution_number();
// Look up the action list for 'ptr' in 'hash', lazily creating (and caching) an
// empty list the first time this address is seen.
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
97 action_list_t *tmp = hash->get(ptr);
99 tmp = new action_list_t();
// Same lazy-create pattern as get_safe_ptr_action, but for the per-thread
// vector-of-action-lists tables (obj_thrd_map / obj_wr_thrd_map).
105 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
107 SnapVector<action_list_t> *tmp = hash->get(ptr);
109 tmp = new SnapVector<action_list_t>();
115 /** @return a thread ID for a new Thread */
116 thread_id_t ModelExecution::get_next_id()
118 return priv->next_thread_id++;
121 /** @return the number of user threads created during this execution */
122 unsigned int ModelExecution::get_num_threads() const
124 return priv->next_thread_id;
127 /** @return a sequence number for a new ModelAction */
128 modelclock_t ModelExecution::get_next_seq_num()
130 return ++priv->used_sequence_numbers;
133 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
134 void ModelExecution::restore_last_seq_num()
136 priv->used_sequence_numbers--;
140 * @brief Should the current action wake up a given thread?
142 * @param curr The current action
143 * @param thread The thread that we might wake up
144 * @return True, if we should wake up the sleeping thread; false otherwise
146 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
// 'asleep' is the action the sleeping thread is blocked on.
148 const ModelAction *asleep = thread->get_pending();
149 /* Don't allow partial RMW to wake anyone up */
152 /* Synchronizing actions may have been backtracked */
153 if (asleep->could_synchronize_with(curr))
155 /* All acquire/release fences and fence-acquire/store-release */
156 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
158 /* Fence-release + store can awake load-acquire on the same location */
159 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
160 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only if the writer's fence-release is ordered after the sleeper's last action.
161 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Sleep operations wake up probabilistically, as decided by the fuzzer.
164 if (asleep->is_sleep()) {
165 if (fuzzer->shouldWake(asleep))
// Scan every thread in the sleep set and remove those that 'curr' should wake
// (per should_wake_up), so they become schedulable again.
172 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
174 for (unsigned int i = 0;i < get_num_threads();i++) {
175 Thread *thr = get_thread(int_to_id(i));
176 if (scheduler->is_sleep_set(thr)) {
177 if (should_wake_up(curr, thr))
178 /* Remove this thread from sleep set */
179 scheduler->remove_sleep(thr);
184 /** @brief Alert the model-checker that an incorrectly-ordered
185 * synchronization was made */
186 void ModelExecution::set_bad_synchronization()
188 priv->bad_synchronization = true;
// Record a bug report for this execution; ownership of the new bug_message
// passes to priv->bugs (freed by ~model_snapshot_members).
191 bool ModelExecution::assert_bug(const char *msg)
193 priv->bugs.push_back(new bug_message(msg));
// Only a feasible prefix can make the bug report actionable.
195 if (isfeasibleprefix()) {
202 /** @return True, if any bugs have been reported for this execution */
203 bool ModelExecution::have_bug_reports() const
205 return priv->bugs.size() != 0;
// Accessor for the list of bug reports recorded during this execution.
208 SnapVector<bug_message *> * ModelExecution::get_bugs() const
214 * Check whether the current trace has triggered an assertion which should halt
217 * @return True, if the execution should be aborted; false otherwise
219 bool ModelExecution::has_asserted() const
221 return priv->asserted;
225 * Trigger a trace assertion which should cause this execution to be halted.
226 * This can be due to a detected bug or due to an infeasibility that should
229 void ModelExecution::set_assert()
231 priv->asserted = true;
235 * Check if we are in a deadlock. Should only be called at the end of an
236 * execution, although it should not give false positives in the middle of an
237 * execution (there should be some ENABLED thread).
239 * @return True if program is in a deadlock; false otherwise
241 bool ModelExecution::is_deadlocked() const
243 bool blocking_threads = false;
244 for (unsigned int i = 0;i < get_num_threads();i++) {
245 thread_id_t tid = int_to_id(i);
248 Thread *t = get_thread(tid);
// A user thread with a pending action at end-of-execution is blocked waiting.
249 if (!t->is_model_thread() && t->get_pending())
250 blocking_threads = true;
252 return blocking_threads;
256 * Check if this is a complete execution. That is, have all thread completed
257 * execution (rather than exiting because sleep sets have forced a redundant
260 * @return True if the execution is complete.
262 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the execution has not run to completion.
264 for (unsigned int i = 0;i < get_num_threads();i++)
265 if (is_enabled(int_to_id(i)))
// Materialize a plain (non-atomic) store observed at 'location' as a
// NONATOMIC_WRITE ModelAction, so subsequent atomic reads may read from it.
// The store's thread/clock are recovered from the shadow memory bookkeeping.
270 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
271 uint64_t value = *((const uint64_t *) location);
272 modelclock_t storeclock;
273 thread_id_t storethread;
274 getStoreThreadAndClock(location, &storethread, &storeclock);
// Mark the location atomic so the same store is not converted twice.
275 setAtomicStoreFlag(location);
276 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
// Backdate the synthesized action to the original store's clock.
277 act->set_seq_number(storeclock);
278 add_normal_write_to_lists(act);
279 add_write_to_lists(act);
280 w_modification_order(act);
281 model->get_history()->process_action(act, act->get_tid());
286 * Processes a read model action.
287 * @param curr is the read model action to process.
288 * @param rf_set is the set of model actions we can possibly read from
289 * @return True if processing this read updates the mo_graph.
291 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
293 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
294 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
// A pending plain store at this location is converted into a candidate write.
295 if (hasnonatomicstore) {
296 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
297 rf_set->push_back(nonatomicstore);
// The fuzzer picks which write this read observes.
301 int index = fuzzer->selectWrite(curr, rf_set);
302 if (index == -1) // no feasible write exists
305 ModelAction *rf = (*rf_set)[index];
308 bool canprune = false;
// On success, every mo-prior write collected goes before 'rf' in the mo graph.
309 if (r_modification_order(curr, rf, priorset, &canprune)) {
310 for(unsigned int i=0;i<priorset->size();i++) {
311 mo_graph->addEdge((*priorset)[i], rf);
314 get_thread(curr)->set_return_value(curr->get_return_value());
// Prunable plain reads are dropped from the per-location, per-thread list.
316 if (canprune && curr->get_type() == ATOMIC_READ) {
317 int tid = id_to_int(curr->get_tid());
318 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
// Swap-remove the rejected candidate and retry with the shrunken rf_set.
323 (*rf_set)[index] = rf_set->back();
329 * Processes a lock, trylock, or unlock model action. @param curr is
330 * the read model action to process.
332 * The try lock operation checks whether the lock is taken. If not,
333 * it falls to the normal lock operation case. If so, it returns
336 * The lock operation has already been checked that it is enabled, so
337 * it just grabs the lock and synchronizes with the previous unlock.
339 * The unlock operation has to re-enable all of the threads that are
340 * waiting on the lock.
342 * @return True if synchronization was updated; false otherwise
344 bool ModelExecution::process_mutex(ModelAction *curr)
346 cdsc::mutex *mutex = curr->get_mutex();
347 struct cdsc::mutex_state *state = NULL;
350 state = mutex->get_state();
352 switch (curr->get_type()) {
353 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff no thread currently holds the lock.
354 bool success = !state->locked;
355 curr->set_try_lock(success);
357 get_thread(curr)->set_return_value(0);
360 get_thread(curr)->set_return_value(1);
362 //otherwise fall into the lock case
364 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
365 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
366 // assert_bug("Lock access before initialization");
// Record this thread as the lock owner.
367 state->locked = get_thread(curr);
368 ModelAction *unlock = get_last_unlock(curr);
369 //synchronize with the previous unlock statement
370 if (unlock != NULL) {
371 synchronize(unlock, curr);
377 /* wake up the other threads */
378 for (unsigned int i = 0;i < get_num_threads();i++) {
379 Thread *t = get_thread(int_to_id(i));
380 Thread *curr_thrd = get_thread(curr);
381 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
385 /* unlock the lock - after checking who was waiting on it */
386 state->locked = NULL;
// Wait: the fuzzer decides whether this wait blocks (it can spuriously skip).
388 if (fuzzer->shouldWait(curr)) {
389 /* disable this thread */
390 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
391 scheduler->sleep(get_thread(curr));
396 case ATOMIC_TIMEDWAIT:
397 case ATOMIC_UNLOCK: {
398 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
400 /* wake up the other threads */
401 for (unsigned int i = 0;i < get_num_threads();i++) {
402 Thread *t = get_thread(int_to_id(i));
403 Thread *curr_thrd = get_thread(curr);
404 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
408 /* unlock the lock - after checking who was waiting on it */
409 state->locked = NULL;
412 case ATOMIC_NOTIFY_ALL: {
413 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
414 //activate all the waiting threads
415 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
416 scheduler->wake(get_thread(rit->getVal()));
421 case ATOMIC_NOTIFY_ONE: {
422 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// The fuzzer selects which single waiter to wake.
423 if (waiters->size() != 0) {
424 Thread * thread = fuzzer->selectNotify(waiters);
425 scheduler->wake(thread);
437 * Process a write ModelAction
438 * @param curr The ModelAction to process
439 * @return True if the mo_graph was updated or promises were resolved
441 void ModelExecution::process_write(ModelAction *curr)
443 w_modification_order(curr);
444 get_thread(curr)->set_return_value(VALUE_NONE);
448 * Process a fence ModelAction
449 * @param curr The ModelAction to process
450 * @return True if synchronization was updated
452 bool ModelExecution::process_fence(ModelAction *curr)
455 * fence-relaxed: no-op
456 * fence-release: only log the occurence (not in this function), for
457 * use in later synchronization
458 * fence-acquire (this function): search for hypothetical release
460 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
462 bool updated = false;
463 if (curr->is_acquire()) {
464 action_list_t *list = &action_trace;
465 sllnode<ModelAction *> * rit;
466 /* Find X : is_read(X) && X --sb-> curr */
// Walk the whole-trace list backwards, considering only this thread's actions.
467 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
468 ModelAction *act = rit->getVal();
471 if (act->get_tid() != curr->get_tid())
473 /* Stop at the beginning of the thread */
474 if (act->is_thread_start())
476 /* Stop once we reach a prior fence-acquire */
477 if (act->is_fence() && act->is_acquire())
481 /* read-acquire will find its own release sequences */
482 if (act->is_acquire())
485 /* Establish hypothetical release sequences */
// Merge the hb clock propagated from the write that 'act' read from.
486 ClockVector *cv = get_hb_from_write(act->get_reads_from());
487 if (cv != NULL && curr->get_cv()->merge(cv))
495 * @brief Process the current action for thread-related activity
497 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
498 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
499 * synchronization, etc. This function is a no-op for non-THREAD actions
500 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
502 * @param curr The current action
503 * @return True if synchronization was updated or a thread completed
505 void ModelExecution::process_thread_action(ModelAction *curr)
507 switch (curr->get_type()) {
508 case THREAD_CREATE: {
509 thrd_t *thrd = (thrd_t *)curr->get_location();
510 struct thread_params *params = (struct thread_params *)curr->get_value();
511 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
512 curr->set_thread_operand(th);
514 th->set_creation(curr);
517 case PTHREAD_CREATE: {
// pthreads are identified by a monotonically increasing counter stored at the
// caller-supplied location.
518 (*(uint32_t *)curr->get_location()) = pthread_counter++;
520 struct pthread_params *params = (struct pthread_params *)curr->get_value();
521 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
522 curr->set_thread_operand(th);
524 th->set_creation(curr);
// Keep the counter->Thread map large enough, then register the new thread.
526 if ( pthread_map.size() < pthread_counter )
527 pthread_map.resize( pthread_counter );
528 pthread_map[ pthread_counter-1 ] = th;
// Join: synchronize with the last action of the thread being joined.
533 Thread *blocking = curr->get_thread_operand();
534 ModelAction *act = get_last_action(blocking->get_id());
535 synchronize(act, curr);
539 Thread *blocking = curr->get_thread_operand();
540 ModelAction *act = get_last_action(blocking->get_id());
541 synchronize(act, curr);
542 break; // WL: to be add (modified)
545 case THREADONLY_FINISH:
546 case THREAD_FINISH: {
547 Thread *th = get_thread(curr);
// Special case: the initial thread finishing a THREAD_FINISH ends the program.
548 if (curr->get_type() == THREAD_FINISH &&
549 th == model->getInitThread()) {
555 /* Wake up any joining threads */
556 for (unsigned int i = 0;i < get_num_threads();i++) {
557 Thread *waiting = get_thread(int_to_id(i));
558 if (waiting->waiting_on() == th &&
559 waiting->get_pending()->is_thread_join())
560 scheduler->wake(waiting);
574 * Initialize the current action by performing one or more of the following
575 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
576 * manipulating backtracking sets, allocating and
577 * initializing clock vectors, and computing the promises to fulfill.
579 * @param curr The current action, as passed from the user context; may be
580 * freed/invalidated after the execution of this function, with a different
581 * action "returned" its place (pass-by-reference)
582 * @return True if curr is a newly-explored action; false otherwise
584 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// The second half of an RMW is merged into the earlier RMWR action.
586 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
587 ModelAction *newcurr = process_rmw(*curr);
593 ModelAction *newcurr = *curr;
595 newcurr->set_seq_number(get_next_seq_num());
596 /* Always compute new clock vector */
597 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
599 /* Assign most recent release fence */
600 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
602 return true; /* This was a new ModelAction */
607 * @brief Establish reads-from relation between two actions
609 * Perform basic operations involved with establishing a concrete rf relation,
610 * including setting the ModelAction data and checking for release sequences.
612 * @param act The action that is reading (must be a read)
613 * @param rf The action from which we are reading (must be a write)
615 * @return True if this read established synchronization
618 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
621 ASSERT(rf->is_write());
623 act->set_read_from(rf);
// An acquire read synchronizes: merge the hb clock propagated by the write.
624 if (act->is_acquire()) {
625 ClockVector *cv = get_hb_from_write(rf);
628 act->get_cv()->merge(cv);
633 * @brief Synchronizes two actions
635 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
636 * This function performs the synchronization as well as providing other hooks
637 * for other checks along with synchronization.
639 * @param first The left-hand side of the synchronizes-with relation
640 * @param second The right-hand side of the synchronizes-with relation
641 * @return True if the synchronization was successful (i.e., was consistent
642 * with the execution order); false otherwise
644 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
646 if (*second < *first) {
647 set_bad_synchronization();
650 return second->synchronize_with(first);
654 * @brief Check whether a model action is enabled.
656 * Checks whether an operation would be successful (i.e., is a lock already
657 * locked, or is the joined thread already complete).
659 * For yield-blocking, yields are never enabled.
661 * @param curr is the ModelAction to check whether it is enabled.
662 * @return a bool that indicates whether the action is enabled.
664 bool ModelExecution::check_action_enabled(ModelAction *curr) {
// A lock is disabled while another thread holds it.
665 if (curr->is_lock()) {
666 cdsc::mutex *lock = curr->get_mutex();
667 struct cdsc::mutex_state *state = lock->get_state();
// A join is disabled until the target thread completes.
670 } else if (curr->is_thread_join()) {
671 Thread *blocking = curr->get_thread_operand();
672 if (!blocking->is_complete()) {
// The fuzzer may decide a sleep needn't actually block.
675 } else if (curr->is_sleep()) {
676 if (!fuzzer->shouldSleep(curr))
684 * This is the heart of the model checker routine. It performs model-checking
685 * actions corresponding to a given "current action." Among other processes, it
686 * calculates reads-from relationships, updates synchronization clock vectors,
687 * forms a memory_order constraints graph, and handles replay/backtrack
688 * execution when running permutations of previously-observed executions.
690 * @param curr The current action to process
691 * @return The ModelAction that is actually executed; may be different than
694 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// RMWC/RMW actions are the completion half of an earlier RMWR.
697 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
698 bool newly_explored = initialize_curr_action(&curr);
702 wake_up_sleeping_actions(curr);
704 /* Add uninitialized actions to lists */
705 if (!second_part_of_rmw && curr->get_type() != NOOP)
706 add_uninit_action_to_lists(curr);
708 SnapVector<ModelAction *> * rf_set = NULL;
709 /* Build may_read_from set for newly-created actions */
710 if (newly_explored && curr->is_read())
711 rf_set = build_may_read_from(curr);
// Reads are processed before being added to lists; a failed read bails out.
713 if (curr->is_read() && !second_part_of_rmw) {
714 bool success = process_read(curr, rf_set);
717 return curr; // Do not add action to lists
719 ASSERT(rf_set == NULL);
721 /* Add the action to lists */
722 if (!second_part_of_rmw && curr->get_type() != NOOP)
723 add_action_to_lists(curr);
725 if (curr->is_write())
726 add_write_to_lists(curr);
// Dispatch the remaining per-kind processing (thread/write/fence/mutex).
728 process_thread_action(curr);
730 if (curr->is_write())
733 if (curr->is_fence())
736 if (curr->is_mutex_op())
743 * This is the strongest feasibility check available.
744 * @return whether the current trace (partial or complete) must be a prefix of
747 bool ModelExecution::isfeasibleprefix() const
749 return !is_infeasible();
753 * Print diagnostic information about an infeasible execution
754 * @param prefix A string to prefix the output with; if NULL, then a default
755 * message prefix will be provided
757 void ModelExecution::print_infeasibility(const char *prefix) const
// Currently the only recorded infeasibility cause is bad synchronization order.
761 if (priv->bad_synchronization)
762 ptr += sprintf(ptr, "[bad sw ordering]");
764 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
768 * Check if the current partial trace is infeasible. Does not check any
769 * end-of-execution flags, which might rule out the execution. Thus, this is
770 * useful only for ruling an execution as infeasible.
771 * @return whether the current partial trace is infeasible.
773 bool ModelExecution::is_infeasible() const
775 return priv->bad_synchronization;
778 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
779 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
// Merge this completion action into the thread's preceding RMWR action.
780 ModelAction *lastread = get_last_action(act->get_tid());
781 lastread->process_rmw(act);
// An RMW must be mo-adjacent to the write it read from.
783 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
789 * @brief Updates the mo_graph with the constraints imposed from the current
792 * Basic idea is the following: Go through each other thread and find
793 * the last action that happened before our read. Two cases:
795 * -# The action is a write: that write must either occur before
796 * the write we read from or be the write we read from.
797 * -# The action is a read: the write that that action read from
798 * must occur before the write we read from or be the same write.
800 * @param curr The current action. Must be a read.
801 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
802 * @return True if modification order edges were added; false otherwise
805 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
807 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
809 ASSERT(curr->is_read());
811 /* Last SC fence in the current thread */
812 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
814 int tid = curr->get_tid();
815 ModelAction *prev_same_thread = NULL;
816 /* Iterate over all threads */
// Threads are visited starting at curr's own thread, then wrapping around.
817 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
818 /* Last SC fence in thread tid */
819 ModelAction *last_sc_fence_thread_local = NULL;
821 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
823 /* Last SC fence in thread tid, before last SC fence in current thread */
824 ModelAction *last_sc_fence_thread_before = NULL;
825 if (last_sc_fence_local)
826 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
828 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
829 if (prev_same_thread != NULL &&
830 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
831 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
835 /* Iterate over actions in thread, starting from most recent */
836 action_list_t *list = &(*thrd_lists)[tid];
837 sllnode<ModelAction *> * rit;
838 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
839 ModelAction *act = rit->getVal();
844 /* Don't want to add reflexive edges on 'rf' */
845 if (act->equals(rf)) {
846 if (act->happens_before(curr))
852 if (act->is_write()) {
853 /* C++, Section 29.3 statement 5 */
854 if (curr->is_seqcst() && last_sc_fence_thread_local &&
855 *act < *last_sc_fence_thread_local) {
// A cycle rf -> act would contradict mo; otherwise act is mo-prior to rf.
856 if (mo_graph->checkReachable(rf, act))
858 priorset->push_back(act);
861 /* C++, Section 29.3 statement 4 */
862 else if (act->is_seqcst() && last_sc_fence_local &&
863 *act < *last_sc_fence_local) {
864 if (mo_graph->checkReachable(rf, act))
866 priorset->push_back(act);
869 /* C++, Section 29.3 statement 6 */
870 else if (last_sc_fence_thread_before &&
871 *act < *last_sc_fence_thread_before) {
872 if (mo_graph->checkReachable(rf, act))
874 priorset->push_back(act);
880 * Include at most one act per-thread that "happens
883 if (act->happens_before(curr)) {
885 if (last_sc_fence_local == NULL ||
886 (*last_sc_fence_local < *act)) {
887 prev_same_thread = act;
890 if (act->is_write()) {
891 if (mo_graph->checkReachable(rf, act))
893 priorset->push_back(act);
// A prior read contributes the write it read from as an mo-prior write.
895 const ModelAction *prevrf = act->get_reads_from();
896 if (!prevrf->equals(rf)) {
897 if (mo_graph->checkReachable(rf, prevrf))
899 priorset->push_back(prevrf);
901 if (act->get_tid() == curr->get_tid()) {
902 //Can prune curr from obj list
915 * Updates the mo_graph with the constraints imposed from the current write.
917 * Basic idea is the following: Go through each other thread and find
918 * the lastest action that happened before our write. Two cases:
920 * (1) The action is a write => that write must occur before
923 * (2) The action is a read => the write that that action read from
924 * must occur before the current write.
926 * This method also handles two other issues:
928 * (I) Sequential Consistency: Making sure that if the current write is
929 * seq_cst, that it occurs after the previous seq_cst write.
931 * (II) Sending the write back to non-synchronizing reads.
933 * @param curr The current action. Must be a write.
934 * @param send_fv A vector for stashing reads to which we may pass our future
935 * value. If NULL, then don't record any future values.
936 * @return True if modification order edges were added; false otherwise
938 void ModelExecution::w_modification_order(ModelAction *curr)
940 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
942 ASSERT(curr->is_write());
// All mo-prior actions found below are collected, then edged to curr at once.
944 SnapList<ModelAction *> edgeset;
946 if (curr->is_seqcst()) {
947 /* We have to at least see the last sequentially consistent write,
948 so we are initialized. */
949 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
950 if (last_seq_cst != NULL) {
951 edgeset.push_back(last_seq_cst);
953 //update map for next query
954 obj_last_sc_map.put(curr->get_location(), curr);
957 /* Last SC fence in the current thread */
958 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
960 /* Iterate over all threads */
961 for (i = 0;i < thrd_lists->size();i++) {
962 /* Last SC fence in thread i, before last SC fence in current thread */
963 ModelAction *last_sc_fence_thread_before = NULL;
964 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
965 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
967 /* Iterate over actions in thread, starting from most recent */
968 action_list_t *list = &(*thrd_lists)[i];
969 sllnode<ModelAction*>* rit;
970 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
971 ModelAction *act = rit->getVal();
974 * 1) If RMW and it actually read from something, then we
975 * already have all relevant edges, so just skip to next
978 * 2) If RMW and it didn't read from anything, we should
979 * whatever edge we can get to speed up convergence.
981 * 3) If normal write, we need to look at earlier actions, so
982 * continue processing list.
984 if (curr->is_rmw()) {
985 if (curr->get_reads_from() != NULL)
993 /* C++, Section 29.3 statement 7 */
994 if (last_sc_fence_thread_before && act->is_write() &&
995 *act < *last_sc_fence_thread_before) {
996 edgeset.push_back(act);
1001 * Include at most one act per-thread that "happens
1004 if (act->happens_before(curr)) {
1006 * Note: if act is RMW, just add edge:
1008 * The following edge should be handled elsewhere:
1009 * readfrom(act) --mo--> act
1011 if (act->is_write())
1012 edgeset.push_back(act);
1013 else if (act->is_read()) {
1014 //if previous read accessed a null, just keep going
1015 edgeset.push_back(act->get_reads_from());
// Commit all collected mo-prior actions as edges into curr in one batch.
1021 mo_graph->addEdges(&edgeset, curr);
1026 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1027 * some constraints. This method checks one the following constraint (others
1028 * require compiler support):
1030 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1031 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1033 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1035 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1037 /* Iterate over all threads */
1038 for (i = 0;i < thrd_lists->size();i++) {
// Earliest write (or write-read-from) in this thread that is hb-after 'reader'.
1039 const ModelAction *write_after_read = NULL;
1041 /* Iterate over actions in thread, starting from most recent */
1042 action_list_t *list = &(*thrd_lists)[i];
1043 sllnode<ModelAction *>* rit;
1044 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1045 ModelAction *act = rit->getVal();
1047 /* Don't disallow due to act == reader */
1048 if (!reader->happens_before(act) || reader == act)
1050 else if (act->is_write())
1051 write_after_read = act;
1052 else if (act->is_read() && act->get_reads_from() != NULL)
1053 write_after_read = act->get_reads_from();
// Disallow the rf if 'writer' is mo-after a write that reader happens-before.
1056 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1063 * Computes the clock vector that happens before propagates from this write.
1065 * @param rf The action that might be part of a release sequence. Must be a
1067 * @return ClockVector of happens before relation.
1070 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// First pass: walk back through the RMW chain collecting writes whose rf-clock
// vector (rfcv) has not been computed yet.
1071 SnapVector<ModelAction *> * processset = NULL;
1072 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1073 ASSERT(rf->is_write());
// Stop at a non-RMW, an acq+rel RMW, or a write with a cached rfcv.
1074 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1076 if (processset == NULL)
1077 processset = new SnapVector<ModelAction *>();
1078 processset->push_back(rf);
// Second pass: replay the collected chain from oldest to newest, building and
// caching the propagated clock vector case-by-case.
1081 int i = (processset == NULL) ? 0 : processset->size();
1083 ClockVector * vec = NULL;
1085 if (rf->get_rfcv() != NULL) {
1086 vec = rf->get_rfcv();
1087 } else if (rf->is_acquire() && rf->is_release()) {
1089 } else if (rf->is_release() && !rf->is_rmw()) {
1091 } else if (rf->is_release()) {
1092 //have rmw that is release and doesn't have a rfcv
1093 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1096 //operation that isn't release
1097 if (rf->get_last_fence_release()) {
1099 vec = rf->get_last_fence_release()->get_cv();
1101 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1107 rf = (*processset)[i];
1111 if (processset != NULL)
1117 * Performs various bookkeeping operations for the current ModelAction when it is
1118 * the first atomic action occurred at its memory location.
1120 * For instance, adds uninitialized action to the per-object, per-thread action vector
1121 * and to the action trace list of all thread actions.
1123 * @param act is the ModelAction to process.
1125 void ModelExecution::add_uninit_action_to_lists(ModelAction *act)
1127 int tid = id_to_int(act->get_tid());
1128 ModelAction *uninit = NULL;
// Only the very first atomic access at a location gets a synthetic
// "uninitialized" predecessor action.
1130 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1131 if (list->empty() && act->is_atomic_var()) {
1132 uninit = get_uninitialized_action(act);
1133 uninit_id = id_to_int(uninit->get_tid());
1134 list->push_front(uninit);
1135 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
// Grow the per-thread vector and placement-construct the new slots.
1136 if ((int)vec->size() <= uninit_id) {
1137 int oldsize = (int) vec->size();
1138 vec->resize(uninit_id + 1);
1139 for(int i = oldsize; i < uninit_id + 1; i++) {
1140 new (&(*vec)[i]) action_list_t();
1143 (*vec)[uninit_id].push_front(uninit);
1146 // Update action trace, a total order of all actions
1148 action_trace.push_front(uninit);
1150 // Update obj_thrd_map, a per location, per thread, order of actions
1151 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1152 if ((int)vec->size() <= tid) {
1153 uint oldsize = vec->size();
1154 vec->resize(priv->next_thread_id);
1155 for(uint i = oldsize; i < priv->next_thread_id; i++)
1156 new (&(*vec)[i]) action_list_t();
// NOTE(review): indexes by uninit_id (the uninit action's thread), not tid —
// presumably intentional since only 'uninit' is pushed here; confirm upstream.
1159 (*vec)[uninit_id].push_front(uninit);
1161 // Update thrd_last_action, the last action taken by each thrad
1162 if ((int)thrd_last_action.size() <= tid)
1163 thrd_last_action.resize(get_num_threads());
1165 thrd_last_action[uninit_id] = uninit;
1170 * Performs various bookkeeping operations for the current ModelAction. For
1171 * instance, adds action to the per-object, per-thread action vector and to the
1172 * action trace list of all thread actions.
1174 * @param act is the ModelAction to add.
void ModelExecution::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	/* Per-object (per-location) ordered list of actions */
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	list->push_back(act);
	// Update action trace, a total order of all actions
	action_trace.push_back(act);
	// Update obj_thrd_map, a per location, per thread, order of actions
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if ((int)vec->size() <= tid) {
		uint oldsize = vec->size();
		vec->resize(priv->next_thread_id);
		/* resize() does not construct new slots; placement-new each
		 * action list into place */
		for(uint i = oldsize; i < priv->next_thread_id; i++)
			new (&(*vec)[i]) action_list_t();
	(*vec)[tid].push_back(act);
	// Update thrd_last_action, the last action taken by each thread
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	// Update thrd_last_fence_release, the last release fence taken by each thread
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	/* A wait also operates on its associated mutex, so index it under the
	 * mutex location too; the mutex address is carried in the action's
	 * value field */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if ((int)vec->size() <= tid) {
			uint oldsize = vec->size();
			vec->resize(priv->next_thread_id);
			for(uint i = oldsize; i < priv->next_thread_id; i++)
				new (&(*vec)[i]) action_list_t();
		(*vec)[tid].push_back(act);
/**
 * @brief Insert a lazily-added action into a list ordered by sequence number
 *
 * Scans backwards from the tail for the run of actions that share the
 * action's sequence number and inserts the action immediately after it.
 *
 * @param list The list to insert into
 * @param act The ModelAction to insert
 */
void insertIntoActionList(action_list_t *list, ModelAction *act) {
	sllnode<ModelAction*> * rit = list->end();
	modelclock_t next_seq = act->get_seq_number();
	/* Fast path: empty list, or the tail already carries this sequence
	 * number, so the action belongs at the end */
	if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
		list->push_back(act);
	/* Otherwise walk backwards to the matching sequence number */
	for(;rit != NULL;rit=rit->getPrev()) {
		if (rit->getVal()->get_seq_number() == next_seq) {
			list->insertAfter(rit, act);
/**
 * @brief Insert a lazily-added action and derive its clock vector
 *
 * Like insertIntoActionList(), but also builds the action's clock vector
 * from the neighboring action that shares its sequence number (or from
 * scratch when no such neighbor exists).
 *
 * @param list The list to insert into
 * @param act The ModelAction to insert
 */
void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
	sllnode<ModelAction*> * rit = list->end();
	modelclock_t next_seq = act->get_seq_number();
	/* NOTE(review): the condition guarding this branch is not visible in
	 * this view; presumably the empty-list case -- confirm */
	act->create_cv(NULL);
	} else if (rit->getVal()->get_seq_number() == next_seq) {
		/* Tail has the same sequence number: derive the CV from it */
		act->create_cv(rit->getVal());
		list->push_back(act);
	/* Otherwise scan backwards for the matching sequence number */
	for(;rit != NULL;rit=rit->getPrev()) {
		if (rit->getVal()->get_seq_number() == next_seq) {
			act->create_cv(rit->getVal());
			list->insertAfter(rit, act);
1257 * Performs various bookkeeping operations for a normal write. The
1258 * complication is that we are typically inserting a normal write
1259 * lazily, so we need to insert it into the middle of lists.
1261 * @param act is the ModelAction to add.
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	/* Splice into the global trace, deriving the clock vector from the
	 * neighbor with the same sequence number */
	insertIntoActionListAndSetCV(&action_trace, act);
	/* Per-object (per-location) list of actions */
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	insertIntoActionList(list, act);
	// Update obj_thrd_map, a per location, per thread, order of actions
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size()) {
		uint oldsize =vec->size();
		vec->resize(priv->next_thread_id);
		/* resize() does not construct new slots; placement-new each one */
		for(uint i=oldsize;i<priv->next_thread_id;i++)
			new (&(*vec)[i]) action_list_t();
	insertIntoActionList(&(*vec)[tid],act);
	// Update thrd_last_action, the last action taken by each thread
	/* Only supersede the recorded last action when it shares this write's
	 * sequence number (i.e. belongs to the same program step) */
	if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
		thrd_last_action[tid] = act;
1288 void ModelExecution::add_write_to_lists(ModelAction *write) {
1289 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1290 int tid = id_to_int(write->get_tid());
1291 if (tid >= (int)vec->size()) {
1292 uint oldsize =vec->size();
1293 vec->resize(priv->next_thread_id);
1294 for(uint i=oldsize;i<priv->next_thread_id;i++)
1295 new (&(*vec)[i]) action_list_t();
1297 (*vec)[tid].push_back(write);
1301 * @brief Get the last action performed by a particular Thread
1302 * @param tid The thread ID of the Thread in question
1303 * @return The last action in the thread
1305 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1307 int threadid = id_to_int(tid);
1308 if (threadid < (int)thrd_last_action.size())
1309 return thrd_last_action[id_to_int(tid)];
1315 * @brief Get the last fence release performed by a particular Thread
1316 * @param tid The thread ID of the Thread in question
1317 * @return The last fence release in the thread, if one exists; NULL otherwise
1319 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1321 int threadid = id_to_int(tid);
1322 if (threadid < (int)thrd_last_fence_release.size())
1323 return thrd_last_fence_release[id_to_int(tid)];
1329 * Gets the last memory_order_seq_cst write (in the total global sequence)
1330 * performed on a particular object (i.e., memory location), not including the
1332 * @param curr The current ModelAction; also denotes the object location to
1334 * @return The last seq_cst write
1336 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1338 void *location = curr->get_location();
1339 return obj_last_sc_map.get(location);
1343 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1344 * performed in a particular thread, prior to a particular fence.
1345 * @param tid The ID of the thread to check
1346 * @param before_fence The fence from which to begin the search; if NULL, then
1347 * search for the most recent fence in the thread.
1348 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);
	sllnode<ModelAction*>* rit = list->end();
	/* When a bound is given, first rewind to it so the search starts
	 * strictly before before_fence */
	for (;rit != NULL;rit=rit->getPrev())
		if (rit->getVal() == before_fence)
	/* The bound must actually be present in the fence list */
	ASSERT(rit->getVal() == before_fence);
	/* Scan backwards for the nearest SC fence belonging to thread tid */
	for (;rit != NULL;rit=rit->getPrev()) {
		ModelAction *act = rit->getVal();
		if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1378 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1379 * location). This function identifies the mutex according to the current
1380 * action, which is presumed to perform on the same mutex.
1381 * @param curr The current ModelAction; also denotes the object location to
1383 * @return The last unlock operation
1385 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1387 void *location = curr->get_location();
1389 action_list_t *list = obj_map.get(location);
1390 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1391 sllnode<ModelAction*>* rit;
1392 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1393 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1394 return rit->getVal();
1398 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1400 ModelAction *parent = get_last_action(tid);
1402 parent = get_thread(tid)->get_creation();
1407 * Returns the clock vector for a given thread.
1408 * @param tid The thread whose clock vector we want
1409 * @return Desired clock vector
1411 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1413 ModelAction *firstaction=get_parent_action(tid);
1414 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare two values for equality over only their low-order bytes
 *
 * Atomic locations may be narrower than the 64-bit carrier values; only the
 * bytes that fit in the location are significant. As written, the three
 * return statements ran unconditionally (the 2- and 4-byte comparisons were
 * unreachable and @a size was ignored); restore the width dispatch.
 *
 * @param val1 The first value
 * @param val2 The second value
 * @param size The width of the location in bytes (1, 2, 4, or 8)
 * @return True iff the values agree in their low-order @a size bytes
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte (or unexpected) width: full 64-bit comparison */
		return val1 == val2;
	}
}
1434 * Build up an initial set of all past writes that this 'read' action may read
1435 * from, as well as any previously-observed future values that must still be valid.
 * @param curr is the current ModelAction that we are exploring; it must be a
 * read operation.
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
	/* Per-thread lists of prior writes to this read's location */
	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	ModelAction *last_sc_write = NULL;
	/* An SC read is constrained with respect to the last SC write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);
	/* Resulting may-read-from set, owned by the caller */
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction *> * rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			/* Need to check whether we will have two RMW reading from the same value */
			if (curr->is_rmwr()) {
				/* It is okay if we have a failing CAS */
				if (!curr->is_rmwrcas() ||
						valequals(curr->get_value(), act->get_value(), curr->getSize())) {
					//Need to make sure we aren't the second RMW
					CycleNode * node = mo_graph->getNode_noCreate(act);
					if (node != NULL && node->getRMW() != NULL) {
						//we are the second RMW
			/* Only add feasible reads */
			rf_set->push_back(act);
			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
1504 * @brief Get an action representing an uninitialized atomic
1506 * This function may create a new one.
1508 * @param curr The current action, which prompts the creation of an UNINIT action
1509 * @return A pointer to the UNINIT ModelAction
ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
	/* Reuse the UNINIT action cached on curr when one already exists */
	ModelAction *act = curr->get_uninit_action();
	/* Build a fresh UNINIT action owned by the model-checker thread,
	 * carrying the configured "uninitialized" value */
	act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
	/* Cache it on curr so subsequent queries return the same action */
	curr->set_uninit_action(act);
	/* No predecessor: build the clock vector from scratch */
	act->create_cv(NULL);
/** @brief Print a formatted table of every action in @a list, followed by a
 *  hash fingerprint that can be compared across executions. */
static void print_list(action_list_t *list)
	sllnode<ModelAction*> *it;
	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");
	/* Running hash over all printed actions; printed as a trace
	 * fingerprint below */
	unsigned int hash = 0;
	for (it = list->begin();it != NULL;it=it->getNext()) {
		const ModelAction *act = it->getVal();
		/* Actions with sequence number 0 are skipped from the listing */
		if (act->get_seq_number() > 0)
		hash = hash^(hash<<3)^(it->getVal()->hash());
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
1542 #if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the execution as a Graphviz .dot graph: modification-order
 *  nodes plus reads-from (rf) and sequenced-before (sb) edges. */
void ModelExecution::dumpGraph(char *filename)
	/* NOTE(review): buffer has no visible declaration in this view;
	 * presumably a local char array declared above -- confirm */
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Most recent action seen per thread, used to draw sb edges between
	 * consecutive actions of the same thread */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
	for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* reads-from edge: write -> read */
			mo_graph->dot_print_edge(file,
				act->get_reads_from(),
				"label=\"rf\", color=red, weight=2");
		/* sequenced-before edge from this thread's previous action */
		if (thread_array[act->get_tid()]) {
			mo_graph->dot_print_edge(file,
				thread_array[id_to_int(act->get_tid())],
				"label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
1576 /** @brief Prints an execution trace summary. */
void ModelExecution::print_summary()
/* Optionally dump the modification-order graph alongside the summary */
#if SUPPORT_MOD_ORDER_DUMP
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
	model_print("Execution trace %d:", get_execution_number());
	/* Annotate the header with feasibility / bug status */
	if (isfeasibleprefix()) {
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	print_infeasibility(" INFEASIBLE");
	/* Print the full ordered action trace */
	print_list(&action_trace);
1603 * Add a Thread to the system for the first time. Should only be called once
1605 * @param t The Thread to add
1607 void ModelExecution::add_thread(Thread *t)
1609 unsigned int i = id_to_int(t->get_id());
1610 if (i >= thread_map.size())
1611 thread_map.resize(i + 1);
1613 if (!t->is_model_thread())
1614 scheduler->add_thread(t);
1618 * @brief Get a Thread reference by its ID
1619 * @param tid The Thread's ID
1620 * @return A Thread reference
1622 Thread * ModelExecution::get_thread(thread_id_t tid) const
1624 unsigned int i = id_to_int(tid);
1625 if (i < thread_map.size())
1626 return thread_map[i];
1631 * @brief Get a reference to the Thread in which a ModelAction was executed
1632 * @param act The ModelAction
1633 * @return A Thread reference
1635 Thread * ModelExecution::get_thread(const ModelAction *act) const
1637 return get_thread(act->get_tid());
1641 * @brief Get a Thread reference by its pthread ID
 * @param pid The pthread's ID
1643 * @return A Thread reference
Thread * ModelExecution::get_pthread(pthread_t pid) {
	/* NOTE(review): x has no visible declaration here; presumably a
	 * pthread_t -> uint32_t conversion helper (e.g. a union) assigned
	 * from pid above -- confirm */
	uint32_t thread_id = x.v;
	/* Known pthread IDs are dense in [0, pthread_counter] */
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1657 * @brief Check if a Thread is currently enabled
1658 * @param t The Thread to check
1659 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(Thread *t) const
	/* The scheduler owns the enabled/disabled state for all threads */
	return scheduler->is_enabled(t);
1667 * @brief Check if a Thread is currently enabled
1668 * @param tid The ID of the Thread to check
1669 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(thread_id_t tid) const
	/* The scheduler owns the enabled/disabled state for all threads */
	return scheduler->is_enabled(tid);
 * @brief Select the next thread to execute based on the current action
1679 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1680 * actions should be followed by the execution of their child thread. In either
1681 * case, the current action should determine the next thread schedule.
1683 * @param curr The current action
1684 * @return The next thread to run, if the current action will determine this
1685 * selection; otherwise NULL
1687 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1689 /* Do not split atomic RMW */
1690 if (curr->is_rmwr() && !paused_by_fuzzer(curr))
1691 return get_thread(curr);
1692 /* Follow CREATE with the created thread */
1693 /* which is not needed, because model.cc takes care of this */
1694 if (curr->get_type() == THREAD_CREATE)
1695 return curr->get_thread_operand();
1696 if (curr->get_type() == PTHREAD_CREATE) {
1697 return curr->get_thread_operand();
1702 /** @param act A read atomic action */
1703 bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
1705 ASSERT(act->is_read());
1707 // Actions paused by fuzzer have their sequence number reset to 0
1708 return act->get_seq_number() == 0;
1712 * Takes the next step in the execution, if possible.
1713 * @param curr The current step to take
1714 * @return Returns the next Thread to run, if any; NULL if this execution
Thread * ModelExecution::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	/* Only a READY thread may take a step */
	ASSERT(curr_thrd->get_state() == THREAD_READY);
	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* check_current_action() may return a different ModelAction; use its
	 * result from here on */
	curr = check_current_action(curr);
	/* Process this action in ModelHistory for records */
	model->get_history()->process_action( curr, curr->get_tid() );
	/* A thread that blocked or completed during this step must not be
	 * scheduled again */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);
	/* Let the current action constrain the next schedule choice, if any */
	return action_select_next_thread(curr);
1735 Fuzzer * ModelExecution::getFuzzer() {