11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
// Small, snapshot-scoped bookkeeping for one execution: thread-id counter,
// sequence-number counter, accumulated bug reports, and sync-error flag.
// NOTE(review): this chunk elides original lines 29, 31-33, 36-39 and 45+
// (remaining initializers, dtor body/closing brace, struct terminator).
24 struct model_snapshot_members {
25 model_snapshot_members() :
26 /* First thread created will have id INITIAL_THREAD_ID */
27 next_thread_id(INITIAL_THREAD_ID),
28 used_sequence_numbers(0),
30 bad_synchronization(false),
// Destructor frees the heap-allocated bug_message objects collected in `bugs`
// (loop body elided in this chunk — presumably `delete bugs[i];` — TODO confirm).
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
// Next thread id to hand out; doubles as the count of threads created.
40 unsigned int next_thread_id;
// Count of sequence numbers consumed so far (see get_next_seq_num()).
41 modelclock_t used_sequence_numbers;
// Bug reports accumulated during this execution (owned; freed in dtor).
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
44 bool bad_synchronization;
54 //Code taken from LLVM and licensed under the University of Illinois Open Source
// Platform-detection macros (from LLVM/sanitizer_common) used by
// ThreadDescriptorSize() below to pick the right sizeof(struct pthread).
// NOTE(review): the matching #else/#endif lines (orig. 59, 61-62, 65, 67-68,
// 71, 73-75, 77, 79-80) are elided from this chunk.
// Cached result of ThreadDescriptorSize(); 0 until first computed.
56 static uintptr_t thread_descriptor_size;
57 #if __LP64__ || defined(_WIN64)
58 # define SANITIZER_WORDSIZE 64
60 # define SANITIZER_WORDSIZE 32
63 #if SANITIZER_WORDSIZE == 64
// Selects the 32-bit or 64-bit constant depending on word size.
64 # define FIRST_32_SECOND_64(a, b) (b)
66 # define FIRST_32_SECOND_64(a, b) (a)
// x32 ABI: x86_64 instruction set with 32-bit pointers.
69 #if defined(__x86_64__) && !defined(_LP64)
70 # define SANITIZER_X32 1
72 # define SANITIZER_X32 0
76 # define SANITIZER_ARM 1
78 # define SANITIZER_ARM 0
// Returns sizeof(struct pthread) for the running glibc, determined by parsing
// the glibc version from confstr(_CS_GNU_LIBC_VERSION) and consulting a table
// of known values per version/architecture. The result is cached in
// thread_descriptor_size. (Technique borrowed from LLVM's sanitizer runtime.)
// NOTE(review): several lines (orig. 83-84, 87, 90, 93-94, 97, 99, 104, 106,
// 108, 110, 118, 120-122, 128, 133-134, 136-139) are elided from this chunk,
// including the cache-hit early return, buf/end/patch declarations, the
// leading if-conditions for the minor-version table, and the closing braces.
81 uintptr_t ThreadDescriptorSize() {
82 uintptr_t val = thread_descriptor_size;
85 #if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
86 #ifdef _CS_GNU_LIBC_VERSION
88 uintptr_t len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
// Only trust the string if it fits the buffer and starts with "glibc 2."
89 if (len < sizeof(buf) && strncmp(buf, "glibc 2.", 8) == 0) {
91 int minor = strtoll(buf + 8, &end, 10);
92 if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) {
95 // strtoll will return 0 if no valid conversion could be performed
96 patch = strtoll(end + 1, nullptr, 10);
98 /* sizeof(struct pthread) values from various glibc versions. */
100 val = 1728;// Assume only one particular version for x32.
101 // For ARM sizeof(struct pthread) changed in Glibc 2.23.
102 else if (SANITIZER_ARM)
103 val = minor <= 22 ? 1120 : 1216;
105 val = FIRST_32_SECOND_64(1104, 1696);
107 val = FIRST_32_SECOND_64(1120, 1728);
109 val = FIRST_32_SECOND_64(1136, 1728);
111 val = FIRST_32_SECOND_64(1136, 1712);
112 else if (minor == 10)
113 val = FIRST_32_SECOND_64(1168, 1776);
114 else if (minor == 11 || (minor == 12 && patch == 1))
115 val = FIRST_32_SECOND_64(1168, 2288);
116 else if (minor <= 13)
117 val = FIRST_32_SECOND_64(1168, 2304);
119 val = FIRST_32_SECOND_64(1216, 2304);
123 #elif defined(__mips__)
124 // TODO(sagarthakur): add more values as per different glibc versions.
125 val = FIRST_32_SECOND_64(1152, 1776);
126 #elif defined(__aarch64__)
127 // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
129 #elif defined(__powerpc64__)
130 val = 1776; // from glibc.ppc64le 2.20-8.fc21
131 #elif defined(__s390__)
132 val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
// Cache the computed value for subsequent calls.
135 thread_descriptor_size = val;
// Calling convention glibc uses internally for _dl_get_tls_static_info on
// 32-bit x86 (regparm/stdcall); a no-op elsewhere. The guarding #if/#else
// lines are elided from this chunk.
140 # define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
142 # define DL_INTERNAL_FUNCTION
// Rounds `size` up to the next multiple of `boundary`.
// NOTE(review): assumes `boundary` is a power of two (bit-mask trick), and the
// return type is signed intptr_t while the computation is unsigned — looks
// like it should be uintptr_t; TODO confirm against callers before changing.
145 intptr_t RoundUpTo(uintptr_t size, uintptr_t boundary) {
146 return (size + boundary - 1) & ~(boundary - 1);
// Queries glibc's private _dl_get_tls_static_info for the static TLS block
// size, then rounds it up to the TLS alignment (at least 16-byte stack
// alignment). Used by initTLS() to know how much TLS memory to snapshot.
149 uintptr_t getTlsSize() {
150 // all current supported platforms have 16 bytes stack alignment
151 const size_t kStackAlign = 16;
152 typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
153 get_tls_func get_tls;
// dlsym + memcpy dance converts the void* symbol address into a function
// pointer without the UB of a direct cast.
154 void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
155 memcpy(&get_tls, &get_tls_static_info_ptr, sizeof(get_tls_static_info_ptr));
156 ASSERT(get_tls != 0);
// NOTE(review): tls_size declaration (orig. line 157) is elided here.
158 size_t tls_align = 0;
159 get_tls(&tls_size, &tls_align);
160 if (tls_align < kStackAlign)
161 tls_align = kStackAlign;
162 return RoundUpTo(tls_size, tls_align);
167 /** @brief Constructor */
// Builds the per-execution state: scheduler hookup, snapshotted bookkeeping
// (priv), the modification-order graph, and the fuzzer used to pick
// reads-from / notify targets. Several member initializers (orig. lines
// 169-170, 172, 174-176, 178-180, 185, 187-193) are elided from this chunk.
168 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
171 scheduler(scheduler),
173 thread_map(2), /* We'll always need at least 2 threads */
177 condvar_waiters_map(),
181 thrd_last_fence_release(),
182 priv(new struct model_snapshot_members ()),
183 mo_graph(new CycleGraph()),
184 fuzzer(new Fuzzer()),
186 thrd_func_inst_lists()
194 /* Initialize a model-checker thread, for special ModelActions */
// The model thread consumes thread id 0 (INITIAL_THREAD_ID).
195 model_thread = new Thread(get_next_id());
196 add_thread(model_thread);
// Let the scheduler call back into this execution.
197 scheduler->register_engine(this);
// Snapshots the current thread's static TLS block: computes its base address
// (thread pointer minus TLS size, skipping the thread descriptor that sits
// at/near the thread pointer), then copies it into snapshot-allocated memory
// so it can be restored for model-checked threads.
201 void ModelExecution::initTLS() {
202 tls_addr = get_tls_addr();
203 tls_size = getTlsSize();
// TLS variables live below the thread pointer on this layout (variant 2,
// x86-style) — presumably; TODO confirm for non-x86 targets.
204 tls_addr -= tls_size;
205 thd_desc_size = ThreadDescriptorSize();
206 tls_addr += thd_desc_size;
207 tls_base = (char *) snapshot_calloc(tls_size,1);
208 memcpy(tls_base, reinterpret_cast<const char *>(tls_addr), tls_size);
212 /** @brief Destructor */
// Frees all Thread objects; further teardown (orig. lines 217+, e.g. freeing
// priv/mo_graph) is elided from this chunk — TODO confirm.
213 ModelExecution::~ModelExecution()
215 for (unsigned int i = 0;i < get_num_threads();i++)
216 delete get_thread(int_to_id(i));
// @return the global execution count, delegated to the owning ModelChecker.
222 int ModelExecution::get_execution_number() const
224 return model->get_execution_number();
// Looks up the action list for `ptr` in `hash`, lazily creating (and
// inserting — insertion line elided from this chunk) an empty list on miss,
// so callers always get a non-NULL list.
227 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
229 action_list_t *tmp = hash->get(ptr);
231 tmp = new action_list_t();
// Same lazy-create pattern as get_safe_ptr_action, but for per-thread
// vectors of action lists (used by the obj_*thrd_map tables).
237 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
239 SnapVector<action_list_t> *tmp = hash->get(ptr);
241 tmp = new SnapVector<action_list_t>();
247 /** @return a thread ID for a new Thread */
// Post-increment: hands out ids 0, 1, 2, ... in creation order.
248 thread_id_t ModelExecution::get_next_id()
250 return priv->next_thread_id++;
253 /** @return the number of user threads created during this execution */
// next_thread_id is the count of ids handed out so far (includes the
// model thread, which took id 0).
254 unsigned int ModelExecution::get_num_threads() const
256 return priv->next_thread_id;
259 /** @return a sequence number for a new ModelAction */
// Pre-increment: first action gets sequence number 1, so 0 can mean "unset".
260 modelclock_t ModelExecution::get_next_seq_num()
262 return ++priv->used_sequence_numbers;
266 * @brief Should the current action wake up a given thread?
268 * @param curr The current action
269 * @param thread The thread that we might wake up
270 * @return True, if we should wake up the sleeping thread; false otherwise
272 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
274 const ModelAction *asleep = thread->get_pending();
275 /* Don't allow partial RMW to wake anyone up */
// NOTE(review): the guard body and several `return true` lines (orig. 276-277,
// 280, 283, 288-291) are elided from this chunk; each condition below
// presumably returns true on match — TODO confirm.
278 /* Synchronizing actions may have been backtracked */
279 if (asleep->could_synchronize_with(curr))
281 /* All acquire/release fences and fence-acquire/store-release */
282 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
284 /* Fence-release + store can awake load-acquire on the same location */
285 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
286 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only if the sleeper's last action precedes the fence-release.
287 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Scans every thread in the sleep set and removes those that `curr` could
// wake up (per should_wake_up), making them schedulable again.
293 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
295 for (unsigned int i = 0;i < get_num_threads();i++) {
296 Thread *thr = get_thread(int_to_id(i));
297 if (scheduler->is_sleep_set(thr)) {
298 if (should_wake_up(curr, thr))
299 /* Remove this thread from sleep set */
300 scheduler->remove_sleep(thr);
305 /** @brief Alert the model-checker that an incorrectly-ordered
306 * synchronization was made */
// Sets the snapshotted flag consulted by is_infeasible().
307 void ModelExecution::set_bad_synchronization()
309 priv->bad_synchronization = true;
// Records a bug report for this execution. If the current (partial) trace is
// a feasible prefix, presumably asserts/halts the execution — the body of the
// isfeasibleprefix() branch (orig. lines 317+) is elided; TODO confirm.
312 bool ModelExecution::assert_bug(const char *msg)
314 priv->bugs.push_back(new bug_message(msg));
316 if (isfeasibleprefix()) {
323 /** @return True, if any bugs have been reported for this execution */
324 bool ModelExecution::have_bug_reports() const
326 return priv->bugs.size() != 0;
329 /** @return True, if any fatal bugs have been reported for this execution.
330 * Any bug other than a data race is considered a fatal bug. Data races
331 * are not considered fatal unless the number of races is exceeds
332 * a threshold (temporarily set as 15).
// NOTE(review): the visible body is identical to have_bug_reports() and does
// not implement the race-threshold distinction described above — either the
// distinguishing logic sits on elided lines, or the doc is stale. Verify
// against the full file.
334 bool ModelExecution::have_fatal_bug_reports() const
336 return priv->bugs.size() != 0;
// Accessor for the accumulated bug reports (body elided from this chunk;
// presumably returns priv->bugs by address — TODO confirm).
339 SnapVector<bug_message *> * ModelExecution::get_bugs() const
345 * Check whether the current trace has triggered an assertion which should halt
348 * @return True, if the execution should be aborted; false otherwise
350 bool ModelExecution::has_asserted() const
352 return priv->asserted;
356 * Trigger a trace assertion which should cause this execution to be halted.
357 * This can be due to a detected bug or due to an infeasibility that should
// (continuation of doc comment elided in this chunk)
360 void ModelExecution::set_assert()
362 priv->asserted = true;
366 * Check if we are in a deadlock. Should only be called at the end of an
367 * execution, although it should not give false positives in the middle of an
368 * execution (there should be some ENABLED thread).
370 * @return True if program is in a deadlock; false otherwise
372 bool ModelExecution::is_deadlocked() const
374 bool blocking_threads = false;
375 for (unsigned int i = 0;i < get_num_threads();i++) {
376 thread_id_t tid = int_to_id(i);
// NOTE(review): orig. lines 377-378 are elided — presumably an
// "if (is_enabled(tid)) return false;" style early-out; TODO confirm.
379 Thread *t = get_thread(tid);
// A live (non-model) thread with a pending action is blocked on something.
380 if (!t->is_model_thread() && t->get_pending())
381 blocking_threads = true;
383 return blocking_threads;
387 * Check if this is a complete execution. That is, have all thread completed
388 * execution (rather than exiting because sleep sets have forced a redundant
391 * @return True if the execution is complete.
393 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the execution is not complete.
// (The `return false;` / trailing `return true;` lines are elided here.)
395 for (unsigned int i = 0;i < get_num_threads();i++)
396 if (is_enabled(int_to_id(i)))
// Retroactively converts a plain (non-atomic) store at `location` into a
// NONATOMIC_WRITE ModelAction so atomic loads can read from it: recovers the
// storing thread/clock recorded by the race detector, stamps the action with
// that original clock, and registers it in the write lists and mo_graph.
401 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
402 uint64_t value = *((const uint64_t *) location);
403 modelclock_t storeclock;
404 thread_id_t storethread;
405 getStoreThreadAndClock(location, &storethread, &storeclock);
// Mark the location as atomic from now on so this conversion happens once.
406 setAtomicStoreFlag(location);
407 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
408 act->set_seq_number(storeclock);
409 add_normal_write_to_lists(act);
410 add_write_to_lists(act);
411 w_modification_order(act);
417 * Processes a read model action.
418 * @param curr is the read model action to process.
419 * @param rf_set is the set of model actions we can possibly read from
420 * @return True if processing this read updates the mo_graph.
// NOTE(review): despite the @return doc, the visible signature is void.
// Several lines (incl. the retry loop around the fuzzer choice and the
// cleanup of priorset) are elided from this chunk.
422 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
424 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
// If the location also has a plain store, convert it so it becomes a
// candidate write in rf_set.
425 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
426 if (hasnonatomicstore) {
427 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
428 rf_set->push_back(nonatomicstore);
// Let the fuzzer pick which write this read observes.
432 int index = fuzzer->selectWrite(curr, rf_set);
433 ModelAction *rf = (*rf_set)[index];
437 bool canprune = false;
// If the choice is mo-consistent, commit the collected mo edges.
438 if (r_modification_order(curr, rf, priorset, &canprune)) {
439 for(unsigned int i=0;i<priorset->size();i++) {
440 mo_graph->addEdge((*priorset)[i], rf);
443 get_thread(curr)->set_return_value(curr->get_return_value());
// Prune curr from the per-thread object list when r_modification_order
// proved a same-thread predecessor makes it redundant.
445 if (canprune && curr->get_type() == ATOMIC_READ) {
446 int tid = id_to_int(curr->get_tid());
447 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
// Inconsistent choice: swap-remove rf from the candidate set and retry
// (loop structure elided in this chunk).
452 (*rf_set)[index] = rf_set->back();
458 * Processes a lock, trylock, or unlock model action. @param curr is
459 * the read model action to process.
461 * The try lock operation checks whether the lock is taken. If not,
462 * it falls to the normal lock operation case. If so, it returns
465 * The lock operation has already been checked that it is enabled, so
466 * it just grabs the lock and synchronizes with the previous unlock.
468 * The unlock operation has to re-enable all of the threads that are
469 * waiting on the lock.
471 * @return True if synchronization was updated; false otherwise
473 bool ModelExecution::process_mutex(ModelAction *curr)
475 cdsc::mutex *mutex = curr->get_mutex();
476 struct cdsc::mutex_state *state = NULL;
// Guard (orig. line 478, elided) presumably checks mutex != NULL.
479 state = mutex->get_state();
481 switch (curr->get_type()) {
482 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently unowned.
483 bool success = !state->locked;
484 curr->set_try_lock(success);
// Failure path: return value 0 (and break — elided).
486 get_thread(curr)->set_return_value(0);
489 get_thread(curr)->set_return_value(1);
491 //otherwise fall into the lock case
493 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
494 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
495 //	assert_bug("Lock access before initialization");
// Owner is tracked as the Thread* that holds the lock.
496 state->locked = get_thread(curr);
497 ModelAction *unlock = get_last_unlock(curr);
498 //synchronize with the previous unlock statement
499 if (unlock != NULL) {
500 synchronize(unlock, curr);
506 case ATOMIC_UNLOCK: {
507 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENTUALLY RELEASE...
509 /* wake up the other threads */
510 for (unsigned int i = 0;i < get_num_threads();i++) {
511 Thread *t = get_thread(int_to_id(i));
512 Thread *curr_thrd = get_thread(curr);
// Re-enable every thread whose pending lock targets this mutex
// (the wake call itself is elided in this chunk).
513 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
517 /* unlock the lock - after checking who was waiting on it */
518 state->locked = NULL;
// ATOMIC_WAIT shares this path; the wait-specific handling (adding
// curr to the condvar waiters list, etc.) is elided in this chunk.
520 if (!curr->is_wait())
521 break;/* The rest is only for ATOMIC_WAIT */
525 case ATOMIC_NOTIFY_ALL: {
526 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
527 //activate all the waiting threads
528 for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
529 scheduler->wake(get_thread(*rit));
534 case ATOMIC_NOTIFY_ONE: {
535 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// Fuzzer chooses which single waiter to wake (removal from the
// waiters list is elided in this chunk).
536 if (waiters->size() != 0) {
537 Thread * thread = fuzzer->selectNotify(waiters);
538 scheduler->wake(thread);
550 * Process a write ModelAction
551 * @param curr The ModelAction to process
552 * @return True if the mo_graph was updated or promises were resolved
// NOTE(review): despite the @return doc, the visible signature is void.
554 void ModelExecution::process_write(ModelAction *curr)
556 w_modification_order(curr);
557 get_thread(curr)->set_return_value(VALUE_NONE);
561 * Process a fence ModelAction
562 * @param curr The ModelAction to process
563 * @return True if synchronization was updated
565 bool ModelExecution::process_fence(ModelAction *curr)
568 * fence-relaxed: no-op
569 * fence-release: only log the occurrence (not in this function), for
570 * use in later synchronization
571 * fence-acquire (this function): search for hypothetical release
573 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
575 bool updated = false;
576 if (curr->is_acquire()) {
577 action_list_t *list = &action_trace;
578 action_list_t::reverse_iterator rit;
579 /* Find X : is_read(X) && X --sb-> curr */
// Walk the whole trace backwards, considering only same-thread reads
// sequenced before the fence.
580 for (rit = list->rbegin();rit != list->rend();rit++) {
581 ModelAction *act = *rit;
// Skip actions of other threads (continue elided in this chunk).
584 if (act->get_tid() != curr->get_tid())
586 /* Stop at the beginning of the thread */
587 if (act->is_thread_start())
589 /* Stop once we reach a prior fence-acquire */
590 if (act->is_fence() && act->is_acquire())
// Non-read filtering (orig. lines 591-593) is elided here.
594 /* read-acquire will find its own release sequences */
595 if (act->is_acquire())
598 /* Establish hypothetical release sequences */
// Merge the hb clock propagated from the write this read observed.
599 ClockVector *cv = get_hb_from_write(act->get_reads_from());
600 if (cv != NULL && curr->get_cv()->merge(cv))
608 * @brief Process the current action for thread-related activity
610 * Performs current-action processing for a THREAD_* ModelAction. Processing
611 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
612 * synchronization, etc. This function is a no-op for non-THREAD actions
613 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
615 * @param curr The current action
616 * @return True if synchronization was updated or a thread completed
618 bool ModelExecution::process_thread_action(ModelAction *curr)
620 bool updated = false;
622 switch (curr->get_type()) {
623 case THREAD_CREATE: {
// Thread parameters are smuggled through the action's location/value.
624 thrd_t *thrd = (thrd_t *)curr->get_location();
625 struct thread_params *params = (struct thread_params *)curr->get_value();
626 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
627 curr->set_thread_operand(th);
629 th->set_creation(curr);
632 case PTHREAD_CREATE: {
// Hand the caller back a small integer pthread id (1-based counter).
633 (*(uint32_t *)curr->get_location()) = pthread_counter++;
635 struct pthread_params *params = (struct pthread_params *)curr->get_value();
636 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
637 curr->set_thread_operand(th);
639 th->set_creation(curr);
// Map pthread id -> Thread* for later pthread_join lookup.
641 if ( pthread_map.size() < pthread_counter )
642 pthread_map.resize( pthread_counter );
643 pthread_map[ pthread_counter-1 ] = th;
// THREAD_JOIN case (case label elided): synchronize with the joined
// thread's last action.
648 Thread *blocking = curr->get_thread_operand();
649 ModelAction *act = get_last_action(blocking->get_id());
650 synchronize(act, curr);
651 updated = true;	/* trigger rel-seq checks */
// PTHREAD_JOIN case (case label elided): same synchronization.
655 Thread *blocking = curr->get_thread_operand();
656 ModelAction *act = get_last_action(blocking->get_id());
657 synchronize(act, curr);
658 updated = true;	/* trigger rel-seq checks */
659 break;	// WL: to be add (modified)
662 case THREAD_FINISH: {
663 Thread *th = get_thread(curr);
664 /* Wake up any joining threads */
665 for (unsigned int i = 0;i < get_num_threads();i++) {
666 Thread *waiting = get_thread(int_to_id(i));
667 if (waiting->waiting_on() == th &&
668 waiting->get_pending()->is_thread_join())
669 scheduler->wake(waiting);
672 updated = true;	/* trigger rel-seq checks */
686 * Initialize the current action by performing one or more of the following
687 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
688 * manipulating backtracking sets, allocating and
689 * initializing clock vectors, and computing the promises to fulfill.
691 * @param curr The current action, as passed from the user context; may be
692 * freed/invalidated after the execution of this function, with a different
693 * action "returned" its place (pass-by-reference)
694 * @return True if curr is a newly-explored action; false otherwise
696 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// Second half of an RMW: merge into the earlier RMWR action instead of
// creating a fresh action (replacement of *curr elided in this chunk).
698 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
699 ModelAction *newcurr = process_rmw(*curr);
705 ModelAction *newcurr = *curr;
707 newcurr->set_seq_number(get_next_seq_num());
708 /* Always compute new clock vector */
709 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
711 /* Assign most recent release fence */
712 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
714 return true;	/* This was a new ModelAction */
719 * @brief Establish reads-from relation between two actions
721 * Perform basic operations involved with establishing a concrete rf relation,
722 * including setting the ModelAction data and checking for release sequences.
724 * @param act The action that is reading (must be a read)
725 * @param rf The action from which we are reading (must be a write)
727 * @return True if this read established synchronization
// NOTE(review): despite the @return doc, the visible signature is void.
730 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
733 ASSERT(rf->is_write());
735 act->set_read_from(rf);
// Acquire reads synchronize: merge the hb clock propagated from rf's
// release sequence into the reader's clock vector.
736 if (act->is_acquire()) {
737 ClockVector *cv = get_hb_from_write(rf);
// NULL-check of cv (orig. lines 738-739) is elided in this chunk.
740 act->get_cv()->merge(cv);
745 * @brief Synchronizes two actions
747 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
748 * This function performs the synchronization as well as providing other hooks
749 * for other checks along with synchronization.
751 * @param first The left-hand side of the synchronizes-with relation
752 * @param second The right-hand side of the synchronizes-with relation
753 * @return True if the synchronization was successful (i.e., was consistent
754 * with the execution order); false otherwise
756 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A sw edge pointing backwards in execution order is an inconsistency:
// flag it (the early `return false;` is elided in this chunk).
758 if (*second < *first) {
759 set_bad_synchronization();
762 return second->synchronize_with(first);
766 * @brief Check whether a model action is enabled.
768 * Checks whether an operation would be successful (i.e., is a lock already
769 * locked, or is the joined thread already complete).
771 * For yield-blocking, yields are never enabled.
773 * @param curr is the ModelAction to check whether it is enabled.
774 * @return a bool that indicates whether the action is enabled.
776 bool ModelExecution::check_action_enabled(ModelAction *curr) {
777 if (curr->is_lock()) {
778 cdsc::mutex *lock = curr->get_mutex();
779 struct cdsc::mutex_state *state = lock->get_state();
// Locked-mutex handling (orig. lines 780-781, presumably marking the
// thread blocked and returning false) is elided in this chunk.
782 } else if (curr->is_thread_join()) {
783 Thread *blocking = curr->get_thread_operand();
// Join on an unfinished thread is not enabled (body elided here).
784 if (!blocking->is_complete()) {
793 * This is the heart of the model checker routine. It performs model-checking
794 * actions corresponding to a given "current action." Among other processes, it
795 * calculates reads-from relationships, updates synchronization clock vectors,
796 * forms a memory_order constraints graph, and handles replay/backtrack
797 * execution when running permutations of previously-observed executions.
799 * @param curr The current action to process
800 * @return The ModelAction that is actually executed; may be different than
// (doc continuation and opening lines elided in this chunk)
803 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// RMWC/RMW is the completion half of an earlier RMWR; it must not be
// re-added to the action lists.
806 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
807 bool newly_explored = initialize_curr_action(&curr);
811 wake_up_sleeping_actions(curr);
813 /* Add the action to lists before any other model-checking tasks */
814 if (!second_part_of_rmw && curr->get_type() != NOOP)
815 add_action_to_lists(curr);
817 if (curr->is_write())
818 add_write_to_lists(curr);
820 SnapVector<ModelAction *> * rf_set = NULL;
821 /* Build may_read_from set for newly-created actions */
822 if (newly_explored && curr->is_read())
823 rf_set = build_may_read_from(curr);
825 process_thread_action(curr);
// process_read consumes rf_set; the delete of rf_set after the call
// (orig. lines 829-830) is elided in this chunk.
827 if (curr->is_read() && !second_part_of_rmw) {
828 process_read(curr, rf_set);
831 ASSERT(rf_set == NULL);
// Dispatch to the per-kind processors (call bodies on orig. lines
// 835, 838, 841 are elided in this chunk).
834 if (curr->is_write())
837 if (curr->is_fence())
840 if (curr->is_mutex_op())
847 * This is the strongest feasibility check available.
848 * @return whether the current trace (partial or complete) must be a prefix of
851 bool ModelExecution::isfeasibleprefix() const
853 return !is_infeasible();
857 * Print diagnostic information about an infeasible execution
858 * @param prefix A string to prefix the output with; if NULL, then a default
859 * message prefix will be provided
861 void ModelExecution::print_infeasibility(const char *prefix) const
// buf/ptr declarations (orig. lines 862-864) are elided in this chunk;
// `ptr` presumably walks `buf` as flags are appended — TODO confirm.
865 if (priv->bad_synchronization)
866 ptr += sprintf(ptr, "[bad sw ordering]");
868 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
872 * Check if the current partial trace is infeasible. Does not check any
873 * end-of-execution flags, which might rule out the execution. Thus, this is
874 * useful only for ruling an execution as infeasible.
875 * @return whether the current partial trace is infeasible.
// Currently the only infeasibility source is bad synchronization ordering.
877 bool ModelExecution::is_infeasible() const
879 return priv->bad_synchronization;
882 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
// Merges the completing half (`act`) into the pending RMWR, which is the
// last action of this thread; if the merged RMW actually wrote, add the
// atomic RMW edge (write-read-from --mo--> rmw) to the graph.
883 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
884 ModelAction *lastread = get_last_action(act->get_tid());
885 lastread->process_rmw(act);
// Guard on is_rmw() (orig. line 886) is elided in this chunk.
887 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
893 * @brief Updates the mo_graph with the constraints imposed from the current
896 * Basic idea is the following: Go through each other thread and find
897 * the last action that happened before our read. Two cases:
899 * -# The action is a write: that write must either occur before
900 * the write we read from or be the write we read from.
901 * -# The action is a read: the write that that action read from
902 * must occur before the write we read from or be the same write.
904 * @param curr The current action. Must be a read.
905 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
906 * @return True if modification order edges were added; false otherwise
// `priorset` collects the mo-predecessor candidates; the caller commits
// them as edges only if this function reports consistency. `canprune`
// is set when curr can be dropped from its per-thread object list.
909 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
911 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
913 ASSERT(curr->is_read());
915 /* Last SC fence in the current thread */
916 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
918 int tid = curr->get_tid();
919 ModelAction *prev_same_thread = NULL;
920 /* Iterate over all threads */
// Iteration starts at curr's own thread and wraps around.
// NOTE(review): loop-variable declaration for `i` and several early
// return/continue/break lines are elided throughout this chunk.
921 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
922 /* Last SC fence in thread tid */
923 ModelAction *last_sc_fence_thread_local = NULL;
// Guard condition (orig. line 924, presumably "if (i != 0)") elided.
925 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
927 /* Last SC fence in thread tid, before last SC fence in current thread */
928 ModelAction *last_sc_fence_thread_before = NULL;
929 if (last_sc_fence_local)
930 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
932 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
// Skip this thread entirely when nothing relevant changed (the
// `continue` on orig. lines 936-938 is elided).
933 if (prev_same_thread != NULL &&
934 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
935 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
939 /* Iterate over actions in thread, starting from most recent */
940 action_list_t *list = &(*thrd_lists)[tid];
941 action_list_t::reverse_iterator rit;
942 for (rit = list->rbegin();rit != list->rend();rit++) {
943 ModelAction *act = *rit;
948 /* Don't want to add reflexive edges on 'rf' */
949 if (act->equals(rf)) {
950 if (act->happens_before(curr))
956 if (act->is_write()) {
957 /* C++, Section 29.3 statement 5 */
// SC read + prior write before that thread's last SC fence:
// the write must be mo-before rf (cycle => infeasible; the
// `return false` after checkReachable is elided).
958 if (curr->is_seqcst() && last_sc_fence_thread_local &&
959 *act < *last_sc_fence_thread_local) {
960 if (mo_graph->checkReachable(rf, act))
962 priorset->push_back(act);
965 /* C++, Section 29.3 statement 4 */
966 else if (act->is_seqcst() && last_sc_fence_local &&
967 *act < *last_sc_fence_local) {
968 if (mo_graph->checkReachable(rf, act))
970 priorset->push_back(act);
973 /* C++, Section 29.3 statement 6 */
974 else if (last_sc_fence_thread_before &&
975 *act < *last_sc_fence_thread_before) {
976 if (mo_graph->checkReachable(rf, act))
978 priorset->push_back(act);
984 * Include at most one act per-thread that "happens
987 if (act->happens_before(curr)) {
// Track the newest same-thread-visible predecessor so the
// outer loop can skip unchanged threads next time.
989 if (last_sc_fence_local == NULL ||
990 (*last_sc_fence_local < *act)) {
991 prev_same_thread = act;
994 if (act->is_write()) {
995 if (mo_graph->checkReachable(rf, act))
997 priorset->push_back(act);
// Read case: the write it observed must be mo-before rf.
999 const ModelAction *prevrf = act->get_reads_from();
1000 if (!prevrf->equals(rf)) {
1001 if (mo_graph->checkReachable(rf, prevrf))
1003 priorset->push_back(prevrf);
1005 if (act->get_tid() == curr->get_tid()) {
1006 //Can prune curr from obj list
1019 * Updates the mo_graph with the constraints imposed from the current write.
1021 * Basic idea is the following: Go through each other thread and find
1022 * the latest action that happened before our write. Two cases:
1024 * (1) The action is a write => that write must occur before
1027 * (2) The action is a read => the write that that action read from
1028 * must occur before the current write.
1030 * This method also handles two other issues:
1032 * (I) Sequential Consistency: Making sure that if the current write is
1033 * seq_cst, that it occurs after the previous seq_cst write.
1035 * (II) Sending the write back to non-synchronizing reads.
1037 * @param curr The current action. Must be a write.
1038 * @param send_fv A vector for stashing reads to which we may pass our future
1039 * value. If NULL, then don't record any future values.
1040 * @return True if modification order edges were added; false otherwise
// NOTE(review): despite the doc, the visible signature is void and takes
// no send_fv parameter — doc appears stale relative to this revision.
1042 void ModelExecution::w_modification_order(ModelAction *curr)
1044 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1046 ASSERT(curr->is_write());
1048 if (curr->is_seqcst()) {
1049 /* We have to at least see the last sequentially consistent write,
1050 so we are initialized. */
1051 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1052 if (last_seq_cst != NULL) {
1053 mo_graph->addEdge(last_seq_cst, curr);
1057 /* Last SC fence in the current thread */
1058 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1060 /* Iterate over all threads */
// NOTE(review): declaration of loop variable `i` and several
// continue/break lines are elided throughout this chunk.
1061 for (i = 0;i < thrd_lists->size();i++) {
1062 /* Last SC fence in thread i, before last SC fence in current thread */
1063 ModelAction *last_sc_fence_thread_before = NULL;
1064 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1065 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1067 /* Iterate over actions in thread, starting from most recent */
1068 action_list_t *list = &(*thrd_lists)[i];
1069 action_list_t::reverse_iterator rit;
1070 bool force_edge = false;
1071 for (rit = list->rbegin();rit != list->rend();rit++) {
1072 ModelAction *act = *rit;
// Self-encounter handling (orig. lines 1073-1094, including the
// force_edge flip and the RMW cases described below) partially
// elided in this chunk.
1075 * 1) If RMW and it actually read from something, then we
1076 * already have all relevant edges, so just skip to next
1079 * 2) If RMW and it didn't read from anything, we should
1080 * whatever edge we can get to speed up convergence.
1082 * 3) If normal write, we need to look at earlier actions, so
1083 * continue processing list.
1086 if (curr->is_rmw()) {
1087 if (curr->get_reads_from() != NULL)
1095 /* C++, Section 29.3 statement 7 */
1096 if (last_sc_fence_thread_before && act->is_write() &&
1097 *act < *last_sc_fence_thread_before) {
1098 mo_graph->addEdge(act, curr, force_edge);
1103 * Include at most one act per-thread that "happens
1106 if (act->happens_before(curr)) {
1108 * Note: if act is RMW, just add edge:
1110 * The following edge should be handled elsewhere:
1111 * readfrom(act) --mo--> act
1113 if (act->is_write())
1114 mo_graph->addEdge(act, curr, force_edge);
1115 else if (act->is_read()) {
1116 //if previous read accessed a null, just keep going
1117 mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
1126 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1127 * some constraints. This method checks one the following constraint (others
1128 * require compiler support):
1130 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1131 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1133 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1135 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1137 /* Iterate over all threads */
// NOTE(review): declaration of `i`, the inner `break`, and the final
// `return true` are elided from this chunk.
1138 for (i = 0;i < thrd_lists->size();i++) {
// Oldest write (or observed write) that the reader happens-before,
// found by scanning backwards until hb no longer holds.
1139 const ModelAction *write_after_read = NULL;
1141 /* Iterate over actions in thread, starting from most recent */
1142 action_list_t *list = &(*thrd_lists)[i];
1143 action_list_t::reverse_iterator rit;
1144 for (rit = list->rbegin();rit != list->rend();rit++) {
1145 ModelAction *act = *rit;
1147 /* Don't disallow due to act == reader */
1148 if (!reader->happens_before(act) || reader == act)
1150 else if (act->is_write())
1151 write_after_read = act;
1152 else if (act->is_read() && act->get_reads_from() != NULL)
1153 write_after_read = act->get_reads_from();
// Reading `writer` is disallowed if some hb-successor's write is
// mo-before it (the `return false` is elided here).
1156 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1163 * Computes the clock vector that happens before propagates from this write.
1165 * @param rf The action that might be part of a release sequence. Must be a
1167 * @return ClockVector of happens before relation.
// Two phases: (1) walk the rf chain backwards collecting RMWs whose
// release-sequence clock (rfcv) is not yet cached; (2) replay them
// oldest-first, building/caching each rfcv. NOTE(review): the phase-2 loop
// header, several rfcv-cache stores, and the trailing cleanup/return are
// elided from this chunk.
1170 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1171 SnapVector<ModelAction *> * processset = NULL;
1172 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1173 ASSERT(rf->is_write());
// Chain-walk stops at a non-RMW, an acq+rel RMW, or a cached rfcv.
1174 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1176 if (processset == NULL)
1177 processset = new SnapVector<ModelAction *>();
1178 processset->push_back(rf);
1181 int i = (processset == NULL) ? 0 : processset->size();
1183 ClockVector * vec = NULL;
1185 if (rf->get_rfcv() != NULL) {
// Cached release-sequence clock: reuse directly.
1186 vec = rf->get_rfcv();
1187 } else if (rf->is_acquire() && rf->is_release()) {
1189 } else if (rf->is_release() && !rf->is_rmw()) {
1191 } else if (rf->is_release()) {
1192 //have rmw that is release and doesn't have a rfcv
1193 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1196 //operation that isn't release
1197 if (rf->get_last_fence_release()) {
// Propagate hb from the last fence-release before this write
// (the vec==NULL fast path on orig. line 1198 is elided).
1199 vec = rf->get_last_fence_release()->get_cv();
1201 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
// Advance to the next queued RMW (loop structure elided).
1207 rf = (*processset)[i];
1211 if (processset != NULL)
1217 * Performs various bookkeeping operations for the current ModelAction. For
1218 * instance, adds action to the per-object, per-thread action vector and to the
1219 * action trace list of all thread actions.
1221 * @param act is the ModelAction to add.
// Bookkeeping for a freshly executed action: registers 'act' (and, for the
// first access to an atomic location, a synthetic "uninitialized" write) in
// the per-object list, the global action trace, the per-object/per-thread
// lists, and the last-action / last-release-fence caches.
1223 void ModelExecution::add_action_to_lists(ModelAction *act)
1225 int tid = id_to_int(act->get_tid());
1226 ModelAction *uninit = NULL;
1228 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
// First action ever at an atomic location: synthesize an uninitialized write
// and place it BEFORE 'act' in every list, so reads have a candidate source.
1229 if (list->empty() && act->is_atomic_var()) {
1230 uninit = get_uninitialized_action(act);
1231 uninit_id = id_to_int(uninit->get_tid());
1232 list->push_front(uninit);
1233 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1234 if (uninit_id >= (int)vec->size())
1235 vec->resize(uninit_id + 1);
1236 (*vec)[uninit_id].push_front(uninit);
1238 list->push_back(act);
1240 // Update action trace, a total order of all actions
1241 action_trace.push_back(act);
// The uninit write goes at the very front of the trace (before everything).
1243 action_trace.push_front(uninit);
1245 // Update obj_thrd_map, a per location, per thread, order of actions
1246 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1247 if (tid >= (int)vec->size())
1248 vec->resize(priv->next_thread_id);
1249 (*vec)[tid].push_back(act);
1251 (*vec)[uninit_id].push_front(uninit);
1253 // Update thrd_last_action, the last action taken by each thread
1254 if ((int)thrd_last_action.size() <= tid)
1255 thrd_last_action.resize(get_num_threads());
1256 thrd_last_action[tid] = act;
1258 thrd_last_action[uninit_id] = uninit;
1260 // Update thrd_last_fence_release, the last release fence taken by each thread
1261 if (act->is_fence() && act->is_release()) {
1262 if ((int)thrd_last_fence_release.size() <= tid)
1263 thrd_last_fence_release.resize(get_num_threads());
1264 thrd_last_fence_release[tid] = act;
// A wait carries its mutex address in the action's value field; record the
// wait on the mutex's own per-object and per-thread lists as well.
1267 if (act->is_wait()) {
1268 void *mutex_loc = (void *) act->get_value();
1269 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1271 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1272 if (tid >= (int)vec->size())
1273 vec->resize(priv->next_thread_id);
1274 (*vec)[tid].push_back(act);
// Inserts a lazily-added action into 'list' at the position matching its
// sequence number.  Scans from the tail because late insertions (normal
// writes) belong near the end of the list.
1278 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1279 action_list_t::reverse_iterator rit = list->rbegin();
1280 modelclock_t next_seq = act->get_seq_number();
// Fast path: empty list, or the tail already has this sequence number.
1281 if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
1282 list->push_back(act);
// Otherwise scan backward for the action with the same sequence number and
// insert next to it (rit.base() converts the reverse position for insert()).
1284 for(;rit != list->rend();rit++) {
1285 if ((*rit)->get_seq_number() == next_seq) {
1286 action_list_t::iterator it = rit.base();
1287 list->insert(it, act);
// Like insertIntoActionList(), but additionally initializes the inserted
// action's clock vector from the action it is placed next to (or from NULL
// when the list is empty).
1294 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1295 action_list_t::reverse_iterator rit = list->rbegin();
1296 modelclock_t next_seq = act->get_seq_number();
1297 if (rit == list->rend()) {
// No predecessor: clock vector starts from scratch.
1298 act->create_cv(NULL);
1299 } else if ((*rit)->get_seq_number() == next_seq) {
// Tail shares the sequence number: inherit its clock vector, append at end.
1300 act->create_cv((*rit));
1301 list->push_back(act);
// Otherwise locate the matching sequence number, inherit that action's clock
// vector, and insert next to it.
1303 for(;rit != list->rend();rit++) {
1304 if ((*rit)->get_seq_number() == next_seq) {
1305 act->create_cv((*rit));
1306 action_list_t::iterator it = rit.base();
1307 list->insert(it, act);
1315 * Performs various bookkeeping operations for a normal write. The
1316 * complication is that we are typically inserting a normal write
1317 * lazily, so we need to insert it into the middle of lists.
1319 * @param act is the ModelAction to add.
// Bookkeeping for a normal (non-atomic) write.  Normal writes are recorded
// lazily, so unlike add_action_to_lists() they must be spliced into the
// MIDDLE of the trace and per-location lists by sequence number.
1322 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1324 int tid = id_to_int(act->get_tid());
// The trace insertion also sets the action's clock vector from its neighbor.
1325 insertIntoActionListAndSetCV(&action_trace, act);
1327 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1328 insertIntoActionList(list, act);
1330 // Update obj_thrd_map, a per location, per thread, order of actions
1331 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1332 if (tid >= (int)vec->size())
1333 vec->resize(priv->next_thread_id);
1334 insertIntoActionList(&(*vec)[tid],act);
1336 // Update thrd_last_action, the last action taken by each thread
// Only displace the cached last action if this write shares its sequence
// number (i.e. belongs to the same program step).
1337 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1338 thrd_last_action[tid] = act;
// Records a write in the per-location, per-thread write map, and caches it as
// the location's last seq_cst write when applicable.
1342 void ModelExecution::add_write_to_lists(ModelAction *write) {
1343 // Update seq_cst map
1344 if (write->is_seqcst())
1345 obj_last_sc_map.put(write->get_location(), write);
1347 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1348 int tid = id_to_int(write->get_tid());
// Grow the per-thread vector on demand; sized by the next thread id so all
// current threads fit.
1349 if (tid >= (int)vec->size())
1350 vec->resize(priv->next_thread_id);
1351 (*vec)[tid].push_back(write);
1355 * @brief Get the last action performed by a particular Thread
1356 * @param tid The thread ID of the Thread in question
1357 * @return The last action in the thread
1359 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1361 int threadid = id_to_int(tid);
// Bounds-checked lookup; a thread with no recorded action yields no entry.
1362 if (threadid < (int)thrd_last_action.size())
1363 return thrd_last_action[id_to_int(tid)];
1369 * @brief Get the last fence release performed by a particular Thread
1370 * @param tid The thread ID of the Thread in question
1371 * @return The last fence release in the thread, if one exists; NULL otherwise
1373 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1375 int threadid = id_to_int(tid);
// Bounds-checked lookup in the per-thread release-fence cache.
1376 if (threadid < (int)thrd_last_fence_release.size())
1377 return thrd_last_fence_release[id_to_int(tid)];
1383 * Gets the last memory_order_seq_cst write (in the total global sequence)
1384 * performed on a particular object (i.e., memory location), not including the
1386 * @param curr The current ModelAction; also denotes the object location to
1388 * @return The last seq_cst write
1390 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1392 void *location = curr->get_location();
// obj_last_sc_map is maintained by add_write_to_lists().
1393 return obj_last_sc_map.get(location);
1397 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1398 * performed in a particular thread, prior to a particular fence.
1399 * @param tid The ID of the thread to check
1400 * @param before_fence The fence from which to begin the search; if NULL, then
1401 * search for the most recent fence in the thread.
1402 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1404 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1406 /* All fences should have location FENCE_LOCATION */
1407 action_list_t *list = obj_map.get(FENCE_LOCATION);
1412 action_list_t::reverse_iterator rit = list->rbegin();
// When a starting fence is given, first rewind the iterator to it so the
// search below only considers earlier actions.
1415 for (;rit != list->rend();rit++)
1416 if (*rit == before_fence)
1419 ASSERT(*rit == before_fence);
// Scan backward for the nearest seq_cst fence executed by thread 'tid'.
1423 for (;rit != list->rend();rit++)
1424 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1430 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1431 * location). This function identifies the mutex according to the current
1432 * action, which is presumed to perform on the same mutex.
1433 * @param curr The current ModelAction; also denotes the object location to
1435 * @return The last unlock operation
1437 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1439 void *location = curr->get_location();
1441 action_list_t *list = obj_map.get(location);
1442 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Waits release the mutex too, so they count as unlock-like actions here.
1443 action_list_t::reverse_iterator rit;
1444 for (rit = list->rbegin();rit != list->rend();rit++)
1445 if ((*rit)->is_unlock() || (*rit)->is_wait())
// Returns the action to inherit ordering from for thread 'tid': its own last
// action, or — when the thread has none yet — the action that created it.
1450 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1452 ModelAction *parent = get_last_action(tid);
1454 parent = get_thread(tid)->get_creation();
1459 * Returns the clock vector for a given thread.
1460 * @param tid The thread whose clock vector we want
1461 * @return Desired clock vector
1463 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1465 ModelAction *firstaction=get_parent_action(tid);
// NULL when the thread has neither a prior action nor a creation record.
1466 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compares two values restricted to their low 'size' bytes, so accesses of
// width 1, 2 or 4 ignore the unused high bits of the 64-bit carriers.
1469 bool valequals(uint64_t val1, uint64_t val2, int size) {
1472 return ((uint8_t)val1) == ((uint8_t)val2);
1474 return ((uint16_t)val1) == ((uint16_t)val2);
1476 return ((uint32_t)val1) == ((uint32_t)val2);
1486 * Build up an initial set of all past writes that this 'read' action may read
1487 * from, as well as any previously-observed future values that must still be valid.
1489 * @param curr is the current ModelAction that we are exploring; it must be a
// Builds the set of writes that the read 'curr' may read from, honoring the
// seq_cst ordering constraint and RMW atomicity (at most one RMW may read
// from any given write).  Returns a newly allocated vector owned by caller.
1492 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1494 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1496 ASSERT(curr->is_read());
1498 ModelAction *last_sc_write = NULL;
1500 if (curr->is_seqcst())
1501 last_sc_write = get_last_seq_cst_write(curr);
1503 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1505 /* Iterate over all threads */
1506 for (i = 0;i < thrd_lists->size();i++) {
1507 /* Iterate over actions in thread, starting from most recent */
1508 action_list_t *list = &(*thrd_lists)[i];
1509 action_list_t::reverse_iterator rit;
1510 for (rit = list->rbegin();rit != list->rend();rit++) {
1511 ModelAction *act = *rit;
1516 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1517 bool allow_read = true;
// A seq_cst read must not see a seq_cst write (or anything ordered before the
// last seq_cst write) other than the last seq_cst write itself.
1519 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1522 /* Need to check whether we will have two RMW reading from the same value */
1523 if (curr->is_rmwr()) {
1524 /* It is okay if we have a failing CAS */
// A CAS whose expected value differs from the write's value will fail, so it
// does not consume the write and cannot violate RMW atomicity.
1525 if (!curr->is_rmwrcas() ||
1526 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1527 //Need to make sure we aren't the second RMW
1528 CycleNode * node = mo_graph->getNode_noCreate(act);
1529 if (node != NULL && node->getRMW() != NULL) {
1530 //we are the second RMW
1537 /* Only add feasible reads */
1538 rf_set->push_back(act);
1541 /* Include at most one act per-thread that "happens before" curr */
// Once a write in this thread happens-before 'curr', older writes in the same
// thread are hidden from it, so stop scanning this thread.
1542 if (act->happens_before(curr))
1547 if (DBG_ENABLED()) {
1548 model_print("Reached read action:\n");
1550 model_print("End printing read_from_past\n");
1556 * @brief Get an action representing an uninitialized atomic
1558 * This function may create a new one.
1560 * @param curr The current action, which prompts the creation of an UNINIT action
1561 * @return A pointer to the UNINIT ModelAction
1563 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
// Lazily create one ATOMIC_UNINIT pseudo-write per action and cache it on
// 'curr'; it carries the configured uninitialized value and belongs to the
// model thread.
1565 ModelAction *act = curr->get_uninit_action();
1567 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1568 curr->set_uninit_action(act);
1570 act->create_cv(NULL);
// Prints a formatted table of all actions in 'list' (the execution trace) and
// a trailing hash of the trace, usable to compare executions.
1574 static void print_list(const action_list_t *list)
1576 action_list_t::const_iterator it;
1578 model_print("------------------------------------------------------------------------------------\n");
1579 model_print("# t Action type MO Location Value Rf CV\n");
1580 model_print("------------------------------------------------------------------------------------\n");
1582 unsigned int hash = 0;
1584 for (it = list->begin();it != list->end();it++) {
1585 const ModelAction *act = *it;
// Sequence number 0 marks pseudo-actions (e.g. uninit writes); only real
// actions are printed.  Every action still contributes to the hash.
1586 if (act->get_seq_number() > 0)
1588 hash = hash^(hash<<3)^((*it)->hash());
1590 model_print("HASH %u\n", hash);
1591 model_print("------------------------------------------------------------------------------------\n");
1594 #if SUPPORT_MOD_ORDER_DUMP
// Writes the modification-order graph plus reads-from (red) and
// sequenced-before (blue) edges to "<filename>.dot" in Graphviz format.
// NOTE(review): sprintf into a fixed buffer and the fopen() result are
// unchecked — a bad filename or unwritable directory would crash; confirm
// callers only pass short, valid names.
1595 void ModelExecution::dumpGraph(char *filename) const
1598 sprintf(buffer, "%s.dot", filename);
1599 FILE *file = fopen(buffer, "w");
1600 fprintf(file, "digraph %s {\n", filename);
1601 mo_graph->dumpNodes(file);
// Tracks the most recent action per thread so consecutive actions of the same
// thread can be linked with an "sb" edge.
1602 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1604 for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
1605 ModelAction *act = *it;
1606 if (act->is_read()) {
1607 mo_graph->dot_print_node(file, act);
1608 mo_graph->dot_print_edge(file,
1609 act->get_reads_from(),
1611 "label=\"rf\", color=red, weight=2");
1613 if (thread_array[act->get_tid()]) {
1614 mo_graph->dot_print_edge(file,
1615 thread_array[id_to_int(act->get_tid())],
1617 "label=\"sb\", color=blue, weight=400");
1620 thread_array[act->get_tid()] = act;
1622 fprintf(file, "}\n");
1623 model_free(thread_array);
1628 /** @brief Prints an execution trace summary. */
1629 void ModelExecution::print_summary() const
// Optionally dump the mo-graph in two forms, numbered by execution, before
// printing the textual trace summary.
1631 #if SUPPORT_MOD_ORDER_DUMP
1632 char buffername[100];
1633 sprintf(buffername, "exec%04u", get_execution_number());
1634 mo_graph->dumpGraphToFile(buffername);
1635 sprintf(buffername, "graph%04u", get_execution_number());
1636 dumpGraph(buffername);
1639 model_print("Execution trace %d:", get_execution_number());
1640 if (isfeasibleprefix()) {
// Annotate the header with notable outcomes of a feasible execution.
1641 if (scheduler->all_threads_sleeping())
1642 model_print(" SLEEP-SET REDUNDANT");
1643 if (have_bug_reports())
1644 model_print(" DETECTED BUG(S)");
1646 print_infeasibility(" INFEASIBLE");
1649 print_list(&action_trace);
1655 * Add a Thread to the system for the first time. Should only be called once
1657 * @param t The Thread to add
1659 void ModelExecution::add_thread(Thread *t)
1661 unsigned int i = id_to_int(t->get_id());
// Grow the id-indexed thread map on demand.
1662 if (i >= thread_map.size())
1663 thread_map.resize(i + 1);
// The internal model thread is never handed to the scheduler.
1665 if (!t->is_model_thread())
1666 scheduler->add_thread(t);
1670 * @brief Get a Thread reference by its ID
1671 * @param tid The Thread's ID
1672 * @return A Thread reference
1674 Thread * ModelExecution::get_thread(thread_id_t tid) const
1676 unsigned int i = id_to_int(tid);
// Bounds-checked lookup; an unknown id yields no entry.
1677 if (i < thread_map.size())
1678 return thread_map[i];
1683 * @brief Get a reference to the Thread in which a ModelAction was executed
1684 * @param act The ModelAction
1685 * @return A Thread reference
1687 Thread * ModelExecution::get_thread(const ModelAction *act) const
// Convenience overload: resolve through the action's thread id.
1689 return get_thread(act->get_tid());
1693 * @brief Get a Thread reference by its pthread ID
1694 * @param index The pthread's ID
1695 * @return A Thread reference
1697 Thread * ModelExecution::get_pthread(pthread_t pid) {
// 'x.v' extracts a small integer id from the pthread handle (the conversion
// is on elided lines — presumably a union/cast over 'pid'; TODO confirm).
1703 uint32_t thread_id = x.v;
1704 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1709 * @brief Check if a Thread is currently enabled
1710 * @param t The Thread to check
1711 * @return True if the Thread is currently enabled
1713 bool ModelExecution::is_enabled(Thread *t) const
// Delegates to the scheduler's enabled-set.
1715 return scheduler->is_enabled(t);
1719 * @brief Check if a Thread is currently enabled
1720 * @param tid The ID of the Thread to check
1721 * @return True if the Thread is currently enabled
1723 bool ModelExecution::is_enabled(thread_id_t tid) const
// Delegates to the scheduler's enabled-set, keyed by thread id.
1725 return scheduler->is_enabled(tid);
1729 * @brief Select the next thread to execute based on the current action
1731 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1732 * actions should be followed by the execution of their child thread. In either
1733 * case, the current action should determine the next thread schedule.
1735 * @param curr The current action
1736 * @return The next thread to run, if the current action will determine this
1737 * selection; otherwise NULL
1739 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1741 /* Do not split atomic RMW */
// The read half of an RMW forces its own thread to run next so the write half
// executes immediately after.
1742 if (curr->is_rmwr())
1743 return get_thread(curr);
1744 /* Follow CREATE with the created thread */
1745 /* which is not needed, because model.cc takes care of this */
1746 if (curr->get_type() == THREAD_CREATE)
1747 return curr->get_thread_operand();
1748 if (curr->get_type() == PTHREAD_CREATE) {
1749 return curr->get_thread_operand();
1755 * Takes the next step in the execution, if possible.
1756 * @param curr The current step to take
1757 * @return Returns the next Thread to run, if any; NULL if this execution
1760 Thread * ModelExecution::take_step(ModelAction *curr)
1762 Thread *curr_thrd = get_thread(curr);
1763 ASSERT(curr_thrd->get_state() == THREAD_READY);
// check_current_action() may replace 'curr' (e.g. with an existing action),
// so the returned pointer is used from here on.
1765 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1766 curr = check_current_action(curr);
1769 /* Process this action in ModelHistory for records*/
1770 model->get_history()->process_action( curr, curr_thrd->get_id() );
// A thread that blocked or finished during this step leaves the scheduler.
1772 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1773 scheduler->remove_thread(curr_thrd);
1775 return action_select_next_thread(curr);
1778 Fuzzer * ModelExecution::getFuzzer() {