11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
/* File-local statistics: one counter per kind of atomic operation observed.
 * These are plain (non-snapshotted) statics, so they accumulate monotonically;
 * print_atomic_accesses() below reports them. */
23 static unsigned int atomic_load_count = 0;
24 static unsigned int atomic_store_count = 0;
25 static unsigned int atomic_rmw_count = 0;
27 static unsigned int atomic_fence_count = 0;
28 static unsigned int atomic_lock_count = 0;
29 static unsigned int atomic_trylock_count = 0;
30 static unsigned int atomic_unlock_count = 0;
31 static unsigned int atomic_notify_count = 0;
32 static unsigned int atomic_wait_count = 0;
33 static unsigned int atomic_timedwait_count = 0;
37 * Structure for holding small ModelChecker members that should be snapshotted
39 struct model_snapshot_members {
40 model_snapshot_members() :
41 /* First thread created will have id INITIAL_THREAD_ID */
42 next_thread_id(INITIAL_THREAD_ID),
43 used_sequence_numbers(0),
48 ~model_snapshot_members() {
49 for (unsigned int i = 0;i < bugs.size();i++)
54 unsigned int next_thread_id;
55 modelclock_t used_sequence_numbers;
56 SnapVector<bug_message *> bugs;
57 /** @brief Incorrectly-ordered synchronization was made */
/** @brief Constructor
 *
 *  @param m The owning ModelChecker instance
 *  @param scheduler The scheduler this execution registers itself with
 *
 *  NOTE(review): several initializer-list entries are elided in this
 *  excerpt; comments annotate only the visible members. */
64 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
68 thread_map(2), /* We'll always need at least 2 threads */
73 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 priv(new struct model_snapshot_members ()),
82 mo_graph(new CycleGraph()),
84 fuzzer(new NewFuzzer()),
90 /* Initialize a model-checker thread, for special ModelActions */
91 model_thread = new Thread(get_next_id());
92 add_thread(model_thread);
/* Hook the fuzzer and scheduler back to this execution */
93 fuzzer->register_engine(m, this);
94 scheduler->register_engine(this);
/* Per-thread key so TLS can be torn down via tlsdestructor */
96 pthread_key_create(&pthreadkey, tlsdestructor);
100 /** @brief Destructor */
101 ModelExecution::~ModelExecution()
103 for (unsigned int i = 0;i < get_num_threads();i++)
104 delete get_thread(int_to_id(i));
110 int ModelExecution::get_execution_number() const
112 return model->get_execution_number();
115 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
117 SnapVector<action_list_t> *tmp = hash->get(ptr);
119 tmp = new SnapVector<action_list_t>();
125 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
127 simple_action_list_t *tmp = hash->get(ptr);
129 tmp = new simple_action_list_t();
135 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
137 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
139 tmp = new SnapVector<simple_action_list_t>();
146 * When vectors of action lists are reallocated due to resize, the root address of
147 * action lists may change. Hence we need to fix the parent pointer of the children
150 static void fixup_action_list(SnapVector<action_list_t> * vec)
152 for (uint i = 0;i < vec->size();i++) {
153 action_list_t * list = &(*vec)[i];
160 static inline void record_atomic_stats(ModelAction * act)
162 switch (act->get_type()) {
164 atomic_store_count++;
173 atomic_fence_count++;
179 atomic_trylock_count++;
182 atomic_unlock_count++;
184 case ATOMIC_NOTIFY_ONE:
185 case ATOMIC_NOTIFY_ALL:
186 atomic_notify_count++;
191 case ATOMIC_TIMEDWAIT:
192 atomic_timedwait_count++;
198 void print_atomic_accesses()
200 model_print("atomic store count: %u\n", atomic_store_count);
201 model_print("atomic load count: %u\n", atomic_load_count);
202 model_print("atomic rmw count: %u\n", atomic_rmw_count);
204 model_print("atomic fence count: %u\n", atomic_fence_count);
205 model_print("atomic lock count: %u\n", atomic_lock_count);
206 model_print("atomic trylock count: %u\n", atomic_trylock_count);
207 model_print("atomic unlock count: %u\n", atomic_unlock_count);
208 model_print("atomic notify count: %u\n", atomic_notify_count);
209 model_print("atomic wait count: %u\n", atomic_wait_count);
210 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
213 /** @return a thread ID for a new Thread */
214 thread_id_t ModelExecution::get_next_id()
216 return priv->next_thread_id++;
219 /** @return the number of user threads created during this execution */
220 unsigned int ModelExecution::get_num_threads() const
222 return priv->next_thread_id;
225 /** @return a sequence number for a new ModelAction */
226 modelclock_t ModelExecution::get_next_seq_num()
228 return ++priv->used_sequence_numbers;
231 /** @return a sequence number for a new ModelAction */
232 modelclock_t ModelExecution::get_curr_seq_num()
234 return priv->used_sequence_numbers;
237 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
238 void ModelExecution::restore_last_seq_num()
240 priv->used_sequence_numbers--;
244 * @brief Should the current action wake up a given thread?
246 * @param curr The current action
247 * @param thread The thread that we might wake up
248 * @return True, if we should wake up the sleeping thread; false otherwise
250 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
252 const ModelAction *asleep = thread->get_pending();
253 /* Don't allow partial RMW to wake anyone up */
256 /* Synchronizing actions may have been backtracked */
257 if (asleep->could_synchronize_with(curr))
259 /* All acquire/release fences and fence-acquire/store-release */
260 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
262 /* Fence-release + store can awake load-acquire on the same location */
263 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
264 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
265 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
268 /* The sleep is literally sleeping */
269 if (asleep->is_sleep()) {
270 if (fuzzer->shouldWake(asleep))
277 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
279 for (unsigned int i = 0;i < get_num_threads();i++) {
280 thread_id_t tid = int_to_id(i);
281 Thread *thr = get_thread(tid);
282 if (scheduler->is_sleep_set(tid)) {
283 if (should_wake_up(curr, thr)) {
284 /* Remove this thread from sleep set */
285 scheduler->remove_sleep(thr);
286 if (thr->get_pending()->is_sleep())
287 thr->set_wakeup_state(true);
293 void ModelExecution::assert_bug(const char *msg)
295 priv->bugs.push_back(new bug_message(msg));
299 /** @return True, if any bugs have been reported for this execution */
300 bool ModelExecution::have_bug_reports() const
302 return priv->bugs.size() != 0;
305 SnapVector<bug_message *> * ModelExecution::get_bugs() const
311 * Check whether the current trace has triggered an assertion which should halt
314 * @return True, if the execution should be aborted; false otherwise
316 bool ModelExecution::has_asserted() const
318 return priv->asserted;
322 * Trigger a trace assertion which should cause this execution to be halted.
323 * This can be due to a detected bug or due to an infeasibility that should
326 void ModelExecution::set_assert()
328 priv->asserted = true;
332 * Check if we are in a deadlock. Should only be called at the end of an
333 * execution, although it should not give false positives in the middle of an
334 * execution (there should be some ENABLED thread).
336 * @return True if program is in a deadlock; false otherwise
338 bool ModelExecution::is_deadlocked() const
340 bool blocking_threads = false;
341 for (unsigned int i = 0;i < get_num_threads();i++) {
342 thread_id_t tid = int_to_id(i);
345 Thread *t = get_thread(tid);
346 if (!t->is_model_thread() && t->get_pending())
347 blocking_threads = true;
349 return blocking_threads;
353 * Check if this is a complete execution. That is, have all thread completed
354 * execution (rather than exiting because sleep sets have forced a redundant
357 * @return True if the execution is complete.
359 bool ModelExecution::is_complete_execution() const
361 for (unsigned int i = 0;i < get_num_threads();i++)
362 if (is_enabled(int_to_id(i)))
367 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
368 uint64_t value = *((const uint64_t *) location);
369 modelclock_t storeclock;
370 thread_id_t storethread;
371 getStoreThreadAndClock(location, &storethread, &storeclock);
372 setAtomicStoreFlag(location);
373 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
374 act->set_seq_number(storeclock);
375 add_normal_write_to_lists(act);
376 add_write_to_lists(act);
377 w_modification_order(act);
379 model->get_history()->process_action(act, act->get_tid());
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @param rf_set is the set of model actions we can possibly read from
 * @return True if the read can be pruned from the thread map list.
 *
 * NOTE(review): several lines (loop index setup, the no-consistent-write
 * retry path, cleanup of priorset) are elided in this excerpt; comments
 * below cover only the visible code.
 */
390 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
/* Writes that must be ordered before the chosen write; edges added below */
392 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
/* A plain store to this location is wrapped as a synthetic NONATOMIC_WRITE
 * so this read may observe it */
393 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
394 if (hasnonatomicstore) {
395 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
396 rf_set->push_back(nonatomicstore);
399 // Remove writes that violate read modification order
402 while (i < rf_set->size()) {
403 ModelAction * rf = (*rf_set)[i];
/* check_only = true: query consistency without mutating priorset */
404 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
/* swap-with-back removal keeps the filter pass linear */
405 (*rf_set)[i] = rf_set->back();
/* Let the fuzzer choose which surviving write this read observes */
412 int index = fuzzer->selectWrite(curr, rf_set);
414 ModelAction *rf = (*rf_set)[index];
417 bool canprune = false;
418 if (r_modification_order(curr, rf, priorset, &canprune)) {
/* Commit the mo edges implied by this reads-from choice */
419 for(unsigned int i=0;i<priorset->size();i++) {
420 mo_graph->addEdge((*priorset)[i], rf);
423 get_thread(curr)->set_return_value(curr->get_return_value());
425 //Update acquire fence clock vector
426 ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
428 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
/* Only plain atomic reads qualify for pruning from per-location lists */
429 return canprune && (curr->get_type() == ATOMIC_READ);
/* Chosen write was inconsistent: discard it and retry with the rest */
432 (*rf_set)[index] = rf_set->back();
/**
 * Processes a lock, trylock, unlock, wait, or notify model action.
 * @param curr is the mutex-related model action to process.
 *
 * The try lock operation checks whether the lock is taken. If not,
 * it falls through to the normal lock operation case. If so, it returns.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 *
 * NOTE(review): case labels, breaks, and closing braces for several
 * branches are elided in this excerpt; comments cover only visible code.
 */
453 bool ModelExecution::process_mutex(ModelAction *curr)
455 cdsc::mutex *mutex = curr->get_mutex();
456 struct cdsc::mutex_state *state = NULL;
459 state = mutex->get_state();
461 switch (curr->get_type()) {
462 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently unowned */
463 bool success = !state->locked;
464 curr->set_try_lock(success);
/* Failure: return 0 to the caller and stop processing */
466 get_thread(curr)->set_return_value(0);
469 get_thread(curr)->set_return_value(1);
471 //otherwise fall into the lock case
473 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
474 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
475 // assert_bug("Lock access before initialization");
477 // TODO: lock count for recursive mutexes
/* Take ownership: the owner is recorded as the acquiring Thread */
478 state->locked = get_thread(curr);
479 ModelAction *unlock = get_last_unlock(curr);
480 //synchronize with the previous unlock statement
481 if (unlock != NULL) {
482 synchronize(unlock, curr);
488 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
489 if (fuzzer->shouldWait(curr)) {
490 /* wake up the other threads */
491 for (unsigned int i = 0;i < get_num_threads();i++) {
492 Thread *t = get_thread(int_to_id(i));
493 Thread *curr_thrd = get_thread(curr);
494 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
498 /* unlock the lock - after checking who was waiting on it */
499 state->locked = NULL;
501 /* remove old wait action and disable this thread */
502 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
503 for (sllnode<ModelAction *> * it = waiters->begin();it != NULL;it = it->getNext()) {
504 ModelAction * wait = it->getVal();
505 if (wait->get_tid() == curr->get_tid()) {
/* Park this wait on the condvar's waiter list and sleep the thread */
511 waiters->push_back(curr);
512 scheduler->sleep(get_thread(curr));
517 case ATOMIC_TIMEDWAIT:
518 case ATOMIC_UNLOCK: {
519 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
520 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
521 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
522 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
523 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
524 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
525 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
526 //MUST EVENMTUALLY RELEASE...
528 // TODO: lock count for recursive mutexes
529 /* wake up the other threads */
530 for (unsigned int i = 0;i < get_num_threads();i++) {
531 Thread *t = get_thread(int_to_id(i));
532 Thread *curr_thrd = get_thread(curr);
533 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
537 /* unlock the lock - after checking who was waiting on it */
538 state->locked = NULL;
541 case ATOMIC_NOTIFY_ALL: {
542 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
543 //activate all the waiting threads
544 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
545 scheduler->wake(get_thread(rit->getVal()));
550 case ATOMIC_NOTIFY_ONE: {
551 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
552 if (waiters->size() != 0) {
/* The fuzzer picks which single waiter gets woken */
553 Thread * thread = fuzzer->selectNotify(waiters);
554 scheduler->wake(thread);
566 * Process a write ModelAction
567 * @param curr The ModelAction to process
568 * @return True if the mo_graph was updated or promises were resolved
570 void ModelExecution::process_write(ModelAction *curr)
572 w_modification_order(curr);
573 get_thread(curr)->set_return_value(VALUE_NONE);
577 * Process a fence ModelAction
578 * @param curr The ModelAction to process
579 * @return True if synchronization was updated
581 void ModelExecution::process_fence(ModelAction *curr)
584 * fence-relaxed: no-op
585 * fence-release: only log the occurence (not in this function), for
586 * use in later synchronization
587 * fence-acquire (this function): search for hypothetical release
589 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
591 if (curr->is_acquire()) {
592 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc. This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 *
 * NOTE(review): case labels (e.g. the JOIN cases), break statements, and
 * some bodies are elided in this excerpt; comments cover visible code only.
 */
607 void ModelExecution::process_thread_action(ModelAction *curr)
609 switch (curr->get_type()) {
610 case THREAD_CREATE: {
/* The location/value of a THREAD_CREATE carry the thrd_t and start params */
611 thrd_t *thrd = (thrd_t *)curr->get_location();
612 struct thread_params *params = (struct thread_params *)curr->get_value();
613 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
614 curr->set_thread_operand(th);
616 th->set_creation(curr);
619 case PTHREAD_CREATE: {
/* pthreads are identified by a dense counter stored at the location */
620 (*(uint32_t *)curr->get_location()) = pthread_counter++;
622 struct pthread_params *params = (struct pthread_params *)curr->get_value();
623 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
624 curr->set_thread_operand(th);
626 th->set_creation(curr);
/* Keep the pthread id -> Thread mapping in step with the counter */
628 if ( pthread_map.size() < pthread_counter )
629 pthread_map.resize( pthread_counter );
630 pthread_map[ pthread_counter-1 ] = th;
/* (join case) synchronize with the last action of the joined thread */
635 Thread *blocking = curr->get_thread_operand();
636 ModelAction *act = get_last_action(blocking->get_id());
637 synchronize(act, curr);
/* (pthread-join case) same synchronization, pthread variant */
641 Thread *blocking = curr->get_thread_operand();
642 ModelAction *act = get_last_action(blocking->get_id());
643 synchronize(act, curr);
647 case THREADONLY_FINISH:
648 case THREAD_FINISH: {
649 Thread *th = get_thread(curr);
/* Finishing the initial thread has special handling (elided here) */
650 if (curr->get_type() == THREAD_FINISH &&
651 th == model->getInitThread()) {
657 /* Wake up any joining threads */
658 for (unsigned int i = 0;i < get_num_threads();i++) {
659 Thread *waiting = get_thread(int_to_id(i));
660 if (waiting->waiting_on() == th &&
661 waiting->get_pending()->is_thread_join())
662 scheduler->wake(waiting);
/* (sleep case) park the thread with its pending action */
671 Thread *th = get_thread(curr);
672 th->set_pending(curr);
673 scheduler->add_sleep(th);
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
 * manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" in its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 *
 * NOTE(review): the RMW-merge branch's tail (replacing *curr and returning
 * false) is elided in this excerpt.
 */
692 bool ModelExecution::initialize_curr_action(ModelAction **curr)
/* Second half of an RMW: merge into the earlier RMWR action */
694 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
695 ModelAction *newcurr = process_rmw(*curr);
701 ModelAction *newcurr = *curr;
703 newcurr->set_seq_number(get_next_seq_num());
704 /* Always compute new clock vector */
705 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
707 /* Assign most recent release fence */
708 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
710 return true; /* This was a new ModelAction */
715 * @brief Establish reads-from relation between two actions
717 * Perform basic operations involved with establishing a concrete rf relation,
718 * including setting the ModelAction data and checking for release sequences.
720 * @param act The action that is reading (must be a read)
721 * @param rf The action from which we are reading (must be a write)
723 * @return True if this read established synchronization
726 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
729 ASSERT(rf->is_write());
731 act->set_read_from(rf);
732 if (act->is_acquire()) {
733 ClockVector *cv = get_hb_from_write(rf);
736 act->get_cv()->merge(cv);
741 * @brief Synchronizes two actions
743 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
744 * This function performs the synchronization as well as providing other hooks
745 * for other checks along with synchronization.
747 * @param first The left-hand side of the synchronizes-with relation
748 * @param second The right-hand side of the synchronizes-with relation
749 * @return True if the synchronization was successful (i.e., was consistent
750 * with the execution order); false otherwise
752 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
754 if (*second < *first) {
755 ASSERT(0); //This should not happend
758 return second->synchronize_with(first);
/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether an operation would be successful (i.e., is a lock already
 * locked, or is the joined thread already complete).
 *
 * For yield-blocking, yields are never enabled.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 *
 * NOTE(review): the return statements inside the branches are elided in
 * this excerpt; comments cover only the visible conditions.
 */
772 bool ModelExecution::check_action_enabled(ModelAction *curr) {
773 if (curr->is_lock()) {
774 cdsc::mutex *lock = curr->get_mutex();
775 struct cdsc::mutex_state *state = lock->get_state();
/* Lock is held: only a recursive re-acquire by the owner is enabled */
777 Thread *lock_owner = (Thread *)state->locked;
778 Thread *curr_thread = get_thread(curr);
779 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
785 } else if (curr->is_thread_join()) {
/* Join is enabled only once the target thread has completed */
786 Thread *blocking = curr->get_thread_operand();
787 if (!blocking->is_complete()) {
790 } else if (curr->is_sleep()) {
/* The fuzzer may elect to skip a sleep entirely */
791 if (!fuzzer->shouldSleep(curr))
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * the argument when it is merged into an earlier RMWR
 *
 * NOTE(review): the bodies of the trailing is_write/is_fence/is_mutex_op
 * dispatches and the final return are elided in this excerpt.
 */
809 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* May replace curr (RMW merge); returns whether this action is new */
812 bool newly_explored = initialize_curr_action(&curr);
816 wake_up_sleeping_actions(curr);
818 SnapVector<ModelAction *> * rf_set = NULL;
819 bool canprune = false;
820 /* Build may_read_from set for newly-created actions */
821 if (curr->is_read() && newly_explored) {
822 rf_set = build_may_read_from(curr);
823 canprune = process_read(curr, rf_set);
/* Non-read (or replayed) actions must not have an rf_set */
826 ASSERT(rf_set == NULL);
828 /* Add the action to lists if not the second part of a rmw */
829 if (newly_explored) {
831 record_atomic_stats(curr);
833 add_action_to_lists(curr, canprune);
836 if (curr->is_write())
837 add_write_to_lists(curr);
839 process_thread_action(curr);
/* Type-specific processing: write, fence, mutex op (bodies elided) */
841 if (curr->is_write())
844 if (curr->is_fence())
847 if (curr->is_mutex_op())
853 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
854 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
855 ModelAction *lastread = get_last_action(act->get_tid());
856 lastread->process_rmw(act);
858 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
/**
 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read. Two cases:
 *
 * -# The action is a write: that write must either occur before
 * the write we read from or be the write we read from.
 * -# The action is a read: the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @param priorset Output: writes that must be mo-before rf.
 * @param canprune Output: whether curr can be pruned from per-object lists.
 * @param check_only If true, then only check whether the current action satisfies
 *        read modification order or not, without modifying priorset and canprune.
 * @return True if the read is consistent with modification order; false otherwise
 *
 * NOTE(review): multiple lines (break/continue statements, some returns,
 * closing braces, the check_only guards) are elided in this excerpt.
 */
883 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
884 SnapVector<ModelAction *> * priorset, bool * canprune)
886 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
887 ASSERT(curr->is_read());
889 /* Last SC fence in the current thread */
890 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
892 int tid = curr->get_tid();
894 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
895 if ((int)thrd_lists->size() <= tid) {
896 uint oldsize = thrd_lists->size();
897 thrd_lists->resize(priv->next_thread_id);
/* placement-new the freshly exposed slots */
898 for(uint i = oldsize;i < priv->next_thread_id;i++)
899 new (&(*thrd_lists)[i]) action_list_t();
/* resize may have moved the lists; repair child->parent pointers */
901 fixup_action_list(thrd_lists);
904 ModelAction *prev_same_thread = NULL;
905 /* Iterate over all threads, starting with curr's own thread */
906 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
907 /* Last SC fence in thread tid */
908 ModelAction *last_sc_fence_thread_local = NULL;
910 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
912 /* Last SC fence in thread tid, before last SC fence in current thread */
913 ModelAction *last_sc_fence_thread_before = NULL;
914 if (last_sc_fence_local)
915 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
917 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
918 if (prev_same_thread != NULL &&
919 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
920 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
924 /* Iterate over actions in thread, starting from most recent */
925 action_list_t *list = &(*thrd_lists)[tid];
926 sllnode<ModelAction *> * rit;
927 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
928 ModelAction *act = rit->getVal();
933 /* Don't want to add reflexive edges on 'rf' */
934 if (act->equals(rf)) {
935 if (act->happens_before(curr))
941 if (act->is_write()) {
942 /* C++, Section 29.3 statement 5 */
943 if (curr->is_seqcst() && last_sc_fence_thread_local &&
944 *act < *last_sc_fence_thread_local) {
/* act must be mo-before rf; a reverse path means inconsistency */
945 if (mo_graph->checkReachable(rf, act))
947 priorset->push_back(act);
950 /* C++, Section 29.3 statement 4 */
951 else if (act->is_seqcst() && last_sc_fence_local &&
952 *act < *last_sc_fence_local) {
953 if (mo_graph->checkReachable(rf, act))
955 priorset->push_back(act);
958 /* C++, Section 29.3 statement 6 */
959 else if (last_sc_fence_thread_before &&
960 *act < *last_sc_fence_thread_before) {
961 if (mo_graph->checkReachable(rf, act))
963 priorset->push_back(act);
969 * Include at most one act per-thread that "happens
972 if (act->happens_before(curr)) {
/* Remember the hb-latest action in this thread for the skip test */
974 if (last_sc_fence_local == NULL ||
975 (*last_sc_fence_local < *act)) {
976 prev_same_thread = act;
979 if (act->is_write()) {
980 if (mo_graph->checkReachable(rf, act))
982 priorset->push_back(act);
/* hb-prior read: the write it observed must be mo-before rf */
984 ModelAction *prevrf = act->get_reads_from();
985 if (!prevrf->equals(rf)) {
986 if (mo_graph->checkReachable(rf, prevrf))
988 priorset->push_back(prevrf);
990 if (act->get_tid() == curr->get_tid()) {
991 //Can prune curr from obj list
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 *
 * NOTE(review): loop-variable declaration, continue/break statements, and
 * closing braces are elided in this excerpt.
 */
1027 void ModelExecution::w_modification_order(ModelAction *curr)
1029 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1031 ASSERT(curr->is_write());
/* All actions collected here become mo-predecessors of curr in one batch */
1033 SnapList<ModelAction *> edgeset;
1035 if (curr->is_seqcst()) {
1036 /* We have to at least see the last sequentially consistent write,
1037 so we are initialized. */
1038 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1039 if (last_seq_cst != NULL) {
1040 edgeset.push_back(last_seq_cst);
1042 //update map for next query
1043 obj_last_sc_map.put(curr->get_location(), curr);
1046 /* Last SC fence in the current thread */
1047 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1049 /* Iterate over all threads */
1050 for (i = 0;i < thrd_lists->size();i++) {
1051 /* Last SC fence in thread i, before last SC fence in current thread */
1052 ModelAction *last_sc_fence_thread_before = NULL;
1053 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1054 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1056 /* Iterate over actions in thread, starting from most recent */
1057 action_list_t *list = &(*thrd_lists)[i];
1058 sllnode<ModelAction*>* rit;
1059 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1060 ModelAction *act = rit->getVal();
1063 * 1) If RMW and it actually read from something, then we
1064 * already have all relevant edges, so just skip to next
1067 * 2) If RMW and it didn't read from anything, we should
1068 * whatever edge we can get to speed up convergence.
1070 * 3) If normal write, we need to look at earlier actions, so
1071 * continue processing list.
1073 if (curr->is_rmw()) {
1074 if (curr->get_reads_from() != NULL)
1082 /* C++, Section 29.3 statement 7 */
1083 if (last_sc_fence_thread_before && act->is_write() &&
1084 *act < *last_sc_fence_thread_before) {
1085 edgeset.push_back(act);
1090 * Include at most one act per-thread that "happens
1093 if (act->happens_before(curr)) {
1095 * Note: if act is RMW, just add edge:
1097 * The following edge should be handled elsewhere:
1098 * readfrom(act) --mo--> act
1100 if (act->is_write())
1101 edgeset.push_back(act);
1102 else if (act->is_read()) {
1103 //if previous read accessed a null, just keep going
1104 edgeset.push_back(act->get_reads_from());
/* Batch-insert all collected mo edges targeting curr */
1110 mo_graph->addEdges(&edgeset, curr);
1115 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1116 * some constraints. This method checks one the following constraint (others
1117 * require compiler support):
1119 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1120 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1122 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1124 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1126 /* Iterate over all threads */
1127 for (i = 0;i < thrd_lists->size();i++) {
1128 const ModelAction *write_after_read = NULL;
1130 /* Iterate over actions in thread, starting from most recent */
1131 action_list_t *list = &(*thrd_lists)[i];
1132 sllnode<ModelAction *>* rit;
1133 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1134 ModelAction *act = rit->getVal();
1136 /* Don't disallow due to act == reader */
1137 if (!reader->happens_before(act) || reader == act)
1139 else if (act->is_write())
1140 write_after_read = act;
1141 else if (act->is_read() && act->get_reads_from() != NULL)
1142 write_after_read = act->get_reads_from();
1145 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
/**
 * Computes the clock vector that happens-before propagates from this write.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 *        write.
 * @return ClockVector of happens before relation.
 *
 * NOTE(review): loop structure (the backward walk over the rf chain, break
 * statements, caching of computed rfcv values, the final return) is partly
 * elided in this excerpt; comments cover only the visible lines.
 */
1159 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1160 SnapVector<ModelAction *> * processset = NULL;
/* Walk back through the chain of RMWs feeding this write */
1161 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1162 ASSERT(rf->is_write());
/* Stop at a non-RMW, an acq+rel RMW, or a write with a cached rfcv */
1163 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1165 if (processset == NULL)
1166 processset = new SnapVector<ModelAction *>();
1167 processset->push_back(rf);
/* Replay the collected RMWs from oldest to newest, building vec */
1170 int i = (processset == NULL) ? 0 : processset->size();
1172 ClockVector * vec = NULL;
1174 if (rf->get_rfcv() != NULL) {
/* Cached result for this write */
1175 vec = rf->get_rfcv();
1176 } else if (rf->is_acquire() && rf->is_release()) {
1178 } else if (rf->is_release() && !rf->is_rmw()) {
1180 } else if (rf->is_release()) {
1181 //have rmw that is release and doesn't have a rfcv
1182 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1185 //operation that isn't release
1186 if (rf->get_last_fence_release()) {
/* Release fence before the write starts the release sequence */
1188 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1190 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1194 vec = new ClockVector(NULL, NULL);
1197 vec = new ClockVector(vec, NULL);
/* Advance to the next queued RMW in the chain */
1204 rf = (*processset)[i];
1208 if (processset != NULL)
1214 * Performs various bookkeeping operations for the current ModelAction. For
1215 * instance, adds action to the per-object, per-thread action vector and to the
1216 * action trace list of all thread actions.
1218 * @param act is the ModelAction to add.
// @param canprune when true, read/write actions are NOT added to the
// per-location per-thread list (the caller has decided they can be pruned).
// NOTE(review): elided listing — closing braces and some statements are missing.
1220 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1222 int tid = id_to_int(act->get_tid());
// SC fences and unlocks additionally go into the per-location obj_map list;
// the list node is remembered on the action so removeAction can erase it.
1223 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1224 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1225 act->setActionRef(list->add_back(act));
1228 // Update action trace, a total order of all actions
1229 action_trace.addAction(act);
1232 // Update obj_thrd_map, a per location, per thread, order of actions
1233 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1234 if ((int)vec->size() <= tid) {
1235 uint oldsize = vec->size();
// Grow to next_thread_id (not just tid+1) so subsequent threads fit too;
// placement-new initializes only the newly added slots.
1236 vec->resize(priv->next_thread_id);
1237 for(uint i = oldsize;i < priv->next_thread_id;i++)
1238 new (&(*vec)[i]) action_list_t();
1240 fixup_action_list(vec);
1242 if (!canprune && (act->is_read() || act->is_write()))
1243 (*vec)[tid].addAction(act);
1245 // Update thrd_last_action, the last action taken by each thread
1246 if ((int)thrd_last_action.size() <= tid)
1247 thrd_last_action.resize(get_num_threads());
1248 thrd_last_action[tid] = act;
1250 // Update thrd_last_fence_release, the last release fence taken by each thread
1251 if (act->is_fence() && act->is_release()) {
1252 if ((int)thrd_last_fence_release.size() <= tid)
1253 thrd_last_fence_release.resize(get_num_threads());
1254 thrd_last_fence_release[tid] = act;
// Wait operations are also indexed under their mutex location; the mutex
// address is carried in the action's value field.
1257 if (act->is_wait()) {
1258 void *mutex_loc = (void *) act->get_value();
1259 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
// Append act to the given action list (file-local helper; closing brace elided).
1263 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1264 list->addAction(act);
// Append act to the list after (re)creating its clock vector with no parent.
// Used when a write is inserted lazily and needs a fresh CV (closing brace elided).
1267 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1268 act->create_cv(NULL);
1269 list->addAction(act);
1273 * Performs various bookkeeping operations for a normal write. The
1274 * complication is that we are typically inserting a normal write
1275 * lazily, so we need to insert it into the middle of lists.
1277 * @param act is the ModelAction to add.
// NOTE(review): elided listing — braces and some statements are missing.
1280 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1282 int tid = id_to_int(act->get_tid());
1283 insertIntoActionListAndSetCV(&action_trace, act);
1285 // Update obj_thrd_map, a per location, per thread, order of actions
1286 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1287 if (tid >= (int)vec->size()) {
1288 uint oldsize =vec->size();
// Same growth strategy as add_action_to_lists: size for all known threads.
1289 vec->resize(priv->next_thread_id);
1290 for(uint i=oldsize;i<priv->next_thread_id;i++)
1291 new (&(*vec)[i]) action_list_t();
1293 fixup_action_list(vec);
1295 insertIntoActionList(&(*vec)[tid],act);
1297 ModelAction * lastact = thrd_last_action[tid];
1298 // Update thrd_last_action, the last action taken by each thrad
// Equal sequence numbers indicate this lazy write belongs at the thread's
// current position, so it becomes the new "last action".
1299 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1300 thrd_last_action[tid] = act;
// Record a write in obj_wr_thrd_map, the per-location per-thread list of
// writes, growing the per-thread vector on demand as in add_action_to_lists.
// The returned list node is stored on the action for later removal.
// NOTE(review): elided listing — closing braces are missing from view.
1304 void ModelExecution::add_write_to_lists(ModelAction *write) {
1305 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1306 int tid = id_to_int(write->get_tid());
1307 if (tid >= (int)vec->size()) {
1308 uint oldsize =vec->size();
1309 vec->resize(priv->next_thread_id);
1310 for(uint i=oldsize;i<priv->next_thread_id;i++)
1311 new (&(*vec)[i]) simple_action_list_t();
1313 write->setActionRef((*vec)[tid].add_back(write));
1317 * @brief Get the last action performed by a particular Thread
1318 * @param tid The thread ID of the Thread in question
1319 * @return The last action in the thread
// (elided: presumably returns NULL when tid is out of range — confirm)
1321 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1323 int threadid = id_to_int(tid);
// Bounds check: threads that have not acted yet have no slot in the vector.
1324 if (threadid < (int)thrd_last_action.size())
1325 return thrd_last_action[id_to_int(tid)];
1331 * @brief Get the last fence release performed by a particular Thread
1332 * @param tid The thread ID of the Thread in question
1333 * @return The last fence release in the thread, if one exists; NULL otherwise
1335 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1337 int threadid = id_to_int(tid);
// Bounds check mirrors get_last_action; fall-through (elided) yields NULL.
1338 if (threadid < (int)thrd_last_fence_release.size())
1339 return thrd_last_fence_release[id_to_int(tid)];
1345 * Gets the last memory_order_seq_cst write (in the total global sequence)
1346 * performed on a particular object (i.e., memory location), not including the
1348 * @param curr The current ModelAction; also denotes the object location to
1350 * @return The last seq_cst write
// Simple cache lookup: obj_last_sc_map tracks the latest SC write per location.
1352 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1354 void *location = curr->get_location();
1355 return obj_last_sc_map.get(location);
1359 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1360 * performed in a particular thread, prior to a particular fence.
1361 * @param tid The ID of the thread to check
1362 * @param before_fence The fence from which to begin the search; if NULL, then
1363 * search for the most recent fence in the thread.
1364 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
// NOTE(review): elided listing — the NULL-list early return, the before_fence
// guard, and the return statements are not visible here.
1366 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1368 /* All fences should have location FENCE_LOCATION */
1369 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
1374 sllnode<ModelAction*>* rit = list->end();
// First rewind the iterator to before_fence itself (when given)...
1377 for (;rit != NULL;rit=rit->getPrev())
1378 if (rit->getVal() == before_fence)
1381 ASSERT(rit->getVal() == before_fence);
// ...then continue backwards looking for an SC fence from the same thread.
1385 for (;rit != NULL;rit=rit->getPrev()) {
1386 ModelAction *act = rit->getVal();
1387 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1394 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1395 * location). This function identifies the mutex according to the current
1396 * action, which is presumed to perform on the same mutex.
1397 * @param curr The current ModelAction; also denotes the object location to
1399 * @return The last unlock operation
// Note: waits also release the mutex, so they count as "unlock" here.
1401 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1403 void *location = curr->get_location();
1405 simple_action_list_t *list = obj_map.get(location);
1409 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1410 sllnode<ModelAction*>* rit;
// Scan backwards from the most recent action at this location.
1411 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1412 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1413 return rit->getVal();
// Return the thread's last action, falling back to the THREAD_CREATE action
// that spawned it when the thread has not executed anything yet (the return,
// elided here, presumably hands back 'parent').
1417 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1419 ModelAction *parent = get_last_action(tid);
1421 parent = get_thread(tid)->get_creation();
1426 * Returns the clock vector for a given thread.
1427 * @param tid The thread whose clock vector we want
1428 * @return Desired clock vector
// Derived from the thread's parent action; NULL when the thread has none.
1430 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1432 ModelAction *firstaction=get_parent_action(tid);
1433 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compare two values truncated to the given access size in bytes
// (1/2/4 visible here; the dispatch on 'size' and the default/8-byte case
// are elided from this listing).
1436 bool valequals(uint64_t val1, uint64_t val2, int size) {
1439 return ((uint8_t)val1) == ((uint8_t)val2);
1441 return ((uint16_t)val1) == ((uint16_t)val2);
1443 return ((uint32_t)val1) == ((uint32_t)val2);
1453 * Build up an initial set of all past writes that this 'read' action may read
1454 * from, as well as any previously-observed future values that must still be valid.
1456 * @param curr is the current ModelAction that we are exploring; it must be a
// @return newly-allocated vector of candidate writes (caller owns it)
// NOTE(review): elided listing — declaration of 'i', continue/break statements,
// loop braces, and the final return are not visible here.
1459 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1461 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1463 ASSERT(curr->is_read());
1465 ModelAction *last_sc_write = NULL;
// For SC reads we must respect the SC total order: remember the last SC write.
1467 if (curr->is_seqcst())
1468 last_sc_write = get_last_seq_cst_write(curr);
1470 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1472 /* Iterate over all threads */
1473 if (thrd_lists != NULL)
1474 for (i = 0;i < thrd_lists->size();i++) {
1475 /* Iterate over actions in thread, starting from most recent */
1476 simple_action_list_t *list = &(*thrd_lists)[i];
1477 sllnode<ModelAction *> * rit;
1478 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1479 ModelAction *act = rit->getVal();
1484 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1485 bool allow_read = true;
1487 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1490 /* Need to check whether we will have two RMW reading from the same value */
1491 if (curr->is_rmwr()) {
1492 /* It is okay if we have a failing CAS */
1493 if (!curr->is_rmwrcas() ||
1494 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1495 //Need to make sure we aren't the second RMW
1496 CycleNode * node = mo_graph->getNode_noCreate(act);
1497 if (node != NULL && node->getRMW() != NULL) {
1498 //we are the second RMW
1505 /* Only add feasible reads */
1506 rf_set->push_back(act);
1509 /* Include at most one act per-thread that "happens before" curr */
1510 if (act->happens_before(curr))
1515 if (DBG_ENABLED()) {
1516 model_print("Reached read action:\n");
1518 model_print("End printing read_from_past\n");
// Print a formatted table of every action in the list, then a hash of the
// trace (XOR/shift fold of per-action hashes) so executions can be compared.
1523 static void print_list(action_list_t *list)
1525 sllnode<ModelAction*> *it;
1527 model_print("------------------------------------------------------------------------------------\n");
1528 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1529 model_print("------------------------------------------------------------------------------------\n");
1531 unsigned int hash = 0;
1533 for (it = list->begin();it != NULL;it=it->getNext()) {
1534 const ModelAction *act = it->getVal();
// Seq number 0 marks placeholder/uninitialized actions; skip printing them
// (the act->print() call itself is elided from this listing).
1535 if (act->get_seq_number() > 0)
1537 hash = hash^(hash<<3)^(it->getVal()->hash());
1539 model_print("HASH %u\n", hash);
1540 model_print("------------------------------------------------------------------------------------\n");
1543 #if SUPPORT_MOD_ORDER_DUMP
// Write the modification-order graph plus reads-from (red) and
// sequenced-before (blue) edges to <filename>.dot for Graphviz.
// thread_array remembers each thread's previous action to draw sb edges.
1544 void ModelExecution::dumpGraph(char *filename)
1547 sprintf(buffer, "%s.dot", filename);
1548 FILE *file = fopen(buffer, "w");
1549 fprintf(file, "digraph %s {\n", filename);
1550 mo_graph->dumpNodes(file);
1551 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1553 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1554 ModelAction *act = it->getVal();
1555 if (act->is_read()) {
1556 mo_graph->dot_print_node(file, act);
1557 mo_graph->dot_print_edge(file,
1558 act->get_reads_from(),
1560 "label=\"rf\", color=red, weight=2");
// Chain this action to the previous one in the same thread (sb edge).
1562 if (thread_array[act->get_tid()]) {
1563 mo_graph->dot_print_edge(file,
1564 thread_array[id_to_int(act->get_tid())],
1566 "label=\"sb\", color=blue, weight=400");
1569 thread_array[act->get_tid()] = act;
1571 fprintf(file, "}\n");
1572 model_free(thread_array);
1577 /** @brief Prints an execution trace summary. */
1578 void ModelExecution::print_summary()
// When mod-order dumping is compiled in, also emit per-execution graph files
// named exec%04u / graph%04u.
1580 #if SUPPORT_MOD_ORDER_DUMP
1581 char buffername[100];
1582 sprintf(buffername, "exec%04u", get_execution_number());
1583 mo_graph->dumpGraphToFile(buffername);
1584 sprintf(buffername, "graph%04u", get_execution_number());
1585 dumpGraph(buffername);
1588 model_print("Execution trace %d:", get_execution_number());
// Annotate the header with notable execution-wide conditions.
1589 if (scheduler->all_threads_sleeping())
1590 model_print(" SLEEP-SET REDUNDANT");
1591 if (have_bug_reports())
1592 model_print(" DETECTED BUG(S)");
1596 print_list(&action_trace);
// Print only the tail of the execution trace: walk backwards collecting up to
// 'length' actions (declarations of counter/length elided), then print them in
// forward order with the same table format and hash as print_list.
1601 void ModelExecution::print_tail()
1603 model_print("Execution trace %d:\n", get_execution_number());
1605 sllnode<ModelAction*> *it;
1607 model_print("------------------------------------------------------------------------------------\n");
1608 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1609 model_print("------------------------------------------------------------------------------------\n");
1611 unsigned int hash = 0;
1615 SnapList<ModelAction *> list;
// Gather the last 'length' actions by pushing to the front while iterating
// backwards, so 'list' ends up in execution order.
1616 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1617 if (counter > length)
1620 ModelAction * act = it->getVal();
1621 list.push_front(act);
1625 for (it = list.begin();it != NULL;it=it->getNext()) {
1626 const ModelAction *act = it->getVal();
1627 if (act->get_seq_number() > 0)
1629 hash = hash^(hash<<3)^(it->getVal()->hash());
1631 model_print("HASH %u\n", hash);
1632 model_print("------------------------------------------------------------------------------------\n");
1636 * Add a Thread to the system for the first time. Should only be called once
1638 * @param t The Thread to add
1640 void ModelExecution::add_thread(Thread *t)
1642 unsigned int i = id_to_int(t->get_id());
1643 if (i >= thread_map.size())
1644 thread_map.resize(i + 1);
// The internal model-checker thread is bookkeeping-only; never schedule it.
1646 if (!t->is_model_thread())
1647 scheduler->add_thread(t);
1651 * @brief Get a Thread reference by its ID
1652 * @param tid The Thread's ID
1653 * @return A Thread reference
// (elided: presumably returns NULL for an out-of-range id — confirm)
1655 Thread * ModelExecution::get_thread(thread_id_t tid) const
1657 unsigned int i = id_to_int(tid);
1658 if (i < thread_map.size())
1659 return thread_map[i];
1664 * @brief Get a reference to the Thread in which a ModelAction was executed
1665 * @param act The ModelAction
1666 * @return A Thread reference
// Thin convenience wrapper over the thread-id overload.
1668 Thread * ModelExecution::get_thread(const ModelAction *act) const
1670 return get_thread(act->get_tid());
1674 * @brief Get a Thread reference by its pthread ID
1675 * @param index The pthread's ID
1676 * @return A Thread reference
// NOTE(review): elided listing — the branch structure between the direct
// get_thread(pid) path and the pthread_map lookup is not fully visible.
1678 Thread * ModelExecution::get_pthread(pthread_t pid) {
1679 // pid 1 is reserved for the main thread, pthread ids should start from 2
1681 return get_thread(pid);
// Extract the numeric thread id from the pthread_t representation
// (declaration of 'x' elided; presumably a union/cast of pid — confirm).
1688 uint32_t thread_id = x.v;
1690 if (thread_id < pthread_counter + 1)
1691 return pthread_map[thread_id];
1697 * @brief Check if a Thread is currently enabled
1698 * @param t The Thread to check
1699 * @return True if the Thread is currently enabled
// Delegates entirely to the scheduler's enabled-set.
1701 bool ModelExecution::is_enabled(Thread *t) const
1703 return scheduler->is_enabled(t);
1707 * @brief Check if a Thread is currently enabled
1708 * @param tid The ID of the Thread to check
1709 * @return True if the Thread is currently enabled
// Id-based variant; also delegates to the scheduler.
1711 bool ModelExecution::is_enabled(thread_id_t tid) const
1713 return scheduler->is_enabled(tid);
1717 * @brief Select the next thread to execute based on the curren action
1719 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1720 * actions should be followed by the execution of their child thread. In either
1721 * case, the current action should determine the next thread schedule.
1723 * @param curr The current action
1724 * @return The next thread to run, if the current action will determine this
1725 * selection; otherwise NULL
1727 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1729 /* Do not split atomic RMW */
// An rmwr is the read half of an RMW; keep the same thread running so the
// write half executes immediately after.
1730 if (curr->is_rmwr())
1731 return get_thread(curr);
1732 /* Follow CREATE with the created thread */
1733 /* which is not needed, because model.cc takes care of this */
1734 if (curr->get_type() == THREAD_CREATE)
1735 return curr->get_thread_operand();
1736 if (curr->get_type() == PTHREAD_CREATE) {
1737 return curr->get_thread_operand();
1743 * Takes the next step in the execution, if possible.
1744 * @param curr The current step to take
1745 * @return Returns the next Thread to run, if any; NULL if this execution
1748 Thread * ModelExecution::take_step(ModelAction *curr)
1750 Thread *curr_thrd = get_thread(curr);
1751 ASSERT(curr_thrd->get_state() == THREAD_READY);
1753 ASSERT(check_action_enabled(curr));	/* May have side effects? */
// check_current_action may replace curr (e.g. with a previously-built action).
1754 curr = check_current_action(curr);
1757 /* Process this action in ModelHistory for records */
1759 model->get_history()->process_action( curr, curr->get_tid() );
// A thread that just blocked or finished must leave the scheduler's run set.
1761 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1762 scheduler->remove_thread(curr_thrd);
1764 return action_select_next_thread(curr);
1767 /** This method removes references to an Action before we delete it. */
1769 void ModelExecution::removeAction(ModelAction *act) {
// Unlink from the global trace and the per-location per-thread list.
1771 action_trace.removeAction(act);
1774 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1775 (*vec)[act->get_tid()].removeAction(act);
// The remaining lists store an sllnode reference on the action itself
// (set in add_action_to_lists / add_write_to_lists), so erase via that.
1777 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1778 sllnode<ModelAction *> * listref = act->getActionRef();
1779 if (listref != NULL) {
1780 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1781 list->erase(listref);
1783 } else if (act->is_wait()) {
// Waits were indexed under the mutex address carried in the value field.
1784 sllnode<ModelAction *> * listref = act->getActionRef();
1785 if (listref != NULL) {
1786 void *mutex_loc = (void *) act->get_value();
1787 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1789 } else if (act->is_free()) {
1790 sllnode<ModelAction *> * listref = act->getActionRef();
1791 if (listref != NULL) {
1792 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1793 (*vec)[act->get_tid()].erase(listref);
1796 //Clear it from last_sc_map
1797 if (obj_last_sc_map.get(act->get_location()) == act) {
1798 obj_last_sc_map.remove(act->get_location());
1801 //Remove from Cyclegraph
1802 mo_graph->freeAction(act);
1806 /** Computes clock vector that all running threads have already synchronized to. */
// Min-merges the clock vectors of every live (incomplete) thread; any action
// at or below this vector is invisible to all still-running threads.
1808 ClockVector * ModelExecution::computeMinimalCV() {
1809 ClockVector *cvmin = NULL;
1810 //Thread 0 isn't a real thread, so skip it..
1811 for(unsigned int i = 1;i < thread_map.size();i++) {
1812 Thread * t = thread_map[i];
// Completed threads no longer constrain what can be reclaimed (the
// 'continue' is elided from this listing).
1813 if (t->is_complete())
1815 thread_id_t tid = int_to_id(i);
1816 ClockVector * cv = get_cv(tid);
// First live thread seeds cvmin; later threads min-merge into it.
1818 cvmin = new ClockVector(cv, NULL);
1820 cvmin->minmerge(cv);
1826 /** Sometimes we need to remove an action that is the most recent in the thread.  This happens if it is mo before action in other threads.  In that case we need to create a replacement latest ModelAction */
1828 void ModelExecution::fixupLastAct(ModelAction *act) {
// Build a seq_cst NOP stand-in that inherits the removed action's position:
// fresh sequence number, a clock vector copied from act, and the same last
// release fence, then register it through the normal bookkeeping path.
1829 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1830 newact->set_seq_number(get_next_seq_num());
1831 newact->create_cv(act);
1832 newact->set_last_fence_release(act->get_last_fence_release());
1833 add_action_to_lists(newact, false);
1836 /** Compute which actions to free.  */
// Garbage-collects old actions from the trace in three phases:
//   1) mark invisible writes (and everything mo-before them) as free,
//   2) in the retained window, weaken/patch reads whose source was freed,
//   3) in the old window, actually delete whatever can be removed.
// NOTE(review): this listing is heavily elided — early returns, deletes,
// iterator advances, and many braces are missing; comments below describe
// only what the visible lines establish.
1838 void ModelExecution::collectActions() {
// Keep at least traceminsize actions; bail out early (elided) otherwise.
1839 if (priv->used_sequence_numbers < params->traceminsize)
1842 //Compute minimal clock vector for all live threads
1843 ClockVector *cvmin = computeMinimalCV();
1844 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with seq number <= maxtofree are candidates for freeing.
1845 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1847 //Next walk action trace...  When we hit an action, see if it is
1848 //invisible (e.g., earlier than the first before the minimum
1849 //clock for the thread...  if so erase it and all previous
1850 //actions in cyclegraph
1851 sllnode<ModelAction*> * it;
1852 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1853 ModelAction *act = it->getVal();
1854 modelclock_t actseq = act->get_seq_number();
1856 //See if we are done
1857 if (actseq > maxtofree)
1860 thread_id_t act_tid = act->get_tid();
1861 modelclock_t tid_clock = cvmin->getClock(act_tid);
1863 //Free if it is invisible or we have set a flag to remove visible actions.
1864 if (actseq <= tid_clock || params->removevisible) {
1865 ModelAction * write;
// Resolve to the relevant write: the action itself, or what a read read from.
1866 if (act->is_write()) {
1868 } else if (act->is_read()) {
1869 write = act->get_reads_from();
1873 //Mark everything earlier in MO graph to be freed
// BFS over incoming mo edges; every predecessor write is marked READY_FREE.
1874 CycleNode * cn = mo_graph->getNode_noCreate(write);
1876 queue->push_back(cn);
1877 while(!queue->empty()) {
1878 CycleNode * node = queue->back();
1880 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1881 CycleNode * prevnode = node->getInEdge(i);
1882 ModelAction * prevact = prevnode->getAction();
1883 if (prevact->get_type() != READY_FREE) {
1884 prevact->set_free();
1885 queue->push_back(prevnode);
1893 //We may need to remove read actions in the window we don't delete to preserve correctness.
// Phase 2: walk the retained (newer) window backwards from the trace end.
1895 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1896 ModelAction *act = it2->getVal();
1897 //Do iteration early in case we delete the act
1899 bool islastact = false;
1900 ModelAction *lastact = get_last_action(act->get_tid());
// A live thread's most recent action needs special handling (fixupLastAct).
1901 if (act == lastact) {
1902 Thread * th = get_thread(act);
1903 islastact = !th->is_complete();
1906 if (act->is_read()) {
1907 if (act->get_reads_from()->is_free()) {
1908 if (act->is_rmw()) {
1909 //Weaken a RMW from a freed store to a write
1910 act->set_type(ATOMIC_WRITE);
1922 //If we don't delete the action, we should remove references to release fences
1924 const ModelAction *rel_fence =act->get_last_fence_release();
1925 if (rel_fence != NULL) {
1926 modelclock_t relfenceseq = rel_fence->get_seq_number();
1927 thread_id_t relfence_tid = rel_fence->get_tid();
1928 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1929 //Remove references to irrelevant release fences
1930 if (relfenceseq <= tid_clock)
1931 act->set_last_fence_release(NULL);
1934 //Now we are in the window of old actions that we remove if possible
// Phase 3: continue from 'it' forward over the old window, deleting by type.
1935 for (;it != NULL;) {
1936 ModelAction *act = it->getVal();
1937 //Do iteration early since we may delete node...
1939 bool islastact = false;
1940 ModelAction *lastact = get_last_action(act->get_tid());
1941 if (act == lastact) {
1942 Thread * th = get_thread(act);
1943 islastact = !th->is_complete();
1946 if (act->is_read()) {
1947 if (act->get_reads_from()->is_free()) {
1948 if (act->is_rmw()) {
1949 act->set_type(ATOMIC_WRITE);
1959 } else if (act->is_free()) {
1966 } else if (act->is_write()) {
1967 //Do nothing with write that hasn't been marked to be freed
1968 } else if (islastact) {
1969 //Keep the last action for non-read/write actions
1970 } else if (act->is_fence()) {
1971 //Note that acquire fences can always be safely
1972 //removed, but could incur extra overheads in
1973 //traversals.  Removing them before the cvmin seems
1974 //like a good compromise.
1976 //Release fences before the cvmin don't do anything
1977 //because everyone has already synchronized.
1979 //Sequentially fences before cvmin are redundant
1980 //because happens-before will enforce same
1983 modelclock_t actseq = act->get_seq_number();
1984 thread_id_t act_tid = act->get_tid();
1985 modelclock_t tid_clock = cvmin->getClock(act_tid);
1986 if (actseq <= tid_clock) {
1988 // Remove reference to act from thrd_last_fence_release
1989 int thread_id = id_to_int( act->get_tid() );
1990 if (thrd_last_fence_release[thread_id] == act) {
1991 thrd_last_fence_release[thread_id] = NULL;
1997 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1998 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1999 //need to keep most recent unlock/wait for each lock
2000 if(act->is_unlock() || act->is_wait()) {
// Keep only the newest unlock/wait per mutex; older ones are removable.
2001 ModelAction * lastlock = get_last_unlock(act);
2002 if (lastlock != act) {
2007 } else if (act->is_create()) {
// A create is removable once its child thread has completed.
2008 if (act->get_thread_operand()->is_complete()) {
2020 //If we don't delete the action, we should remove references to release fences
2021 const ModelAction *rel_fence =act->get_last_fence_release();
2022 if (rel_fence != NULL) {
2023 modelclock_t relfenceseq = rel_fence->get_seq_number();
2024 thread_id_t relfence_tid = rel_fence->get_tid();
2025 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2026 //Remove references to irrelevant release fences
2027 if (relfenceseq <= tid_clock)
2028 act->set_last_fence_release(NULL);
2036 Fuzzer * ModelExecution::getFuzzer() {