12 #include "clockvector.h"
13 #include "cyclegraph.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
19 #include "newfuzzer.h"
22 static unsigned int atomic_load_count = 0;
23 static unsigned int atomic_store_count = 0;
24 static unsigned int atomic_rmw_count = 0;
26 static unsigned int atomic_fence_count = 0;
27 static unsigned int atomic_lock_count = 0;
28 static unsigned int atomic_trylock_count = 0;
29 static unsigned int atomic_unlock_count = 0;
30 static unsigned int atomic_notify_count = 0;
31 static unsigned int atomic_wait_count = 0;
32 static unsigned int atomic_timedwait_count = 0;
36 * Structure for holding small ModelChecker members that should be snapshotted
38 struct model_snapshot_members {
39 model_snapshot_members() :
40 /* First thread created will have id INITIAL_THREAD_ID */
41 next_thread_id(INITIAL_THREAD_ID),
42 used_sequence_numbers(0),
47 ~model_snapshot_members() {
48 for (unsigned int i = 0;i < bugs.size();i++)
53 unsigned int next_thread_id;
54 modelclock_t used_sequence_numbers;
55 SnapVector<bug_message *> bugs;
56 /** @brief Incorrectly-ordered synchronization was made */
62 /** @brief Constructor */
63 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
67 thread_map(2), /* We'll always need at least 2 threads */
72 condvar_waiters_map(),
79 thrd_last_fence_release(),
80 priv(new struct model_snapshot_members ()),
81 mo_graph(new CycleGraph()),
83 fuzzer(new NewFuzzer()),
89 /* Initialize a model-checker thread, for special ModelActions */
90 model_thread = new Thread(get_next_id());
91 add_thread(model_thread);
92 fuzzer->register_engine(m, this);
93 scheduler->register_engine(this);
95 pthread_key_create(&pthreadkey, tlsdestructor);
99 /** @brief Destructor */
100 ModelExecution::~ModelExecution()
102 for (unsigned int i = INITIAL_THREAD_ID;i < get_num_threads();i++)
103 delete get_thread(int_to_id(i));
109 int ModelExecution::get_execution_number() const
111 return model->get_execution_number();
114 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
116 SnapVector<action_list_t> *tmp = hash->get(ptr);
118 tmp = new SnapVector<action_list_t>();
124 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
126 simple_action_list_t *tmp = hash->get(ptr);
128 tmp = new simple_action_list_t();
134 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
136 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
138 tmp = new SnapVector<simple_action_list_t>();
145 * When vectors of action lists are reallocated due to resize, the root address of
146 * action lists may change. Hence we need to fix the parent pointer of the children
149 static void fixup_action_list(SnapVector<action_list_t> * vec)
151 for (uint i = 0;i < vec->size();i++) {
152 action_list_t * list = &(*vec)[i];
159 static inline void record_atomic_stats(ModelAction * act)
161 switch (act->get_type()) {
163 atomic_store_count++;
172 atomic_fence_count++;
178 atomic_trylock_count++;
181 atomic_unlock_count++;
183 case ATOMIC_NOTIFY_ONE:
184 case ATOMIC_NOTIFY_ALL:
185 atomic_notify_count++;
190 case ATOMIC_TIMEDWAIT:
191 atomic_timedwait_count++;
197 void print_atomic_accesses()
199 model_print("atomic store count: %u\n", atomic_store_count);
200 model_print("atomic load count: %u\n", atomic_load_count);
201 model_print("atomic rmw count: %u\n", atomic_rmw_count);
203 model_print("atomic fence count: %u\n", atomic_fence_count);
204 model_print("atomic lock count: %u\n", atomic_lock_count);
205 model_print("atomic trylock count: %u\n", atomic_trylock_count);
206 model_print("atomic unlock count: %u\n", atomic_unlock_count);
207 model_print("atomic notify count: %u\n", atomic_notify_count);
208 model_print("atomic wait count: %u\n", atomic_wait_count);
209 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
212 /** @return a thread ID for a new Thread */
213 thread_id_t ModelExecution::get_next_id()
215 return priv->next_thread_id++;
218 /** @return the number of user threads created during this execution */
219 unsigned int ModelExecution::get_num_threads() const
221 return priv->next_thread_id;
224 /** @return a sequence number for a new ModelAction */
225 modelclock_t ModelExecution::get_next_seq_num()
227 return ++priv->used_sequence_numbers;
230 /** @return a sequence number for a new ModelAction */
231 modelclock_t ModelExecution::get_curr_seq_num()
233 return priv->used_sequence_numbers;
236 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
237 void ModelExecution::restore_last_seq_num()
239 priv->used_sequence_numbers--;
243 * @brief Should the current action wake up a given thread?
245 * @param curr The current action
246 * @param thread The thread that we might wake up
247 * @return True, if we should wake up the sleeping thread; false otherwise
249 bool ModelExecution::should_wake_up(const ModelAction * asleep) const
251 /* The sleep is literally sleeping */
252 switch (asleep->get_type()) {
254 if (fuzzer->shouldWake(asleep))
258 case ATOMIC_TIMEDWAIT:
259 if (fuzzer->waitShouldWakeUp(asleep))
269 void ModelExecution::wake_up_sleeping_actions()
271 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
272 thread_id_t tid = int_to_id(i);
273 if (scheduler->is_sleep_set(tid)) {
274 Thread *thr = get_thread(tid);
275 ModelAction * pending = thr->get_pending();
276 if (should_wake_up(pending)) {
277 /* Remove this thread from sleep set */
278 scheduler->remove_sleep(thr);
280 if (pending->is_sleep()) {
281 thr->set_wakeup_state(true);
282 } else if (pending->is_wait()) {
283 thr->set_wakeup_state(true);
284 /* Remove this thread from list of waiters */
285 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location());
286 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
287 if (rit->getVal()->get_tid() == tid) {
293 /* Set ETIMEDOUT error */
294 if (pending->is_timedwait())
295 thr->set_return_value(ETIMEDOUT);
302 void ModelExecution::assert_bug(const char *msg)
304 priv->bugs.push_back(new bug_message(msg));
308 /** @return True, if any bugs have been reported for this execution */
309 bool ModelExecution::have_bug_reports() const
311 return priv->bugs.size() != 0;
314 SnapVector<bug_message *> * ModelExecution::get_bugs() const
320 * Check whether the current trace has triggered an assertion which should halt
323 * @return True, if the execution should be aborted; false otherwise
325 bool ModelExecution::has_asserted() const
327 return priv->asserted;
331 * Trigger a trace assertion which should cause this execution to be halted.
332 * This can be due to a detected bug or due to an infeasibility that should
335 void ModelExecution::set_assert()
337 priv->asserted = true;
341 * Check if we are in a deadlock. Should only be called at the end of an
342 * execution, although it should not give false positives in the middle of an
343 * execution (there should be some ENABLED thread).
345 * @return True if program is in a deadlock; false otherwise
347 bool ModelExecution::is_deadlocked() const
349 bool blocking_threads = false;
350 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
351 thread_id_t tid = int_to_id(i);
354 Thread *t = get_thread(tid);
355 if (!t->is_model_thread() && t->get_pending())
356 blocking_threads = true;
358 return blocking_threads;
362 * Check if this is a complete execution. That is, have all thread completed
363 * execution (rather than exiting because sleep sets have forced a redundant
366 * @return True if the execution is complete.
368 bool ModelExecution::is_complete_execution() const
370 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++)
371 if (is_enabled(int_to_id(i)))
376 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
377 uint64_t value = *((const uint64_t *) location);
378 modelclock_t storeclock;
379 thread_id_t storethread;
380 getStoreThreadAndClock(location, &storethread, &storeclock);
381 setAtomicStoreFlag(location);
382 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
383 act->set_seq_number(storeclock);
384 add_normal_write_to_lists(act);
385 add_write_to_lists(act);
386 w_modification_order(act);
388 model->get_history()->process_action(act, act->get_tid());
394 * Processes a read model action.
395 * @param curr is the read model action to process.
396 * @param rf_set is the set of model actions we can possibly read from
397 * @return True if the read can be pruned from the thread map list.
399 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
401 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
402 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
403 if (hasnonatomicstore) {
404 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
405 rf_set->push_back(nonatomicstore);
408 // Remove writes that violate read modification order
411 while (i < rf_set->size()) {
412 ModelAction * rf = (*rf_set)[i];
413 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
414 (*rf_set)[i] = rf_set->back();
421 int index = fuzzer->selectWrite(curr, rf_set);
423 ModelAction *rf = (*rf_set)[index];
426 bool canprune = false;
427 if (r_modification_order(curr, rf, priorset, &canprune)) {
428 for(unsigned int i=0;i<priorset->size();i++) {
429 mo_graph->addEdge((*priorset)[i], rf);
432 get_thread(curr)->set_return_value(rf->get_write_value());
434 //Update acquire fence clock vector
435 ClockVector * hbcv = get_hb_from_write(rf);
437 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
438 return canprune && (curr->get_type() == ATOMIC_READ);
441 (*rf_set)[index] = rf_set->back();
447 * Processes a lock, trylock, or unlock model action. @param curr is
448 * the read model action to process.
450 * The try lock operation checks whether the lock is taken. If not,
451 * it falls to the normal lock operation case. If so, it returns
454 * The lock operation has already been checked that it is enabled, so
455 * it just grabs the lock and synchronizes with the previous unlock.
457 * The unlock operation has to re-enable all of the threads that are
458 * waiting on the lock.
460 * @return True if synchronization was updated; false otherwise
462 bool ModelExecution::process_mutex(ModelAction *curr)
464 cdsc::mutex *mutex = curr->get_mutex();
465 struct cdsc::mutex_state *state = NULL;
468 state = mutex->get_state();
470 switch (curr->get_type()) {
471 case ATOMIC_TRYLOCK: {
472 bool success = !state->locked;
473 curr->set_try_lock(success);
475 get_thread(curr)->set_return_value(0);
478 get_thread(curr)->set_return_value(1);
480 //otherwise fall into the lock case
482 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
483 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
484 // assert_bug("Lock access before initialization");
486 // TODO: lock count for recursive mutexes
487 state->locked = get_thread(curr);
488 ModelAction *unlock = get_last_unlock(curr);
489 //synchronize with the previous unlock statement
490 if (unlock != NULL) {
491 synchronize(unlock, curr);
497 Thread *curr_thrd = get_thread(curr);
498 /* wake up the other threads */
499 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
500 Thread *t = get_thread(int_to_id(i));
501 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
505 /* unlock the lock - after checking who was waiting on it */
506 state->locked = NULL;
508 /* disable this thread */
509 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
510 waiters->push_back(curr);
511 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
513 if (fuzzer->waitShouldFail(curr)) // If wait should fail spuriously,
514 scheduler->add_sleep(curr_thrd); // place this thread into THREAD_SLEEP_SET
516 scheduler->sleep(curr_thrd);
520 case ATOMIC_TIMEDWAIT: {
521 Thread *curr_thrd = get_thread(curr);
522 if (!fuzzer->randomizeWaitTime(curr)) {
523 curr_thrd->set_return_value(ETIMEDOUT);
527 /* wake up the other threads */
528 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
529 Thread *t = get_thread(int_to_id(i));
530 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
534 /* unlock the lock - after checking who was waiting on it */
535 state->locked = NULL;
537 /* disable this thread */
538 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
539 waiters->push_back(curr);
540 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
541 scheduler->add_sleep(curr_thrd);
544 case ATOMIC_UNLOCK: {
545 // TODO: lock count for recursive mutexes
546 /* wake up the other threads */
547 Thread *curr_thrd = get_thread(curr);
548 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
549 Thread *t = get_thread(int_to_id(i));
550 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
554 /* unlock the lock - after checking who was waiting on it */
555 state->locked = NULL;
558 case ATOMIC_NOTIFY_ALL: {
559 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
560 //activate all the waiting threads
561 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
562 Thread * thread = get_thread(rit->getVal());
563 if (thread->get_state() != THREAD_COMPLETED)
564 scheduler->wake(thread);
565 thread->set_wakeup_state(true);
570 case ATOMIC_NOTIFY_ONE: {
571 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
572 if (waiters->size() != 0) {
573 Thread * thread = fuzzer->selectNotify(waiters);
574 if (thread->get_state() != THREAD_COMPLETED)
575 scheduler->wake(thread);
576 thread->set_wakeup_state(true);
588 * Process a write ModelAction
589 * @param curr The ModelAction to process
590 * @return True if the mo_graph was updated or promises were resolved
592 void ModelExecution::process_write(ModelAction *curr)
594 w_modification_order(curr);
595 get_thread(curr)->set_return_value(VALUE_NONE);
599 * Process a fence ModelAction
600 * @param curr The ModelAction to process
601 * @return True if synchronization was updated
603 void ModelExecution::process_fence(ModelAction *curr)
606 * fence-relaxed: no-op
607 * fence-release: only log the occurence (not in this function), for
608 * use in later synchronization
609 * fence-acquire (this function): search for hypothetical release
611 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
613 if (curr->is_acquire()) {
614 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
619 * @brief Process the current action for thread-related activity
621 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
622 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
623 * synchronization, etc. This function is a no-op for non-THREAD actions
624 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
626 * @param curr The current action
627 * @return True if synchronization was updated or a thread completed
629 void ModelExecution::process_thread_action(ModelAction *curr)
631 switch (curr->get_type()) {
632 case THREAD_CREATE: {
633 thrd_t *thrd = (thrd_t *)curr->get_location();
634 struct thread_params *params = (struct thread_params *)curr->get_value();
635 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
636 curr->set_thread_operand(th);
638 th->set_creation(curr);
641 case PTHREAD_CREATE: {
642 (*(uint32_t *)curr->get_location()) = pthread_counter++;
644 struct pthread_params *params = (struct pthread_params *)curr->get_value();
645 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
646 curr->set_thread_operand(th);
648 th->set_creation(curr);
650 if ( pthread_map.size() < pthread_counter )
651 pthread_map.resize( pthread_counter );
652 pthread_map[ pthread_counter-1 ] = th;
657 Thread *blocking = curr->get_thread_operand();
658 ModelAction *act = get_last_action(blocking->get_id());
659 synchronize(act, curr);
663 Thread *blocking = curr->get_thread_operand();
664 ModelAction *act = get_last_action(blocking->get_id());
665 synchronize(act, curr);
669 case THREADONLY_FINISH:
670 case THREAD_FINISH: {
671 Thread *th = get_thread(curr);
672 if (curr->get_type() == THREAD_FINISH &&
673 th == model->getInitThread()) {
679 /* Wake up any joining threads */
680 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
681 Thread *waiting = get_thread(int_to_id(i));
682 if (waiting->waiting_on() == th &&
683 waiting->get_pending()->is_thread_join())
684 scheduler->wake(waiting);
693 Thread *th = get_thread(curr);
694 th->set_pending(curr);
695 scheduler->add_sleep(th);
704 * Initialize the current action by performing one or more of the following
705 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
706 * manipulating backtracking sets, allocating and
707 * initializing clock vectors, and computing the promises to fulfill.
709 * @param curr The current action, as passed from the user context; may be
710 * freed/invalidated after the execution of this function, with a different
711 * action "returned" its place (pass-by-reference)
712 * @return True if curr is a newly-explored action; false otherwise
714 bool ModelExecution::initialize_curr_action(ModelAction **curr)
716 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
717 ModelAction *newcurr = process_rmw(*curr);
723 ModelAction *newcurr = *curr;
725 newcurr->set_seq_number(get_next_seq_num());
726 /* Always compute new clock vector */
727 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
729 /* Assign most recent release fence */
730 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
732 return true; /* This was a new ModelAction */
737 * @brief Establish reads-from relation between two actions
739 * Perform basic operations involved with establishing a concrete rf relation,
740 * including setting the ModelAction data and checking for release sequences.
742 * @param act The action that is reading (must be a read)
743 * @param rf The action from which we are reading (must be a write)
745 * @return True if this read established synchronization
748 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
751 ASSERT(rf->is_write());
753 act->set_read_from(rf);
754 if (act->is_acquire()) {
755 ClockVector *cv = get_hb_from_write(rf);
758 act->get_cv()->merge(cv);
763 * @brief Synchronizes two actions
765 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
766 * This function performs the synchronization as well as providing other hooks
767 * for other checks along with synchronization.
769 * @param first The left-hand side of the synchronizes-with relation
770 * @param second The right-hand side of the synchronizes-with relation
771 * @return True if the synchronization was successful (i.e., was consistent
772 * with the execution order); false otherwise
774 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
776 if (*second < *first) {
777 ASSERT(0); //This should not happend
780 return second->synchronize_with(first);
784 * @brief Check whether a model action is enabled.
786 * Checks whether an operation would be successful (i.e., is a lock already
787 * locked, or is the joined thread already complete).
789 * For yield-blocking, yields are never enabled.
791 * @param curr is the ModelAction to check whether it is enabled.
792 * @return a bool that indicates whether the action is enabled.
794 bool ModelExecution::check_action_enabled(ModelAction *curr) {
795 switch (curr->get_type()) {
797 cdsc::mutex *lock = curr->get_mutex();
798 struct cdsc::mutex_state *state = lock->get_state();
800 Thread *lock_owner = (Thread *)state->locked;
801 Thread *curr_thread = get_thread(curr);
802 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
812 Thread *blocking = curr->get_thread_operand();
813 if (!blocking->is_complete()) {
819 if (!fuzzer->shouldSleep(curr))
831 * This is the heart of the model checker routine. It performs model-checking
832 * actions corresponding to a given "current action." Among other processes, it
833 * calculates reads-from relationships, updates synchronization clock vectors,
834 * forms a memory_order constraints graph, and handles replay/backtrack
835 * execution when running permutations of previously-observed executions.
837 * @param curr The current action to process
838 * @return The ModelAction that is actually executed; may be different than
841 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
844 bool newly_explored = initialize_curr_action(&curr);
848 wake_up_sleeping_actions();
850 SnapVector<ModelAction *> * rf_set = NULL;
851 bool canprune = false;
852 /* Build may_read_from set for newly-created actions */
853 if (curr->is_read() && newly_explored) {
854 rf_set = build_may_read_from(curr);
855 canprune = process_read(curr, rf_set);
858 ASSERT(rf_set == NULL);
860 /* Add the action to lists if not the second part of a rmw */
861 if (newly_explored) {
863 record_atomic_stats(curr);
865 add_action_to_lists(curr, canprune);
868 if (curr->is_write())
869 add_write_to_lists(curr);
871 process_thread_action(curr);
873 if (curr->is_write())
876 if (curr->is_fence())
879 if (curr->is_mutex_op())
885 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
886 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
887 ModelAction *lastread = get_last_action(act->get_tid());
888 lastread->process_rmw(act);
890 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
896 * @brief Updates the mo_graph with the constraints imposed from the current
899 * Basic idea is the following: Go through each other thread and find
900 * the last action that happened before our read. Two cases:
902 * -# The action is a write: that write must either occur before
903 * the write we read from or be the write we read from.
904 * -# The action is a read: the write that that action read from
905 * must occur before the write we read from or be the same write.
907 * @param curr The current action. Must be a read.
908 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
909 * @param check_only If true, then only check whether the current action satisfies
910 * read modification order or not, without modifiying priorset and canprune.
912 * @return True if modification order edges were added; false otherwise
915 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
916 SnapVector<ModelAction *> * priorset, bool * canprune)
918 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
919 ASSERT(curr->is_read());
921 /* Last SC fence in the current thread */
922 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
924 int tid = curr->get_tid();
926 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
927 if ((int)thrd_lists->size() <= tid) {
928 uint oldsize = thrd_lists->size();
929 thrd_lists->resize(priv->next_thread_id);
930 for(uint i = oldsize;i < priv->next_thread_id;i++)
931 new (&(*thrd_lists)[i]) action_list_t();
933 fixup_action_list(thrd_lists);
936 ModelAction *prev_same_thread = NULL;
937 /* Iterate over all threads */
938 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
939 /* Last SC fence in thread tid */
940 ModelAction *last_sc_fence_thread_local = NULL;
942 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
944 /* Last SC fence in thread tid, before last SC fence in current thread */
945 ModelAction *last_sc_fence_thread_before = NULL;
946 if (last_sc_fence_local)
947 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
949 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
950 if (prev_same_thread != NULL &&
951 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
952 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
956 /* Iterate over actions in thread, starting from most recent */
957 action_list_t *list = &(*thrd_lists)[tid];
958 sllnode<ModelAction *> * rit;
959 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
960 ModelAction *act = rit->getVal();
965 /* Don't want to add reflexive edges on 'rf' */
966 if (act->equals(rf)) {
967 if (act->happens_before(curr))
973 if (act->is_write()) {
974 /* C++, Section 29.3 statement 5 */
975 if (curr->is_seqcst() && last_sc_fence_thread_local &&
976 *act < *last_sc_fence_thread_local) {
977 if (mo_graph->checkReachable(rf, act))
979 priorset->push_back(act);
982 /* C++, Section 29.3 statement 4 */
983 else if (act->is_seqcst() && last_sc_fence_local &&
984 *act < *last_sc_fence_local) {
985 if (mo_graph->checkReachable(rf, act))
987 priorset->push_back(act);
990 /* C++, Section 29.3 statement 6 */
991 else if (last_sc_fence_thread_before &&
992 *act < *last_sc_fence_thread_before) {
993 if (mo_graph->checkReachable(rf, act))
995 priorset->push_back(act);
1001 * Include at most one act per-thread that "happens
1004 if (act->happens_before(curr)) {
1006 if (last_sc_fence_local == NULL ||
1007 (*last_sc_fence_local < *act)) {
1008 prev_same_thread = act;
1011 if (act->is_write()) {
1012 if (mo_graph->checkReachable(rf, act))
1014 priorset->push_back(act);
1016 ModelAction *prevrf = act->get_reads_from();
1017 if (!prevrf->equals(rf)) {
1018 if (mo_graph->checkReachable(rf, prevrf))
1020 priorset->push_back(prevrf);
1022 if (act->get_tid() == curr->get_tid()) {
1023 //Can prune curr from obj list
1036 * Updates the mo_graph with the constraints imposed from the current write.
1038 * Basic idea is the following: Go through each other thread and find
1039 * the lastest action that happened before our write. Two cases:
1041 * (1) The action is a write => that write must occur before
1044 * (2) The action is a read => the write that that action read from
1045 * must occur before the current write.
1047 * This method also handles two other issues:
1049 * (I) Sequential Consistency: Making sure that if the current write is
1050 * seq_cst, that it occurs after the previous seq_cst write.
1052 * (II) Sending the write back to non-synchronizing reads.
1054 * @param curr The current action. Must be a write.
1055 * @param send_fv A vector for stashing reads to which we may pass our future
1056 * value. If NULL, then don't record any future values.
1057 * @return True if modification order edges were added; false otherwise
1059 void ModelExecution::w_modification_order(ModelAction *curr)
1061 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1063 ASSERT(curr->is_write());
1065 SnapList<ModelAction *> edgeset;
1067 if (curr->is_seqcst()) {
1068 /* We have to at least see the last sequentially consistent write,
1069 so we are initialized. */
1070 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1071 if (last_seq_cst != NULL) {
1072 edgeset.push_back(last_seq_cst);
1074 //update map for next query
1075 obj_last_sc_map.put(curr->get_location(), curr);
1078 /* Last SC fence in the current thread */
1079 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1081 /* Iterate over all threads */
1082 for (i = 0;i < thrd_lists->size();i++) {
1083 /* Last SC fence in thread i, before last SC fence in current thread */
1084 ModelAction *last_sc_fence_thread_before = NULL;
1085 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1086 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1088 /* Iterate over actions in thread, starting from most recent */
1089 action_list_t *list = &(*thrd_lists)[i];
1090 sllnode<ModelAction*>* rit;
1091 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1092 ModelAction *act = rit->getVal();
1095 * 1) If RMW and it actually read from something, then we
1096 * already have all relevant edges, so just skip to next
1099 * 2) If RMW and it didn't read from anything, we should
1100 * whatever edge we can get to speed up convergence.
1102 * 3) If normal write, we need to look at earlier actions, so
1103 * continue processing list.
1105 if (curr->is_rmw()) {
1106 if (curr->get_reads_from() != NULL)
1114 /* C++, Section 29.3 statement 7 */
1115 if (last_sc_fence_thread_before && act->is_write() &&
1116 *act < *last_sc_fence_thread_before) {
1117 edgeset.push_back(act);
1122 * Include at most one act per-thread that "happens
1125 if (act->happens_before(curr)) {
1127 * Note: if act is RMW, just add edge:
1129 * The following edge should be handled elsewhere:
1130 * readfrom(act) --mo--> act
1132 if (act->is_write())
1133 edgeset.push_back(act);
1134 else if (act->is_read()) {
1135 //if previous read accessed a null, just keep going
1136 edgeset.push_back(act->get_reads_from());
1142 mo_graph->addEdges(&edgeset, curr);
1147 * Computes the clock vector that happens before propagates from this write.
1149 * @param rf The action that might be part of a release sequence. Must be a
1151 * @return ClockVector of happens before relation.
1154 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1155 SnapVector<ModelAction *> * processset = NULL;
1156 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1157 ASSERT(rf->is_write());
1158 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1160 if (processset == NULL)
1161 processset = new SnapVector<ModelAction *>();
1162 processset->push_back(rf);
1165 int i = (processset == NULL) ? 0 : processset->size();
1167 ClockVector * vec = NULL;
1169 if (rf->get_rfcv() != NULL) {
1170 vec = rf->get_rfcv();
1171 } else if (rf->is_acquire() && rf->is_release()) {
1173 } else if (rf->is_release() && !rf->is_rmw()) {
1175 } else if (rf->is_release()) {
1176 //have rmw that is release and doesn't have a rfcv
1177 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1180 //operation that isn't release
1181 if (rf->get_last_fence_release()) {
1183 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1185 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1189 vec = new ClockVector(NULL, NULL);
1192 vec = new ClockVector(vec, NULL);
1199 rf = (*processset)[i];
1203 if (processset != NULL)
1209 * Performs various bookkeeping operations for the current ModelAction. For
1210 * instance, adds action to the per-object, per-thread action vector and to the
1211 * action trace list of all thread actions.
1213 * @param act is the ModelAction to add.
// NOTE(review): this listing elides several original lines (opening/closing
// braces, blank lines), so the fragment below is not brace-balanced as shown.
1215 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1217 int tid = id_to_int(act->get_tid());
// SC fences and unlocks are also recorded in the per-location obj_map; the
// returned list node is stashed on the action so removeAction() can erase it.
1218 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1219 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1220 act->setActionRef(list->add_back(act));
1223 // Update action trace, a total order of all actions
1224 action_trace.addAction(act);
1227 // Update obj_thrd_map, a per location, per thread, order of actions
1228 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector to cover every thread id allocated so far;
// placement-new initializes only the newly exposed slots.
1229 if ((int)vec->size() <= tid) {
1230 uint oldsize = vec->size();
1231 vec->resize(priv->next_thread_id);
1232 for(uint i = oldsize;i < priv->next_thread_id;i++)
1233 new (&(*vec)[i]) action_list_t();
1235 fixup_action_list(vec);
// Reads/writes marked prunable are deliberately NOT added to the per-thread
// list (only non-prunable ones are) — presumably to keep prunable memory
// accesses out of later traversals; TODO(review) confirm against callers.
1237 if (!canprune && (act->is_read() || act->is_write()))
1238 (*vec)[tid].addAction(act);
1240 // Update thrd_last_action, the last action taken by each thread
1241 if ((int)thrd_last_action.size() <= tid)
1242 thrd_last_action.resize(get_num_threads());
1243 thrd_last_action[tid] = act;
1245 // Update thrd_last_fence_release, the last release fence taken by each thread
1246 if (act->is_fence() && act->is_release()) {
1247 if ((int)thrd_last_fence_release.size() <= tid)
1248 thrd_last_fence_release.resize(get_num_threads());
1249 thrd_last_fence_release[tid] = act;
// A wait action stores its mutex address in its value field; record the wait
// in the mutex's obj_map list as well so unlock bookkeeping can find it.
1252 if (act->is_wait()) {
1253 void *mutex_loc = (void *) act->get_value();
1254 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
// Append @param act to @param list (closing brace elided in this listing).
1258 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1259 list->addAction(act);
// Give @param act a fresh clock vector (no parent CV), then append it to
// @param list. Closing brace elided in this listing.
1262 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1263 act->create_cv(NULL);
1264 list->addAction(act);
1268 * Performs various bookkeeping operations for a normal write. The
1269 * complication is that we are typically inserting a normal write
1270 * lazily, so we need to insert it into the middle of lists.
1272 * @param act is the ModelAction to add.
// NOTE(review): several original lines (braces, blanks) are elided in this
// listing; the fragment is not brace-balanced as shown.
1275 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1277 int tid = id_to_int(act->get_tid());
1278 insertIntoActionListAndSetCV(&action_trace, act);
1280 // Update obj_thrd_map, a per location, per thread, order of actions
1281 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Same grow-and-placement-new pattern as add_action_to_lists().
1282 if (tid >= (int)vec->size()) {
1283 uint oldsize =vec->size();
1284 vec->resize(priv->next_thread_id);
1285 for(uint i=oldsize;i<priv->next_thread_id;i++)
1286 new (&(*vec)[i]) action_list_t();
1288 fixup_action_list(vec);
1290 insertIntoActionList(&(*vec)[tid],act);
1292 ModelAction * lastact = thrd_last_action[tid];
1293 // Update thrd_last_action, the last action taken by each thrad
// Only displace the recorded last action when none exists or when the lazily
// inserted write shares its sequence number (i.e., it is the newest step).
1294 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1295 thrd_last_action[tid] = act;
// Record @param write in obj_wr_thrd_map, the per-location, per-thread list of
// writes, growing the per-thread vector as needed. The list node is stored on
// the action so removeAction() can erase it later. (Some braces elided in
// this listing.)
1299 void ModelExecution::add_write_to_lists(ModelAction *write) {
1300 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1301 int tid = id_to_int(write->get_tid());
1302 if (tid >= (int)vec->size()) {
1303 uint oldsize =vec->size();
1304 vec->resize(priv->next_thread_id);
1305 for(uint i=oldsize;i<priv->next_thread_id;i++)
1306 new (&(*vec)[i]) simple_action_list_t();
1308 write->setActionRef((*vec)[tid].add_back(write));
1312 * @brief Get the last action performed by a particular Thread
1313 * @param tid The thread ID of the Thread in question
1314 * @return The last action in the thread
// NOTE(review): the out-of-range branch (presumably returning NULL) is elided
// from this listing.
1316 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1318 int threadid = id_to_int(tid);
1319 if (threadid < (int)thrd_last_action.size())
1320 return thrd_last_action[id_to_int(tid)];
1326 * @brief Get the last fence release performed by a particular Thread
1327 * @param tid The thread ID of the Thread in question
1328 * @return The last fence release in the thread, if one exists; NULL otherwise
// NOTE(review): the out-of-range branch (the NULL return) is elided from this
// listing.
1330 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1332 int threadid = id_to_int(tid);
1333 if (threadid < (int)thrd_last_fence_release.size())
1334 return thrd_last_fence_release[id_to_int(tid)];
1340 * Gets the last memory_order_seq_cst write (in the total global sequence)
1341 * performed on a particular object (i.e., memory location), not including the
1343 * @param curr The current ModelAction; also denotes the object location to
1345 * @return The last seq_cst write
1347 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
// obj_last_sc_map caches the most recent SC write per location; a simple map
// lookup suffices here (returns NULL-equivalent when absent — TODO confirm
// HashTable::get semantics).
1349 void *location = curr->get_location();
1350 return obj_last_sc_map.get(location);
1354 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1355 * performed in a particular thread, prior to a particular fence.
1356 * @param tid The ID of the thread to check
1357 * @param before_fence The fence from which to begin the search; if NULL, then
1358 * search for the most recent fence in the thread.
1359 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
// NOTE(review): this listing elides the NULL-list guard, the loop break after
// locating before_fence, and the function's braces/returns.
1361 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1363 /* All fences should have location FENCE_LOCATION */
1364 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
// Walk backwards from the end of the fence list to find before_fence first...
1369 sllnode<ModelAction*>* rit = list->end();
1372 for (;rit != NULL;rit=rit->getPrev())
1373 if (rit->getVal() == before_fence)
1376 ASSERT(rit->getVal() == before_fence);
// ...then continue backwards looking for an SC fence in the requested thread.
1380 for (;rit != NULL;rit=rit->getPrev()) {
1381 ModelAction *act = rit->getVal();
1382 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1389 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1390 * location). This function identifies the mutex according to the current
1391 * action, which is presumed to perform on the same mutex.
1392 * @param curr The current ModelAction; also denotes the object location to
1394 * @return The last unlock operation
// NOTE(review): the NULL-list guard and the final NULL return are elided from
// this listing. Waits count as unlocks here because a wait releases the mutex.
1396 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1398 void *location = curr->get_location();
1400 simple_action_list_t *list = obj_map.get(location);
1404 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1405 sllnode<ModelAction*>* rit;
1406 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1407 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1408 return rit->getVal();
// Return the last action of thread @param tid, falling back to the thread's
// creation action. NOTE(review): the guard between these two statements
// (presumably `if (parent == NULL)`) and the return are elided in this listing.
1412 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1414 ModelAction *parent = get_last_action(tid);
1416 parent = get_thread(tid)->get_creation();
1421 * Returns the clock vector for a given thread.
1422 * @param tid The thread whose clock vector we want
1423 * @return Desired clock vector
1425 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
// The thread's clock vector lives on its parent/last action; NULL when the
// thread has no such action yet.
1427 ModelAction *firstaction=get_parent_action(tid);
1428 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compare two values after truncating both to @param size bytes (1, 2, or 4).
// NOTE(review): the switch/case scaffolding and default branch are elided in
// this listing; only the per-width comparisons are visible.
1431 bool valequals(uint64_t val1, uint64_t val2, int size) {
1434 return ((uint8_t)val1) == ((uint8_t)val2);
1436 return ((uint16_t)val1) == ((uint16_t)val2);
1438 return ((uint32_t)val1) == ((uint32_t)val2);
1448 * Build up an initial set of all past writes that this 'read' action may read
1449 * from, as well as any previously-observed future values that must still be valid.
1451 * @param curr is the current ModelAction that we are exploring; it must be a
// NOTE(review): this listing elides several lines (loop-variable declaration,
// continue/break statements, braces, the return), so the fragment is not
// complete as shown.
1454 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1456 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1458 ASSERT(curr->is_read());
1460 ModelAction *last_sc_write = NULL;
// For an SC read, the last SC write constrains which writes remain visible.
1462 if (curr->is_seqcst())
1463 last_sc_write = get_last_seq_cst_write(curr);
1465 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1467 /* Iterate over all threads */
1468 if (thrd_lists != NULL)
1469 for (i = 0;i < thrd_lists->size();i++) {
1470 /* Iterate over actions in thread, starting from most recent */
1471 simple_action_list_t *list = &(*thrd_lists)[i];
1472 sllnode<ModelAction *> * rit;
1473 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1474 ModelAction *act = rit->getVal();
1479 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1480 bool allow_read = true;
// An SC read may not read an SC write (or anything happening before the last
// SC write) other than the last SC write itself.
1482 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1485 /* Need to check whether we will have two RMW reading from the same value */
1486 if (curr->is_rmwr()) {
1487 /* It is okay if we have a failing CAS */
1488 if (!curr->is_rmwrcas() ||
1489 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1490 //Need to make sure we aren't the second RMW
// A write already read by another RMW (its cycle-graph node has an RMW
// successor) cannot feed a second RMW.
1491 CycleNode * node = mo_graph->getNode_noCreate(act);
1492 if (node != NULL && node->getRMW() != NULL) {
1493 //we are the second RMW
1500 /* Only add feasible reads */
1501 rf_set->push_back(act);
1504 /* Include at most one act per-thread that "happens before" curr */
1505 if (act->happens_before(curr))
1510 if (DBG_ENABLED()) {
1511 model_print("Reached read action:\n");
1513 model_print("End printing read_from_past\n");
// Print every action in @param list as a formatted trace table, followed by a
// rolling hash of the printed actions (used to compare executions cheaply).
// NOTE(review): the per-action print call and some braces are elided in this
// listing.
1518 static void print_list(action_list_t *list)
1520 sllnode<ModelAction*> *it;
1522 model_print("------------------------------------------------------------------------------------\n");
1523 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1524 model_print("------------------------------------------------------------------------------------\n");
1526 unsigned int hash = 0;
1528 for (it = list->begin();it != NULL;it=it->getNext()) {
1529 const ModelAction *act = it->getVal();
// Only actions with a real sequence number participate in the trace/hash.
1530 if (act->get_seq_number() > 0)
1532 hash = hash^(hash<<3)^(it->getVal()->hash());
1534 model_print("HASH %u\n", hash);
1535 model_print("------------------------------------------------------------------------------------\n");
1538 #if SUPPORT_MOD_ORDER_DUMP
// Dump the modification-order graph plus reads-from (red) and sequenced-before
// (blue) edges to "<filename>.dot" for Graphviz. NOTE(review): the buffer
// declaration, fclose, and several braces are elided in this listing.
1539 void ModelExecution::dumpGraph(char *filename)
1542 sprintf(buffer, "%s.dot", filename);
1543 FILE *file = fopen(buffer, "w");
1544 fprintf(file, "digraph %s {\n", filename);
1545 mo_graph->dumpNodes(file);
// thread_array tracks the most recent action per thread to draw sb edges.
1546 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1548 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1549 ModelAction *act = it->getVal();
1550 if (act->is_read()) {
1551 mo_graph->dot_print_node(file, act);
1552 mo_graph->dot_print_edge(file,
1553 act->get_reads_from(),
1555 "label=\"rf\", color=red, weight=2");
1557 if (thread_array[act->get_tid()]) {
1558 mo_graph->dot_print_edge(file,
1559 thread_array[id_to_int(act->get_tid())],
1561 "label=\"sb\", color=blue, weight=400");
1564 thread_array[act->get_tid()] = act;
1566 fprintf(file, "}\n");
1567 model_free(thread_array);
1572 /** @brief Prints an execution trace summary. */
1573 void ModelExecution::print_summary()
// When mod-order dumping is compiled in, also write the cycle graph and the
// annotated graph for this execution number. NOTE(review): the #endif and
// surrounding braces are elided in this listing.
1575 #if SUPPORT_MOD_ORDER_DUMP
1576 char buffername[100];
1577 sprintf(buffername, "exec%04u", get_execution_number());
1578 mo_graph->dumpGraphToFile(buffername);
1579 sprintf(buffername, "graph%04u", get_execution_number());
1580 dumpGraph(buffername);
1583 model_print("Execution trace %d:", get_execution_number());
1584 if (scheduler->all_threads_sleeping())
1585 model_print(" SLEEP-SET REDUNDANT");
1586 if (have_bug_reports())
1587 model_print(" DETECTED BUG(S)");
1591 print_list(&action_trace);
// Print only the tail of the execution trace (the most recent actions), in
// forward order, with the same table format and rolling hash as print_list().
// NOTE(review): the declarations of `counter`/`length`, the per-action print,
// and some braces are elided in this listing.
1596 void ModelExecution::print_tail()
1598 model_print("Execution trace %d:\n", get_execution_number());
1600 sllnode<ModelAction*> *it;
1602 model_print("------------------------------------------------------------------------------------\n");
1603 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1604 model_print("------------------------------------------------------------------------------------\n");
1606 unsigned int hash = 0;
// Walk backwards collecting up to `length` actions, pushing to the front so
// the collected window prints in forward order.
1610 SnapList<ModelAction *> list;
1611 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1612 if (counter > length)
1615 ModelAction * act = it->getVal();
1616 list.push_front(act);
1620 for (it = list.begin();it != NULL;it=it->getNext()) {
1621 const ModelAction *act = it->getVal();
1622 if (act->get_seq_number() > 0)
1624 hash = hash^(hash<<3)^(it->getVal()->hash());
1626 model_print("HASH %u\n", hash);
1627 model_print("------------------------------------------------------------------------------------\n");
1631 * Add a Thread to the system for the first time. Should only be called once
1633 * @param t The Thread to add
// NOTE(review): the line storing t into thread_map appears to be elided from
// this listing, along with some braces.
1635 void ModelExecution::add_thread(Thread *t)
1637 unsigned int i = id_to_int(t->get_id());
1638 if (i >= thread_map.size())
1639 thread_map.resize(i + 1);
// The internal model-checker thread is never handed to the scheduler.
1641 if (!t->is_model_thread())
1642 scheduler->add_thread(t);
1646 * @brief Get a Thread reference by its ID
1647 * @param tid The Thread's ID
1648 * @return A Thread reference
// NOTE(review): the out-of-range branch (presumably returning NULL) is elided
// from this listing.
1650 Thread * ModelExecution::get_thread(thread_id_t tid) const
1652 unsigned int i = id_to_int(tid);
1653 if (i < thread_map.size())
1654 return thread_map[i];
1659 * @brief Get a reference to the Thread in which a ModelAction was executed
1660 * @param act The ModelAction
1661 * @return A Thread reference
1663 Thread * ModelExecution::get_thread(const ModelAction *act) const
// Thin forwarder to the by-id overload using the action's thread id.
1665 return get_thread(act->get_tid());
1669 * @brief Get a Thread reference by its pthread ID
1670 * @param index The pthread's ID
1671 * @return A Thread reference
// NOTE(review): this listing elides the branch structure between the direct
// lookup and the pthread_map path (including the declaration of `x`), so the
// control flow here is incomplete as shown.
1673 Thread * ModelExecution::get_pthread(pthread_t pid) {
1674 // pid 1 is reserved for the main thread, pthread ids should start from 2
1676 return get_thread(pid);
// Otherwise translate the pthread handle through pthread_map; `x.v` is
// presumably the integer payload of the pthread_t wrapper — TODO confirm.
1683 uint32_t thread_id = x.v;
1685 if (thread_id < pthread_counter + 1)
1686 return pthread_map[thread_id];
1692 * @brief Check if a Thread is currently enabled
1693 * @param t The Thread to check
1694 * @return True if the Thread is currently enabled
1696 bool ModelExecution::is_enabled(Thread *t) const
// Delegates directly to the scheduler's enabled-set.
1698 return scheduler->is_enabled(t);
1702 * @brief Check if a Thread is currently enabled
1703 * @param tid The ID of the Thread to check
1704 * @return True if the Thread is currently enabled
1706 bool ModelExecution::is_enabled(thread_id_t tid) const
// Delegates directly to the scheduler's enabled-set.
1708 return scheduler->is_enabled(tid);
1712 * @brief Select the next thread to execute based on the curren action
1714 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1715 * actions should be followed by the execution of their child thread. In either
1716 * case, the current action should determine the next thread schedule.
1718 * @param curr The current action
1719 * @return The next thread to run, if the current action will determine this
1720 * selection; otherwise NULL
// NOTE(review): the closing brace of the PTHREAD_CREATE branch and the final
// NULL return are elided from this listing.
1722 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1724 /* Do not split atomic RMW */
1725 if (curr->is_rmwr())
1726 return get_thread(curr);
1727 /* Follow CREATE with the created thread */
1728 /* which is not needed, because model.cc takes care of this */
1729 if (curr->get_type() == THREAD_CREATE)
1730 return curr->get_thread_operand();
1731 if (curr->get_type() == PTHREAD_CREATE) {
1732 return curr->get_thread_operand();
1738 * Takes the next step in the execution, if possible.
1739 * @param curr The current step to take
1740 * @return Returns the next Thread to run, if any; NULL if this execution
1743 Thread * ModelExecution::take_step(ModelAction *curr)
1745 Thread *curr_thrd = get_thread(curr);
1746 ASSERT(curr_thrd->get_state() == THREAD_READY);
// check_action_enabled is asserted but may itself have side effects; the
// action may be replaced (e.g. merged with an existing one) by
// check_current_action, so `curr` is reassigned.
1748 ASSERT(check_action_enabled(curr));	/* May have side effects? */
1749 curr = check_current_action(curr);
1752 /* Process this action in ModelHistory for records */
1754 model->get_history()->process_action( curr, curr->get_tid() );
// Threads that blocked or finished as a result of this step leave the
// scheduler's run queue.
1756 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1757 scheduler->remove_thread(curr_thrd);
1759 return action_select_next_thread(curr);
1762 /** This method removes references to an Action before we delete it. */
// Mirrors the bookkeeping done in add_action_to_lists()/add_write_to_lists():
// every list that may hold a node for this action is scrubbed. NOTE(review):
// several braces/guards are elided in this listing.
1764 void ModelExecution::removeAction(ModelAction *act) {
1766 action_trace.removeAction(act);
1769 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1770 (*vec)[act->get_tid()].removeAction(act);
// SC fences / unlocks were recorded in obj_map with a saved node reference.
1772 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1773 sllnode<ModelAction *> * listref = act->getActionRef();
1774 if (listref != NULL) {
1775 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1776 list->erase(listref);
// Waits were recorded under their mutex location (stored in the value field).
1778 } else if (act->is_wait()) {
1779 sllnode<ModelAction *> * listref = act->getActionRef();
1780 if (listref != NULL) {
1781 void *mutex_loc = (void *) act->get_value();
1782 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
// Freed writes were recorded in the per-thread write map.
1784 } else if (act->is_free()) {
1785 sllnode<ModelAction *> * listref = act->getActionRef();
1786 if (listref != NULL) {
1787 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1788 (*vec)[act->get_tid()].erase(listref);
1791 //Clear it from last_sc_map
1792 if (obj_last_sc_map.get(act->get_location()) == act) {
1793 obj_last_sc_map.remove(act->get_location());
1796 //Remove from Cyclegraph
1797 mo_graph->freeAction(act);
1801 /** Computes clock vector that all running threads have already synchronized to. */
// Min-merges the clock vectors of all live (incomplete) threads; anything at
// or below the result is globally visible. NOTE(review): the skip/continue for
// completed threads, NULL-cv guard, and the return are elided in this listing.
1803 ClockVector * ModelExecution::computeMinimalCV() {
1804 ClockVector *cvmin = NULL;
1805 //Thread 0 isn't a real thread, so skip it..
1806 for(unsigned int i = 1;i < thread_map.size();i++) {
1807 Thread * t = thread_map[i];
1808 if (t->is_complete())
1810 thread_id_t tid = int_to_id(i);
1811 ClockVector * cv = get_cv(tid);
// First live thread seeds cvmin; later threads are folded in via minmerge.
1813 cvmin = new ClockVector(cv, NULL);
1815 cvmin->minmerge(cv);
1821 /** Sometimes we need to remove an action that is the most recent in the thread.  This happens if it is mo before action in other threads.  In that case we need to create a replacement latest ModelAction */
// The replacement is a seq_cst NOP carrying a fresh sequence number, the old
// action's clock vector (via create_cv) and its last-fence-release link, so
// thrd_last_action stays valid after `act` is deleted.
1823 void ModelExecution::fixupLastAct(ModelAction *act) {
1824 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1825 newact->set_seq_number(get_next_seq_num());
1826 newact->create_cv(act);
1827 newact->set_last_fence_release(act->get_last_fence_release());
1828 add_action_to_lists(newact, false);
1831 /** Compute which actions to free.  */
// Garbage-collects old actions from the trace once it exceeds traceminsize:
// (1) mark invisible writes (and their mo-predecessors) free, (2) scrub
// references in the retained window, (3) delete/weaken actions in the old
// window. NOTE(review): this listing elides many lines (braces, early return,
// iterator advances, deletion calls), so the fragment is incomplete as shown.
1833 void ModelExecution::collectActions() {
// Nothing to collect until the trace is longer than the configured minimum.
1834 if (priv->used_sequence_numbers < params->traceminsize)
1837 //Compute minimal clock vector for all live threads
1838 ClockVector *cvmin = computeMinimalCV();
1839 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with sequence numbers at or below maxtofree are candidates.
1840 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1842 //Next walk action trace...  When we hit an action, see if it is
1843 //invisible (e.g., earlier than the first before the minimum
1844 //clock for the thread...  if so erase it and all previous
1845 //actions in cyclegraph
1846 sllnode<ModelAction*> * it;
1847 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1848 ModelAction *act = it->getVal();
1849 modelclock_t actseq = act->get_seq_number();
1851 //See if we are done
1852 if (actseq > maxtofree)
1855 thread_id_t act_tid = act->get_tid();
1856 modelclock_t tid_clock = cvmin->getClock(act_tid);
1858 //Free if it is invisible or we have set a flag to remove visible actions.
1859 if (actseq <= tid_clock || params->removevisible) {
// Resolve the relevant write: the action itself, or the store a read read
// from (the write assignment for the is_write case is elided in this listing).
1860 ModelAction * write;
1861 if (act->is_write()) {
1863 } else if (act->is_read()) {
1864 write = act->get_reads_from();
1868 //Mark everything earlier in MO graph to be freed
// BFS backwards over in-edges of the cycle graph, marking every
// not-yet-freed predecessor store as free.
1869 CycleNode * cn = mo_graph->getNode_noCreate(write);
1871 queue->push_back(cn);
1872 while(!queue->empty()) {
1873 CycleNode * node = queue->back();
1875 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1876 CycleNode * prevnode = node->getInEdge(i);
1877 ModelAction * prevact = prevnode->getAction();
1878 if (prevact->get_type() != READY_FREE) {
1879 prevact->set_free();
1880 queue->push_back(prevnode);
1888 //We may need to remove read actions in the window we don't delete to preserve correctness.
// Pass over the retained (recent) window, from the end back to `it`.
1890 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1891 ModelAction *act = it2->getVal();
1892 //Do iteration early in case we delete the act
// The last action of a still-running thread must not be deleted outright
// (fixupLastAct would be needed); track that case.
1894 bool islastact = false;
1895 ModelAction *lastact = get_last_action(act->get_tid());
1896 if (act == lastact) {
1897 Thread * th = get_thread(act);
1898 islastact = !th->is_complete();
1901 if (act->is_read()) {
1902 if (act->get_reads_from()->is_free()) {
1903 if (act->is_rmw()) {
1904 //Weaken a RMW from a freed store to a write
1905 act->set_type(ATOMIC_WRITE);
1917 //If we don't delete the action, we should remove references to release fences
1919 const ModelAction *rel_fence =act->get_last_fence_release();
1920 if (rel_fence != NULL) {
1921 modelclock_t relfenceseq = rel_fence->get_seq_number();
1922 thread_id_t relfence_tid = rel_fence->get_tid();
1923 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1924 //Remove references to irrelevant release fences
// A release fence everyone has already synchronized past contributes nothing.
1925 if (relfenceseq <= tid_clock)
1926 act->set_last_fence_release(NULL);
1929 //Now we are in the window of old actions that we remove if possible
1930 for (;it != NULL;) {
1931 ModelAction *act = it->getVal();
1932 //Do iteration early since we may delete node...
1934 bool islastact = false;
1935 ModelAction *lastact = get_last_action(act->get_tid());
1936 if (act == lastact) {
1937 Thread * th = get_thread(act);
1938 islastact = !th->is_complete();
1941 if (act->is_read()) {
1942 if (act->get_reads_from()->is_free()) {
1943 if (act->is_rmw()) {
1944 act->set_type(ATOMIC_WRITE);
1954 } else if (act->is_free()) {
1961 } else if (act->is_write()) {
1962 //Do nothing with write that hasn't been marked to be freed
1963 } else if (islastact) {
1964 //Keep the last action for non-read/write actions
1965 } else if (act->is_fence()) {
1966 //Note that acquire fences can always be safely
1967 //removed, but could incur extra overheads in
1968 //traversals.  Removing them before the cvmin seems
1969 //like a good compromise.
1971 //Release fences before the cvmin don't do anything
1972 //because everyone has already synchronized.
1974 //Sequentially fences before cvmin are redundant
1975 //because happens-before will enforce same
1978 modelclock_t actseq = act->get_seq_number();
1979 thread_id_t act_tid = act->get_tid();
1980 modelclock_t tid_clock = cvmin->getClock(act_tid);
1981 if (actseq <= tid_clock) {
1983 // Remove reference to act from thrd_last_fence_release
1984 int thread_id = id_to_int( act->get_tid() );
1985 if (thrd_last_fence_release[thread_id] == act) {
1986 thrd_last_fence_release[thread_id] = NULL;
1992 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1993 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1994 //need to keep most recent unlock/wait for each lock
1995 if(act->is_unlock() || act->is_wait()) {
// Only the most recent unlock/wait per mutex is needed for future lookups.
1996 ModelAction * lastlock = get_last_unlock(act);
1997 if (lastlock != act) {
2002 } else if (act->is_create()) {
// A create for an already-finished thread is dead weight.
2003 if (act->get_thread_operand()->is_complete()) {
2015 //If we don't delete the action, we should remove references to release fences
2016 const ModelAction *rel_fence =act->get_last_fence_release();
2017 if (rel_fence != NULL) {
2018 modelclock_t relfenceseq = rel_fence->get_seq_number();
2019 thread_id_t relfence_tid = rel_fence->get_tid();
2020 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2021 //Remove references to irrelevant release fences
2022 if (relfenceseq <= tid_clock)
2023 act->set_last_fence_release(NULL);
2031 Fuzzer * ModelExecution::getFuzzer() {