11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
/* Global per-category counters of atomic operations observed during an
 * execution. Incremented by record_atomic_stats() and reported by
 * print_atomic_accesses(). */
23 static unsigned int atomic_load_count = 0;
24 static unsigned int atomic_store_count = 0;
25 static unsigned int atomic_rmw_count = 0;
27 static unsigned int atomic_fence_count = 0;
28 static unsigned int atomic_lock_count = 0;
29 static unsigned int atomic_trylock_count = 0;
30 static unsigned int atomic_unlock_count = 0;
31 static unsigned int atomic_notify_count = 0;
32 static unsigned int atomic_wait_count = 0;
33 static unsigned int atomic_timedwait_count = 0;
37 * Structure for holding small ModelChecker members that should be snapshotted
/* NOTE(review): this excerpt elides several lines of the struct (see gaps in
 * the original line numbering); the constructor initializer list, the
 * destructor body, and some fields are incomplete here. */
39 struct model_snapshot_members {
40 model_snapshot_members() :
41 /* First thread created will have id INITIAL_THREAD_ID */
42 next_thread_id(INITIAL_THREAD_ID),
43 used_sequence_numbers(0),
/* Destructor: iterates over the recorded bug reports (per-element cleanup
 * statement elided in this excerpt). */
48 ~model_snapshot_members() {
49 for (unsigned int i = 0;i < bugs.size();i++)
/* Next thread id to hand out; since ids are assigned densely from 0 this also
 * serves as the thread count (see get_num_threads()). */
54 unsigned int next_thread_id;
/* Last sequence number assigned to a ModelAction (see get_next_seq_num()). */
55 modelclock_t used_sequence_numbers;
/* Bug reports collected via assert_bug() during this execution. */
56 SnapVector<bug_message *> bugs;
57 /** @brief Incorrectly-ordered synchronization was made */
63 /** @brief Constructor */
/* @param m The owning ModelChecker; handed to the fuzzer below.
 * @param scheduler The Scheduler that this execution registers itself with.
 * NOTE(review): several initializer-list entries and member initializers are
 * elided in this excerpt (see gaps in the original numbering). */
64 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
68 thread_map(2), /* We'll always need at least 2 threads */
73 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 priv(new struct model_snapshot_members ()),
82 mo_graph(new CycleGraph()),
84 fuzzer(new NewFuzzer()),
90 /* Initialize a model-checker thread, for special ModelActions */
91 model_thread = new Thread(get_next_id());
92 add_thread(model_thread);
/* Wire the fuzzer and the scheduler back to this execution. */
93 fuzzer->register_engine(m, this);
94 scheduler->register_engine(this);
/* TLS key whose destructor callback runs when a thread exits. */
96 pthread_key_create(&pthreadkey, tlsdestructor);
100 /** @brief Destructor */
101 ModelExecution::~ModelExecution()
/* Delete every Thread this execution created (including the model thread).
 * NOTE(review): trailing teardown lines are elided in this excerpt. */
103 for (unsigned int i = 0;i < get_num_threads();i++)
104 delete get_thread(int_to_id(i));
110 int ModelExecution::get_execution_number() const
112 return model->get_execution_number();
115 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
/* Look up the per-location vector of action lists for `ptr`, lazily creating
 * it on a miss. NOTE(review): the null check, hash insertion, and return
 * statement are elided in this excerpt. */
117 SnapVector<action_list_t> *tmp = hash->get(ptr);
119 tmp = new SnapVector<action_list_t>();
125 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
/* Look up the per-location simple action list for `ptr`, lazily creating it on
 * a miss. NOTE(review): the null check, hash insertion, and return statement
 * are elided in this excerpt. */
127 simple_action_list_t *tmp = hash->get(ptr);
129 tmp = new simple_action_list_t();
135 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
/* Overload of get_safe_ptr_vect_action for vectors of simple action lists;
 * same lazy-create-on-miss pattern. NOTE(review): the null check, hash
 * insertion, and return statement are elided in this excerpt. */
137 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
139 tmp = new SnapVector<simple_action_list_t>();
146 * When vectors of action lists are reallocated due to resize, the root address of
147 * action lists may change. Hence we need to fix the parent pointer of the children
150 static void fixup_action_list(SnapVector<action_list_t> * vec)
152 for (uint i = 0;i < vec->size();i++) {
153 action_list_t * list = &(*vec)[i];
/* Presumably each list's parent/back pointers are repaired here; the call
 * itself is elided in this excerpt — confirm against the full source. */
160 static inline void record_atomic_stats(ModelAction * act)
/* Bump the global counter matching this action's type.
 * NOTE(review): several case labels (loads, stores' label, RMWs, locks,
 * waits, and the default) are elided in this excerpt. */
162 switch (act->get_type()) {
164 atomic_store_count++;
173 atomic_fence_count++;
179 atomic_trylock_count++;
182 atomic_unlock_count++;
184 case ATOMIC_NOTIFY_ONE:
185 case ATOMIC_NOTIFY_ALL:
186 atomic_notify_count++;
191 case ATOMIC_TIMEDWAIT:
192 atomic_timedwait_count++;
198 void print_atomic_accesses()
/* Report the per-category atomic-operation counters accumulated by
 * record_atomic_stats() over the run. */
200 model_print("atomic store count: %u\n", atomic_store_count);
201 model_print("atomic load count: %u\n", atomic_load_count);
202 model_print("atomic rmw count: %u\n", atomic_rmw_count);
204 model_print("atomic fence count: %u\n", atomic_fence_count);
205 model_print("atomic lock count: %u\n", atomic_lock_count);
206 model_print("atomic trylock count: %u\n", atomic_trylock_count);
207 model_print("atomic unlock count: %u\n", atomic_unlock_count);
208 model_print("atomic notify count: %u\n", atomic_notify_count);
209 model_print("atomic wait count: %u\n", atomic_wait_count);
210 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
213 /** @return a thread ID for a new Thread */
214 thread_id_t ModelExecution::get_next_id()
216 return priv->next_thread_id++;
219 /** @return the number of user threads created during this execution */
220 unsigned int ModelExecution::get_num_threads() const
222 return priv->next_thread_id;
225 /** @return a sequence number for a new ModelAction */
226 modelclock_t ModelExecution::get_next_seq_num()
228 return ++priv->used_sequence_numbers;
231 /** @return a sequence number for a new ModelAction */
232 modelclock_t ModelExecution::get_curr_seq_num()
234 return priv->used_sequence_numbers;
237 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
238 void ModelExecution::restore_last_seq_num()
240 priv->used_sequence_numbers--;
244 * @brief Should the current action wake up a given thread?
246 * @param curr The current action
247 * @param thread The thread that we might wake up
248 * @return True, if we should wake up the sleeping thread; false otherwise
250 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
/* NOTE(review): the return statements following each guard below are elided
 * in this excerpt (see gaps in the original numbering). */
252 const ModelAction *asleep = thread->get_pending();
253 /* Don't allow partial RMW to wake anyone up */
256 /* Synchronizing actions may have been backtracked */
257 if (asleep->could_synchronize_with(curr))
259 /* All acquire/release fences and fence-acquire/store-release */
260 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
262 /* Fence-release + store can awake load-acquire on the same location */
263 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
/* Only wakes the load if the sleeping thread's last action precedes the
 * writer's last release fence. */
264 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
265 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
268 /* The sleep is literally sleeping */
269 if (asleep->is_sleep()) {
/* Defer the wake/no-wake decision for real sleeps to the fuzzer. */
270 if (fuzzer->shouldWake(asleep))
277 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
/* Scan every thread currently in the scheduler's sleep set and wake those for
 * which `curr` may enable progress (see should_wake_up()). */
279 for (unsigned int i = 0;i < get_num_threads();i++) {
280 Thread *thr = get_thread(int_to_id(i));
281 if (scheduler->is_sleep_set(thr)) {
282 if (should_wake_up(curr, thr)) {
283 /* Remove this thread from sleep set */
284 scheduler->remove_sleep(thr);
/* Mark threads blocked on an explicit SLEEP action as genuinely woken. */
285 if (thr->get_pending()->is_sleep())
286 thr->set_wakeup_state(true);
292 void ModelExecution::assert_bug(const char *msg)
/* Record a bug report for this execution. @param msg Human-readable bug
 * description, copied into a new bug_message. NOTE(review): any follow-up
 * (e.g. halting the execution) is elided in this excerpt. */
294 priv->bugs.push_back(new bug_message(msg));
298 /** @return True, if any bugs have been reported for this execution */
299 bool ModelExecution::have_bug_reports() const
301 return priv->bugs.size() != 0;
/* Accessor for the recorded bug reports. Body elided in this excerpt —
 * presumably returns &priv->bugs; confirm against the full source. */
304 SnapVector<bug_message *> * ModelExecution::get_bugs() const
310 * Check whether the current trace has triggered an assertion which should halt
313 * @return True, if the execution should be aborted; false otherwise
315 bool ModelExecution::has_asserted() const
317 return priv->asserted;
321 * Trigger a trace assertion which should cause this execution to be halted.
322 * This can be due to a detected bug or due to an infeasibility that should
325 void ModelExecution::set_assert()
327 priv->asserted = true;
331 * Check if we are in a deadlock. Should only be called at the end of an
332 * execution, although it should not give false positives in the middle of an
333 * execution (there should be some ENABLED thread).
335 * @return True if program is in a deadlock; false otherwise
337 bool ModelExecution::is_deadlocked() const
339 bool blocking_threads = false;
/* NOTE(review): lines between the loop header and the body shown here are
 * elided in this excerpt (likely an early-exit for enabled threads). */
340 for (unsigned int i = 0;i < get_num_threads();i++) {
341 thread_id_t tid = int_to_id(i);
344 Thread *t = get_thread(tid);
/* A non-model thread with a pending action counts as blocked. */
345 if (!t->is_model_thread() && t->get_pending())
346 blocking_threads = true;
348 return blocking_threads;
352 * Check if this is a complete execution. That is, have all thread completed
353 * execution (rather than exiting because sleep sets have forced a redundant
356 * @return True if the execution is complete.
358 bool ModelExecution::is_complete_execution() const
/* NOTE(review): the return statements are elided in this excerpt; any enabled
 * thread implies the execution is not complete. */
360 for (unsigned int i = 0;i < get_num_threads();i++)
361 if (is_enabled(int_to_id(i)))
/* Wrap the last plain (non-atomic) store to `location` in a synthetic
 * NONATOMIC_WRITE ModelAction so atomic reads can consider it as a source.
 * NOTE(review): the trailing return of `act` is elided in this excerpt. */
366 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
367 uint64_t value = *((const uint64_t *) location);
368 modelclock_t storeclock;
369 thread_id_t storethread;
/* Recover which thread performed the store and at which clock. */
370 getStoreThreadAndClock(location, &storethread, &storeclock);
371 setAtomicStoreFlag(location);
372 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
/* Keep the synthetic write at the clock of the original store. */
373 act->set_seq_number(storeclock);
/* Register the synthetic write in the usual bookkeeping structures. */
374 add_normal_write_to_lists(act);
375 add_write_to_lists(act);
376 w_modification_order(act);
378 model->get_history()->process_action(act, act->get_tid());
384 * Processes a read model action.
385 * @param curr is the read model action to process.
386 * @param rf_set is the set of model actions we can possibly read from
387 * @return True if the read can be pruned from the thread map list.
/* NOTE(review): this excerpt elides several lines (loop increments, closing
 * braces, retry path); read with the full source at hand. */
389 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
391 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
/* If the location also saw a plain store, add it as a candidate source. */
392 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
393 if (hasnonatomicstore) {
394 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
395 rf_set->push_back(nonatomicstore);
398 // Remove writes that violate read modification order
/* check_only pass (last arg true): filter without touching priorset. */
401 while (i < rf_set->size()) {
402 ModelAction * rf = (*rf_set)[i];
403 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
/* swap-with-back removal keeps the scan O(n). */
404 (*rf_set)[i] = rf_set->back();
/* Let the fuzzer pick which remaining write to read from. */
411 int index = fuzzer->selectWrite(curr, rf_set);
413 ModelAction *rf = (*rf_set)[index];
416 bool canprune = false;
417 if (r_modification_order(curr, rf, priorset, &canprune)) {
/* Commit the mo edges implied by the chosen reads-from. */
418 for(unsigned int i=0;i<priorset->size();i++) {
419 mo_graph->addEdge((*priorset)[i], rf);
422 get_thread(curr)->set_return_value(curr->get_return_value());
424 //Update acquire fence clock vector
425 ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
427 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
428 return canprune && (curr->get_type() == ATOMIC_READ);
/* Chosen write was inconsistent: drop it and (presumably) retry. */
431 (*rf_set)[index] = rf_set->back();
437 * Processes a lock, trylock, or unlock model action. @param curr is
438 * the read model action to process.
440 * The try lock operation checks whether the lock is taken. If not,
441 * it falls to the normal lock operation case. If so, it returns
444 * The lock operation has already been checked that it is enabled, so
445 * it just grabs the lock and synchronizes with the previous unlock.
447 * The unlock operation has to re-enable all of the threads that are
448 * waiting on the lock.
450 * @return True if synchronization was updated; false otherwise
/* NOTE(review): this excerpt elides many lines (case labels, breaks, closing
 * braces, the final return); read with the full source at hand. */
452 bool ModelExecution::process_mutex(ModelAction *curr)
454 cdsc::mutex *mutex = curr->get_mutex();
455 struct cdsc::mutex_state *state = NULL;
458 state = mutex->get_state();
460 switch (curr->get_type()) {
461 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff nobody holds the lock right now. */
462 bool success = !state->locked;
463 curr->set_try_lock(success);
/* Failure path returns 0 to the caller; success returns 1 and falls
 * through to the lock case below. */
465 get_thread(curr)->set_return_value(0);
468 get_thread(curr)->set_return_value(1);
470 //otherwise fall into the lock case
472 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
473 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
474 // assert_bug("Lock access before initialization");
476 // TODO: lock count for recursive mutexes
/* Take ownership: the mutex records the owning Thread pointer. */
477 state->locked = get_thread(curr);
478 ModelAction *unlock = get_last_unlock(curr);
479 //synchronize with the previous unlock statement
480 if (unlock != NULL) {
481 synchronize(unlock, curr);
487 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
488 if (fuzzer->shouldWait(curr)) {
489 /* wake up the other threads */
490 for (unsigned int i = 0;i < get_num_threads();i++) {
491 Thread *t = get_thread(int_to_id(i));
492 Thread *curr_thrd = get_thread(curr);
493 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
497 /* unlock the lock - after checking who was waiting on it */
498 state->locked = NULL;
500 /* remove old wait action and disable this thread */
501 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
502 for (sllnode<ModelAction *> * it = waiters->begin();it != NULL;it = it->getNext()) {
503 ModelAction * wait = it->getVal();
504 if (wait->get_tid() == curr->get_tid()) {
/* Register this wait on the condvar and put the thread to sleep. */
510 waiters->push_back(curr);
511 scheduler->sleep(get_thread(curr));
516 case ATOMIC_TIMEDWAIT:
517 case ATOMIC_UNLOCK: {
518 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
519 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
520 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
521 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
522 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
523 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
524 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
525 //MUST EVENMTUALLY RELEASE...
527 // TODO: lock count for recursive mutexes
528 /* wake up the other threads */
529 for (unsigned int i = 0;i < get_num_threads();i++) {
530 Thread *t = get_thread(int_to_id(i));
531 Thread *curr_thrd = get_thread(curr);
532 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
536 /* unlock the lock - after checking who was waiting on it */
537 state->locked = NULL;
540 case ATOMIC_NOTIFY_ALL: {
541 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
542 //activate all the waiting threads
543 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
544 scheduler->wake(get_thread(rit->getVal()));
549 case ATOMIC_NOTIFY_ONE: {
550 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
/* Let the fuzzer choose which single waiter to wake. */
551 if (waiters->size() != 0) {
552 Thread * thread = fuzzer->selectNotify(waiters);
553 scheduler->wake(thread);
565 * Process a write ModelAction
566 * @param curr The ModelAction to process
567 * @return True if the mo_graph was updated or promises were resolved
569 void ModelExecution::process_write(ModelAction *curr)
571 w_modification_order(curr);
572 get_thread(curr)->set_return_value(VALUE_NONE);
576 * Process a fence ModelAction
577 * @param curr The ModelAction to process
578 * @return True if synchronization was updated
580 void ModelExecution::process_fence(ModelAction *curr)
583 * fence-relaxed: no-op
584 * fence-release: only log the occurence (not in this function), for
585 * use in later synchronization
586 * fence-acquire (this function): search for hypothetical release
588 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
/* Acquire fences absorb the thread's accumulated acquire-fence clocks. */
590 if (curr->is_acquire()) {
591 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
596 * @brief Process the current action for thread-related activity
598 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
599 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
600 * synchronization, etc. This function is a no-op for non-THREAD actions
601 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
603 * @param curr The current action
604 * @return True if synchronization was updated or a thread completed
/* NOTE(review): case labels for joins, breaks, and closing braces are elided
 * in this excerpt (see gaps in the original numbering). */
606 void ModelExecution::process_thread_action(ModelAction *curr)
608 switch (curr->get_type()) {
609 case THREAD_CREATE: {
610 thrd_t *thrd = (thrd_t *)curr->get_location();
611 struct thread_params *params = (struct thread_params *)curr->get_value();
612 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
613 curr->set_thread_operand(th);
615 th->set_creation(curr);
618 case PTHREAD_CREATE: {
/* pthread ids are handed out from a global counter and stored at the
 * user-supplied location. */
619 (*(uint32_t *)curr->get_location()) = pthread_counter++;
621 struct pthread_params *params = (struct pthread_params *)curr->get_value();
622 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
623 curr->set_thread_operand(th);
625 th->set_creation(curr);
/* Grow the id->Thread map on demand; counter is 1-based, index 0-based. */
627 if ( pthread_map.size() < pthread_counter )
628 pthread_map.resize( pthread_counter );
629 pthread_map[ pthread_counter-1 ] = th;
/* Join (elided label): synchronize with the joined thread's last action. */
634 Thread *blocking = curr->get_thread_operand();
635 ModelAction *act = get_last_action(blocking->get_id());
636 synchronize(act, curr);
640 Thread *blocking = curr->get_thread_operand();
641 ModelAction *act = get_last_action(blocking->get_id());
642 synchronize(act, curr);
646 case THREADONLY_FINISH:
647 case THREAD_FINISH: {
648 Thread *th = get_thread(curr);
/* Special handling when the initial thread finishes (body elided). */
649 if (curr->get_type() == THREAD_FINISH &&
650 th == model->getInitThread()) {
656 /* Wake up any joining threads */
657 for (unsigned int i = 0;i < get_num_threads();i++) {
658 Thread *waiting = get_thread(int_to_id(i));
659 if (waiting->waiting_on() == th &&
660 waiting->get_pending()->is_thread_join())
661 scheduler->wake(waiting);
/* Sleep (elided label): park the thread with its pending action. */
670 Thread *th = get_thread(curr);
671 th->set_pending(curr);
672 scheduler->add_sleep(th);
681 * Initialize the current action by performing one or more of the following
682 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
683 * manipulating backtracking sets, allocating and
684 * initializing clock vectors, and computing the promises to fulfill.
686 * @param curr The current action, as passed from the user context; may be
687 * freed/invalidated after the execution of this function, with a different
688 * action "returned" its place (pass-by-reference)
689 * @return True if curr is a newly-explored action; false otherwise
/* NOTE(review): the RMW-merge early-return path is elided in this excerpt. */
691 bool ModelExecution::initialize_curr_action(ModelAction **curr)
693 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
/* Merge the second half of an RMW into the earlier RMWR action. */
694 ModelAction *newcurr = process_rmw(*curr);
700 ModelAction *newcurr = *curr;
702 newcurr->set_seq_number(get_next_seq_num());
703 /* Always compute new clock vector */
704 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
706 /* Assign most recent release fence */
707 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
709 return true; /* This was a new ModelAction */
714 * @brief Establish reads-from relation between two actions
716 * Perform basic operations involved with establishing a concrete rf relation,
717 * including setting the ModelAction data and checking for release sequences.
719 * @param act The action that is reading (must be a read)
720 * @param rf The action from which we are reading (must be a write)
722 * @return True if this read established synchronization
/* NOTE(review): the is_read assert and null-check lines are elided in this
 * excerpt (see gaps in the original numbering). */
725 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
728 ASSERT(rf->is_write());
730 act->set_read_from(rf);
731 if (act->is_acquire()) {
/* Acquire reads merge the happens-before clocks released by the write. */
732 ClockVector *cv = get_hb_from_write(rf);
735 act->get_cv()->merge(cv);
740 * @brief Synchronizes two actions
742 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
743 * This function performs the synchronization as well as providing other hooks
744 * for other checks along with synchronization.
746 * @param first The left-hand side of the synchronizes-with relation
747 * @param second The right-hand side of the synchronizes-with relation
748 * @return True if the synchronization was successful (i.e., was consistent
749 * with the execution order); false otherwise
751 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* `second` must not precede `first` in execution order; the failure-return
 * line is elided in this excerpt. */
753 if (*second < *first) {
754 ASSERT(0); //This should not happend
757 return second->synchronize_with(first);
761 * @brief Check whether a model action is enabled.
763 * Checks whether an operation would be successful (i.e., is a lock already
764 * locked, or is the joined thread already complete).
766 * For yield-blocking, yields are never enabled.
768 * @param curr is the ModelAction to check whether it is enabled.
769 * @return a bool that indicates whether the action is enabled.
/* NOTE(review): the return statements inside each branch are elided in this
 * excerpt (see gaps in the original numbering). */
771 bool ModelExecution::check_action_enabled(ModelAction *curr) {
772 if (curr->is_lock()) {
773 cdsc::mutex *lock = curr->get_mutex();
774 struct cdsc::mutex_state *state = lock->get_state();
776 Thread *lock_owner = (Thread *)state->locked;
777 Thread *curr_thread = get_thread(curr);
/* Relocking a recursive mutex you already own stays enabled. */
778 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
784 } else if (curr->is_thread_join()) {
/* Join blocks until the target thread completes. */
785 Thread *blocking = curr->get_thread_operand();
786 if (!blocking->is_complete()) {
789 } else if (curr->is_sleep()) {
/* The fuzzer may decide a sleep should not actually block. */
790 if (!fuzzer->shouldSleep(curr))
798 * This is the heart of the model checker routine. It performs model-checking
799 * actions corresponding to a given "current action." Among other processes, it
800 * calculates reads-from relationships, updates synchronization clock vectors,
801 * forms a memory_order constraints graph, and handles replay/backtrack
802 * execution when running permutations of previously-observed executions.
804 * @param curr The current action to process
805 * @return The ModelAction that is actually executed; may be different than
/* NOTE(review): several lines (asserts, the fence/mutex process calls' bodies,
 * the final return) are elided in this excerpt. */
808 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* RMWC/RMW actions are the completion half of an earlier RMWR. */
811 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
812 bool newly_explored = initialize_curr_action(&curr);
816 wake_up_sleeping_actions(curr);
818 SnapVector<ModelAction *> * rf_set = NULL;
819 /* Build may_read_from set for newly-created actions */
820 if (newly_explored && curr->is_read())
821 rf_set = build_may_read_from(curr);
823 bool canprune = false;
825 if (curr->is_read() && !second_part_of_rmw) {
826 canprune = process_read(curr, rf_set);
829 ASSERT(rf_set == NULL);
831 /* Add the action to lists */
832 if (!second_part_of_rmw) {
834 record_atomic_stats(curr);
836 add_action_to_lists(curr, canprune);
839 if (curr->is_write())
840 add_write_to_lists(curr);
842 process_thread_action(curr);
/* Dispatch to the per-kind processors (bodies elided here). */
844 if (curr->is_write())
847 if (curr->is_fence())
850 if (curr->is_mutex_op())
856 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
857 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
858 ModelAction *lastread = get_last_action(act->get_tid());
859 lastread->process_rmw(act);
/* RMW atomicity: edge readfrom(lastread) --mo--> lastread. NOTE(review): the
 * surrounding guard and the return of `lastread` are elided in this excerpt. */
861 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
867 * @brief Updates the mo_graph with the constraints imposed from the current
870 * Basic idea is the following: Go through each other thread and find
871 * the last action that happened before our read. Two cases:
873 * -# The action is a write: that write must either occur before
874 * the write we read from or be the write we read from.
875 * -# The action is a read: the write that that action read from
876 * must occur before the write we read from or be the same write.
878 * @param curr The current action. Must be a read.
879 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
880 * @param check_only If true, then only check whether the current action satisfies
881 * read modification order or not, without modifiying priorset and canprune.
883 * @return True if modification order edges were added; false otherwise
/* NOTE(review): this excerpt elides many lines (early exits, `return false`
 * statements after each checkReachable, check_only guards, closing braces);
 * read with the full source at hand. */
886 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
887 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
889 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
890 ASSERT(curr->is_read());
892 /* Last SC fence in the current thread */
893 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
895 int tid = curr->get_tid();
897 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet.  */
898 if ((int)thrd_lists->size() <= tid) {
899 uint oldsize = thrd_lists->size();
900 thrd_lists->resize(priv->next_thread_id);
/* Placement-new each freshly exposed slot, then repair child pointers. */
901 for(uint i = oldsize;i < priv->next_thread_id;i++)
902 new (&(*thrd_lists)[i]) action_list_t();
904 fixup_action_list(thrd_lists);
907 ModelAction *prev_same_thread = NULL;
908 /* Iterate over all threads */
/* tid walks the threads starting at curr's own, wrapping modulo count. */
909 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
910 /* Last SC fence in thread tid */
911 ModelAction *last_sc_fence_thread_local = NULL;
913 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
915 /* Last SC fence in thread tid, before last SC fence in current thread */
916 ModelAction *last_sc_fence_thread_before = NULL;
917 if (last_sc_fence_local)
918 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
920 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
921 if (prev_same_thread != NULL &&
922 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
923 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
927 /* Iterate over actions in thread, starting from most recent */
928 action_list_t *list = &(*thrd_lists)[tid];
929 sllnode<ModelAction *> * rit;
930 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
931 ModelAction *act = rit->getVal();
936 /* Don't want to add reflexive edges on 'rf' */
937 if (act->equals(rf)) {
938 if (act->happens_before(curr))
944 if (act->is_write()) {
945 /* C++, Section 29.3 statement 5 */
946 if (curr->is_seqcst() && last_sc_fence_thread_local &&
947 *act < *last_sc_fence_thread_local) {
/* A cycle (rf reachable from act) means this rf is infeasible. */
948 if (mo_graph->checkReachable(rf, act))
951 priorset->push_back(act);
954 /* C++, Section 29.3 statement 4 */
955 else if (act->is_seqcst() && last_sc_fence_local &&
956 *act < *last_sc_fence_local) {
957 if (mo_graph->checkReachable(rf, act))
960 priorset->push_back(act);
963 /* C++, Section 29.3 statement 6 */
964 else if (last_sc_fence_thread_before &&
965 *act < *last_sc_fence_thread_before) {
966 if (mo_graph->checkReachable(rf, act))
969 priorset->push_back(act);
975 * Include at most one act per-thread that "happens
978 if (act->happens_before(curr)) {
980 if (last_sc_fence_local == NULL ||
981 (*last_sc_fence_local < *act)) {
982 prev_same_thread = act;
985 if (act->is_write()) {
986 if (mo_graph->checkReachable(rf, act))
989 priorset->push_back(act);
/* act is a read: constrain via the write it read from instead. */
991 ModelAction *prevrf = act->get_reads_from();
992 if (!prevrf->equals(rf)) {
993 if (mo_graph->checkReachable(rf, prevrf))
996 priorset->push_back(prevrf);
998 if (act->get_tid() == curr->get_tid()) {
999 //Can prune curr from obj list
1013 * Updates the mo_graph with the constraints imposed from the current write.
1015 * Basic idea is the following: Go through each other thread and find
1016 * the lastest action that happened before our write. Two cases:
1018 * (1) The action is a write => that write must occur before
1021 * (2) The action is a read => the write that that action read from
1022 * must occur before the current write.
1024 * This method also handles two other issues:
1026 * (I) Sequential Consistency: Making sure that if the current write is
1027 * seq_cst, that it occurs after the previous seq_cst write.
1029 * (II) Sending the write back to non-synchronizing reads.
1031 * @param curr The current action. Must be a write.
1032 * @param send_fv A vector for stashing reads to which we may pass our future
1033 * value. If NULL, then don't record any future values.
1034 * @return True if modification order edges were added; false otherwise
/* NOTE(review): asserts, loop-variable declarations, `break`s and closing
 * braces are elided in this excerpt (see gaps in the original numbering). */
1036 void ModelExecution::w_modification_order(ModelAction *curr)
1038 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1040 ASSERT(curr->is_write());
/* All writes ordered before curr are collected here, then committed as mo
 * edges in one addEdges() call at the end. */
1042 SnapList<ModelAction *> edgeset;
1044 if (curr->is_seqcst()) {
1045 /* We have to at least see the last sequentially consistent write,
1046 so we are initialized. */
1047 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1048 if (last_seq_cst != NULL) {
1049 edgeset.push_back(last_seq_cst);
1051 //update map for next query
1052 obj_last_sc_map.put(curr->get_location(), curr);
1055 /* Last SC fence in the current thread */
1056 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1058 /* Iterate over all threads */
1059 for (i = 0;i < thrd_lists->size();i++) {
1060 /* Last SC fence in thread i, before last SC fence in current thread */
1061 ModelAction *last_sc_fence_thread_before = NULL;
1062 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1063 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1065 /* Iterate over actions in thread, starting from most recent */
1066 action_list_t *list = &(*thrd_lists)[i];
1067 sllnode<ModelAction*>* rit;
1068 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1069 ModelAction *act = rit->getVal();
1072 * 1) If RMW and it actually read from something, then we
1073 * already have all relevant edges, so just skip to next
1076 * 2) If RMW and it didn't read from anything, we should
1077 * whatever edge we can get to speed up convergence.
1079 * 3) If normal write, we need to look at earlier actions, so
1080 * continue processing list.
1082 if (curr->is_rmw()) {
1083 if (curr->get_reads_from() != NULL)
1091 /* C++, Section 29.3 statement 7 */
1092 if (last_sc_fence_thread_before && act->is_write() &&
1093 *act < *last_sc_fence_thread_before) {
1094 edgeset.push_back(act);
1099 * Include at most one act per-thread that "happens
1102 if (act->happens_before(curr)) {
1104 * Note: if act is RMW, just add edge:
1106 * The following edge should be handled elsewhere:
1107 * readfrom(act) --mo--> act
1109 if (act->is_write())
1110 edgeset.push_back(act);
1111 else if (act->is_read()) {
1112 //if previous read accessed a null, just keep going
1113 edgeset.push_back(act->get_reads_from());
1119 mo_graph->addEdges(&edgeset, curr);
1124 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1125 * some constraints. This method checks one the following constraint (others
1126 * require compiler support):
1128 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1129 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
/* NOTE(review): loop-variable declaration, breaks, and return statements are
 * elided in this excerpt (see gaps in the original numbering). */
1131 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1133 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1135 /* Iterate over all threads */
1136 for (i = 0;i < thrd_lists->size();i++) {
1137 const ModelAction *write_after_read = NULL;
1139 /* Iterate over actions in thread, starting from most recent */
1140 action_list_t *list = &(*thrd_lists)[i];
1141 sllnode<ModelAction *>* rit;
1142 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1143 ModelAction *act = rit->getVal();
1145 /* Don't disallow due to act == reader */
1146 if (!reader->happens_before(act) || reader == act)
/* Track the earliest write (or read-from source) ordered after reader. */
1148 else if (act->is_write())
1149 write_after_read = act;
1150 else if (act->is_read() && act->get_reads_from() != NULL)
1151 write_after_read = act->get_reads_from();
/* writer mo-after a write that hb-follows reader => disallowed. */
1154 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1161 * Computes the clock vector that happens before propagates from this write.
1163 * @param rf The action that might be part of a release sequence. Must be a
1165 * @return ClockVector of happens before relation.
/* NOTE(review): this excerpt elides many lines (break, several vec
 * assignments, set_rfcv calls, the decrement loop, the final return/delete);
 * read with the full source at hand. */
1168 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
/* First pass: walk the rf chain backwards, collecting RMWs whose release
 * clock vector (rfcv) has not been computed yet. */
1169 SnapVector<ModelAction *> * processset = NULL;
1170 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1171 ASSERT(rf->is_write());
1172 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1174 if (processset == NULL)
1175 processset = new SnapVector<ModelAction *>();
1176 processset->push_back(rf);
/* Second pass (indices walk back down the collected chain). */
1179 int i = (processset == NULL) ? 0 : processset->size();
1181 ClockVector * vec = NULL;
1183 if (rf->get_rfcv() != NULL) {
/* Already computed: reuse the cached clock vector. */
1184 vec = rf->get_rfcv();
1185 } else if (rf->is_acquire() && rf->is_release()) {
1187 } else if (rf->is_release() && !rf->is_rmw()) {
1189 } else if (rf->is_release()) {
1190 //have rmw that is release and doesn't have a rfcv
1191 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1194 //operation that isn't release
1195 if (rf->get_last_fence_release()) {
1197 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1199 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1203 vec = new ClockVector(NULL, NULL);
1206 vec = new ClockVector(vec, NULL);
/* Step to the next collected RMW in the chain. */
1213 rf = (*processset)[i];
1217 if (processset != NULL)
1223 * Performs various bookkeeping operations for the current ModelAction. For
1224 * instance, adds action to the per-object, per-thread action vector and to the
1225 * action trace list of all thread actions.
1227 * @param act is the ModelAction to add.
1229 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1231 int tid = id_to_int(act->get_tid());
// SC fences and unlocks are additionally indexed in obj_map (per-location
// list) so that get_last_seq_cst_fence()/get_last_unlock() can find them.
1232 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1233 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
// Remember the list node so removeAction() can erase it in O(1).
1234 act->setActionRef(list->add_back(act));
1237 // Update action trace, a total order of all actions
1238 action_trace.addAction(act);
1241 // Update obj_thrd_map, a per location, per thread, order of actions
1242 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1243 if ((int)vec->size() <= tid) {
// Grow the per-thread vector to cover all threads created so far;
// placement-new default-constructs each newly exposed slot.
1244 uint oldsize = vec->size();
1245 vec->resize(priv->next_thread_id);
1246 for(uint i = oldsize;i < priv->next_thread_id;i++)
1247 new (&(*vec)[i]) action_list_t();
1249 fixup_action_list(vec);
// Prunable reads/writes are deliberately left out of the per-thread list.
1251 if (!canprune && (act->is_read() || act->is_write()))
1252 (*vec)[tid].addAction(act);
1254 // Update thrd_last_action, the last action taken by each thread
1255 if ((int)thrd_last_action.size() <= tid)
1256 thrd_last_action.resize(get_num_threads());
1257 thrd_last_action[tid] = act;
1259 // Update thrd_last_fence_release, the last release fence taken by each thread
1260 if (act->is_fence() && act->is_release()) {
1261 if ((int)thrd_last_fence_release.size() <= tid)
1262 thrd_last_fence_release.resize(get_num_threads());
1263 thrd_last_fence_release[tid] = act;
// A wait is also indexed under its mutex location, which this code reads
// out of the action's value field.
1266 if (act->is_wait()) {
1267 void *mutex_loc = (void *) act->get_value();
1268 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
1272 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1273 list->addAction(act);
1276 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1277 act->create_cv(NULL);
1278 list->addAction(act);
1282 * Performs various bookkeeping operations for a normal write. The
1283 * complication is that we are typically inserting a normal write
1284 * lazily, so we need to insert it into the middle of lists.
1286 * @param act is the ModelAction to add.
1289 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1291 int tid = id_to_int(act->get_tid());
// Lazily inserted write: give it a clock vector as it enters the trace.
1292 insertIntoActionListAndSetCV(&action_trace, act);
1294 // Update obj_thrd_map, a per location, per thread, order of actions
1295 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1296 if (tid >= (int)vec->size()) {
// Grow the per-thread vector; placement-new constructs the new slots.
1297 uint oldsize =vec->size();
1298 vec->resize(priv->next_thread_id);
1299 for(uint i=oldsize;i<priv->next_thread_id;i++)
1300 new (&(*vec)[i]) action_list_t();
1302 fixup_action_list(vec);
1304 insertIntoActionList(&(*vec)[tid],act);
1306 ModelAction * lastact = thrd_last_action[tid];
1307 // Update thrd_last_action, the last action taken by each thread
// Only promote act when the thread has no last action, or the current last
// action carries the same sequence number (i.e., act was inserted lazily
// at that point in the trace).
1308 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1309 thrd_last_action[tid] = act;
// Record a write in obj_wr_thrd_map: the per-location, per-thread list of
// writes used when building may-read-from sets.
1313 void ModelExecution::add_write_to_lists(ModelAction *write) {
1314 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1315 int tid = id_to_int(write->get_tid());
1316 if (tid >= (int)vec->size()) {
// Grow the per-thread vector; placement-new constructs the new slots.
1317 uint oldsize =vec->size();
1318 vec->resize(priv->next_thread_id);
1319 for(uint i=oldsize;i<priv->next_thread_id;i++)
1320 new (&(*vec)[i]) simple_action_list_t();
// Keep the list node so removeAction() can erase this write in O(1).
1322 write->setActionRef((*vec)[tid].add_back(write));
1326 * @brief Get the last action performed by a particular Thread
1327 * @param tid The thread ID of the Thread in question
1328 * @return The last action in the thread
1330 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1332 int threadid = id_to_int(tid);
1333 if (threadid < (int)thrd_last_action.size())
1334 return thrd_last_action[id_to_int(tid)];
1340 * @brief Get the last fence release performed by a particular Thread
1341 * @param tid The thread ID of the Thread in question
1342 * @return The last fence release in the thread, if one exists; NULL otherwise
1344 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1346 int threadid = id_to_int(tid);
1347 if (threadid < (int)thrd_last_fence_release.size())
1348 return thrd_last_fence_release[id_to_int(tid)];
1354 * Gets the last memory_order_seq_cst write (in the total global sequence)
1355 * performed on a particular object (i.e., memory location), not including the
1357 * @param curr The current ModelAction; also denotes the object location to
1359 * @return The last seq_cst write
1361 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1363 void *location = curr->get_location();
1364 return obj_last_sc_map.get(location);
1368 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1369 * performed in a particular thread, prior to a particular fence.
1370 * @param tid The ID of the thread to check
1371 * @param before_fence The fence from which to begin the search; if NULL, then
1372 * search for the most recent fence in the thread.
1373 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1375 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1377 /* All fences should have location FENCE_LOCATION */
1378 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
// Start from the most recent entry in the fence list.
1383 sllnode<ModelAction*>* rit = list->end();
// When a starting fence is given, first rewind the cursor to it.
1386 for (;rit != NULL;rit=rit->getPrev())
1387 if (rit->getVal() == before_fence)
1390 ASSERT(rit->getVal() == before_fence);
// Continue scanning backwards for the nearest SC fence of thread `tid`.
1394 for (;rit != NULL;rit=rit->getPrev()) {
1395 ModelAction *act = rit->getVal();
1396 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1403 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1404 * location). This function identifies the mutex according to the current
1405 * action, which is presumed to perform on the same mutex.
1406 * @param curr The current ModelAction; also denotes the object location to
1408 * @return The last unlock operation
1410 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1412 void *location = curr->get_location();
// obj_map indexes unlocks/waits by mutex location (see add_action_to_lists).
1414 simple_action_list_t *list = obj_map.get(location);
1418 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Scan from the most recent action backwards; waits count as unlocks here
// because a wait releases the mutex too.
1419 sllnode<ModelAction*>* rit;
1420 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1421 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1422 return rit->getVal();
1426 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1428 ModelAction *parent = get_last_action(tid);
1430 parent = get_thread(tid)->get_creation();
1435 * Returns the clock vector for a given thread.
1436 * @param tid The thread whose clock vector we want
1437 * @return Desired clock vector
1439 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1441 ModelAction *firstaction=get_parent_action(tid);
1442 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare the low-order @a size bytes of two values for equality.
 *
 * Used when deciding whether a compare-and-swap would succeed against a
 * candidate store: only the operand width of the atomic matters.
 *
 * @param val1 First value
 * @param val2 Second value
 * @param size Width of the comparison in bytes (1, 2, 4, or 8)
 * @return true iff the low @a size bytes of the two values are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte (and any unrecognized) width: full comparison */
		return val1 == val2;
	}
}
1462 * Build up an initial set of all past writes that this 'read' action may read
1463 * from, as well as any previously-observed future values that must still be valid.
1465 * @param curr is the current ModelAction that we are exploring; it must be a
1468 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
// Per-thread lists of prior writes to this location.
1470 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1472 ASSERT(curr->is_read());
1474 ModelAction *last_sc_write = NULL;
// SC reads must not read before the most recent SC write to this location.
1476 if (curr->is_seqcst())
1477 last_sc_write = get_last_seq_cst_write(curr);
// Caller takes ownership of the returned set.
1479 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1481 /* Iterate over all threads */
1482 if (thrd_lists != NULL)
1483 for (i = 0;i < thrd_lists->size();i++) {
1484 /* Iterate over actions in thread, starting from most recent */
1485 simple_action_list_t *list = &(*thrd_lists)[i];
1486 sllnode<ModelAction *> * rit;
1487 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1488 ModelAction *act = rit->getVal();
1493 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1494 bool allow_read = true;
// An SC read may take only the latest SC write (or non-SC writes that do
// not happen-before it).
1496 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1499 /* Need to check whether we will have two RMW reading from the same value */
1500 if (curr->is_rmwr()) {
1501 /* It is okay if we have a failing CAS */
1502 if (!curr->is_rmwrcas() ||
1503 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1504 //Need to make sure we aren't the second RMW
1505 CycleNode * node = mo_graph->getNode_noCreate(act);
1506 if (node != NULL && node->getRMW() != NULL) {
1507 //we are the second RMW
1514 /* Only add feasible reads */
1515 rf_set->push_back(act);
1518 /* Include at most one act per-thread that "happens before" curr */
1519 if (act->happens_before(curr))
// Debug dump of the candidate set.
1524 if (DBG_ENABLED()) {
1525 model_print("Reached read action:\n");
1527 model_print("End printing read_from_past\n");
// Print a formatted table of every action in `list`, followed by a simple
// rolling hash of the trace (useful for comparing executions).
1532 static void print_list(action_list_t *list)
1534 sllnode<ModelAction*> *it;
1536 model_print("------------------------------------------------------------------------------------\n");
1537 model_print("# t Action type MO Location Value Rf CV\n");
1538 model_print("------------------------------------------------------------------------------------\n");
1540 unsigned int hash = 0;
1542 for (it = list->begin();it != NULL;it=it->getNext()) {
1543 const ModelAction *act = it->getVal();
// Actions with sequence number 0 are placeholders and are not printed.
1544 if (act->get_seq_number() > 0)
// Fold each action's hash into the running trace hash.
1546 hash = hash^(hash<<3)^(it->getVal()->hash());
1548 model_print("HASH %u\n", hash);
1549 model_print("------------------------------------------------------------------------------------\n");
1552 #if SUPPORT_MOD_ORDER_DUMP
// Write the modification-order graph plus reads-from (rf) and
// sequenced-before (sb) edges to "<filename>.dot" in Graphviz format.
1553 void ModelExecution::dumpGraph(char *filename)
// NOTE(review): `buffer` is used below without a visible declaration —
// presumably a local char array declared on a line not shown here; confirm.
1556 sprintf(buffer, "%s.dot", filename);
1557 FILE *file = fopen(buffer, "w");
1558 fprintf(file, "digraph %s {\n", filename);
1559 mo_graph->dumpNodes(file);
// Tracks the most recent action per thread, to draw sb edges.
1560 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1562 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1563 ModelAction *act = it->getVal();
1564 if (act->is_read()) {
1565 mo_graph->dot_print_node(file, act);
// Red "rf" edge from the write this action read from.
1566 mo_graph->dot_print_edge(file,
1567 act->get_reads_from(),
1569 "label=\"rf\", color=red, weight=2");
// Blue "sb" edge from the previous action of the same thread.
1571 if (thread_array[act->get_tid()]) {
1572 mo_graph->dot_print_edge(file,
1573 thread_array[id_to_int(act->get_tid())],
1575 "label=\"sb\", color=blue, weight=400");
1578 thread_array[act->get_tid()] = act;
1580 fprintf(file, "}\n");
1581 model_free(thread_array);
1586 /** @brief Prints an execution trace summary. */
1587 void ModelExecution::print_summary()
// Optionally dump the modification-order graph for this execution.
1589 #if SUPPORT_MOD_ORDER_DUMP
1590 char buffername[100];
1591 sprintf(buffername, "exec%04u", get_execution_number());
1592 mo_graph->dumpGraphToFile(buffername);
1593 sprintf(buffername, "graph%04u", get_execution_number());
1594 dumpGraph(buffername);
// Header line, annotated with any notable execution properties.
1597 model_print("Execution trace %d:", get_execution_number());
1598 if (scheduler->all_threads_sleeping())
1599 model_print(" SLEEP-SET REDUNDANT");
1600 if (have_bug_reports())
1601 model_print(" DETECTED BUG(S)");
// Full action table for this execution.
1605 print_list(&action_trace);
// Print only the tail end of the execution trace (the most recent actions),
// in chronological order, followed by the rolling trace hash.
1610 void ModelExecution::print_tail()
1612 model_print("Execution trace %d:\n", get_execution_number());
1614 sllnode<ModelAction*> *it;
1616 model_print("------------------------------------------------------------------------------------\n");
1617 model_print("# t Action type MO Location Value Rf CV\n");
1618 model_print("------------------------------------------------------------------------------------\n");
1620 unsigned int hash = 0;
// Collect the last `length` actions by walking backwards and pushing to
// the front, so the final print is oldest-first.
// NOTE(review): `counter` and `length` have no visible declarations here —
// presumably locals declared on lines not shown; confirm.
1624 SnapList<ModelAction *> list;
1625 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1626 if (counter > length)
1629 ModelAction * act = it->getVal();
1630 list.push_front(act);
1634 for (it = list.begin();it != NULL;it=it->getNext()) {
1635 const ModelAction *act = it->getVal();
// Placeholder actions (seq number 0) are skipped when printing.
1636 if (act->get_seq_number() > 0)
1638 hash = hash^(hash<<3)^(it->getVal()->hash());
1640 model_print("HASH %u\n", hash);
1641 model_print("------------------------------------------------------------------------------------\n");
1645 * Add a Thread to the system for the first time. Should only be called once
1647 * @param t The Thread to add
1649 void ModelExecution::add_thread(Thread *t)
1651 unsigned int i = id_to_int(t->get_id());
1652 if (i >= thread_map.size())
1653 thread_map.resize(i + 1);
1655 if (!t->is_model_thread())
1656 scheduler->add_thread(t);
1660 * @brief Get a Thread reference by its ID
1661 * @param tid The Thread's ID
1662 * @return A Thread reference
1664 Thread * ModelExecution::get_thread(thread_id_t tid) const
1666 unsigned int i = id_to_int(tid);
1667 if (i < thread_map.size())
1668 return thread_map[i];
1673 * @brief Get a reference to the Thread in which a ModelAction was executed
1674 * @param act The ModelAction
1675 * @return A Thread reference
1677 Thread * ModelExecution::get_thread(const ModelAction *act) const
1679 return get_thread(act->get_tid());
1683 * @brief Get a Thread reference by its pthread ID
1684 * @param index The pthread's ID
1685 * @return A Thread reference
1687 Thread * ModelExecution::get_pthread(pthread_t pid) {
1688 // pid 1 is reserved for the main thread, pthread ids should start from 2
1690 return get_thread(pid);
// NOTE(review): `x` is read below without a visible declaration — presumably
// a union { pthread_t p; uint32_t v; } initialized from `pid` on lines not
// shown here, used to reinterpret the opaque pthread_t as an index; confirm.
1697 uint32_t thread_id = x.v;
// Only IDs that were handed out via pthread_counter are valid map indices.
1699 if (thread_id < pthread_counter + 1)
1700 return pthread_map[thread_id];
1706 * @brief Check if a Thread is currently enabled
1707 * @param t The Thread to check
1708 * @return True if the Thread is currently enabled
1710 bool ModelExecution::is_enabled(Thread *t) const
1712 return scheduler->is_enabled(t);
1716 * @brief Check if a Thread is currently enabled
1717 * @param tid The ID of the Thread to check
1718 * @return True if the Thread is currently enabled
1720 bool ModelExecution::is_enabled(thread_id_t tid) const
1722 return scheduler->is_enabled(tid);
1726 * @brief Select the next thread to execute based on the current action
1728 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1729 * actions should be followed by the execution of their child thread. In either
1730 * case, the current action should determine the next thread schedule.
1732 * @param curr The current action
1733 * @return The next thread to run, if the current action will determine this
1734 * selection; otherwise NULL
1736 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1738 /* Do not split atomic RMW */
1739 if (curr->is_rmwr())
1740 return get_thread(curr);
1741 /* Follow CREATE with the created thread */
1742 /* which is not needed, because model.cc takes care of this */
1743 if (curr->get_type() == THREAD_CREATE)
1744 return curr->get_thread_operand();
1745 if (curr->get_type() == PTHREAD_CREATE) {
1746 return curr->get_thread_operand();
1752 * Takes the next step in the execution, if possible.
1753 * @param curr The current step to take
1754 * @return Returns the next Thread to run, if any; NULL if this execution
1757 Thread * ModelExecution::take_step(ModelAction *curr)
1759 Thread *curr_thrd = get_thread(curr);
1760 ASSERT(curr_thrd->get_state() == THREAD_READY);
1762 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action may replace curr (e.g., with a recorded equivalent).
1763 curr = check_current_action(curr);
1766 /* Process this action in ModelHistory for records */
1768 model->get_history()->process_action( curr, curr->get_tid() );
// A thread that blocked or finished during this step leaves the scheduler.
1770 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1771 scheduler->remove_thread(curr_thrd);
// NULL here means the scheduler is free to choose the next thread.
1773 return action_select_next_thread(curr);
1776 /** This method removes references to an Action before we delete it. */
1778 void ModelExecution::removeAction(ModelAction *act) {
// Unlink from the global trace and from the per-location/per-thread list.
1780 action_trace.removeAction(act);
1783 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1784 (*vec)[act->get_tid()].removeAction(act);
// Erase the cached obj_map/obj_wr_thrd_map node, if one was recorded when
// the action was added (see add_action_to_lists/add_write_to_lists).
1786 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1787 sllnode<ModelAction *> * listref = act->getActionRef();
1788 if (listref != NULL) {
1789 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1790 list->erase(listref);
1792 } else if (act->is_wait()) {
1793 sllnode<ModelAction *> * listref = act->getActionRef();
1794 if (listref != NULL) {
// Waits are indexed under their mutex location (stored in the value field).
1795 void *mutex_loc = (void *) act->get_value();
1796 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1798 } else if (act->is_free()) {
1799 sllnode<ModelAction *> * listref = act->getActionRef();
1800 if (listref != NULL) {
1801 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1802 (*vec)[act->get_tid()].erase(listref);
1805 //Clear it from last_sc_map
1806 if (obj_last_sc_map.get(act->get_location()) == act) {
1807 obj_last_sc_map.remove(act->get_location());
1810 //Remove from Cyclegraph
1811 mo_graph->freeAction(act);
1815 /** Computes clock vector that all running threads have already synchronized to. */
1817 ClockVector * ModelExecution::computeMinimalCV() {
1818 ClockVector *cvmin = NULL;
1819 //Thread 0 isn't a real thread, so skip it..
1820 for(unsigned int i = 1;i < thread_map.size();i++) {
1821 Thread * t = thread_map[i];
// Completed threads no longer constrain the minimal clock vector.
1822 if (t->get_state() == THREAD_COMPLETED)
1824 thread_id_t tid = int_to_id(i);
1825 ClockVector * cv = get_cv(tid);
// First live thread seeds cvmin; later threads are min-merged into it.
1827 cvmin = new ClockVector(cv, NULL);
1829 cvmin->minmerge(cv);
1835 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
1837 void ModelExecution::fixupLastAct(ModelAction *act) {
1838 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1839 newact->set_seq_number(get_next_seq_num());
1840 newact->create_cv(act);
1841 newact->set_last_fence_release(act->get_last_fence_release());
1842 add_action_to_lists(newact, false);
1845 /** Compute which actions to free. */
// Garbage-collect old actions from the trace once it exceeds traceminsize.
// Uses the minimal clock vector across live threads to decide which actions
// can no longer influence any future behavior ("invisible" actions).
1847 void ModelExecution::collectActions() {
// Trace still small enough: nothing to collect.
1848 if (priv->used_sequence_numbers < params->traceminsize)
1851 //Compute minimal clock vector for all live threads
1852 ClockVector *cvmin = computeMinimalCV();
1853 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with sequence numbers at or below this bound are candidates.
1854 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1856 //Next walk action trace... When we hit an action, see if it is
1857 //invisible (e.g., earlier than the first before the minimum
1858 //clock for the thread... if so erase it and all previous
1859 //actions in cyclegraph
1860 sllnode<ModelAction*> * it;
1861 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1862 ModelAction *act = it->getVal();
1863 modelclock_t actseq = act->get_seq_number();
1865 //See if we are done
1866 if (actseq > maxtofree)
1869 thread_id_t act_tid = act->get_tid();
1870 modelclock_t tid_clock = cvmin->getClock(act_tid);
1872 //Free if it is invisible or we have set a flag to remove visible actions.
1873 if (actseq <= tid_clock || params->removevisible) {
// Identify the write this action corresponds to (a read contributes the
// write it read from).
1874 ModelAction * write;
1875 if (act->is_write()) {
1877 } else if (act->is_read()) {
1878 write = act->get_reads_from();
1882 //Mark everything earlier in MO graph to be freed
// BFS over incoming mo-edges: every write mo-before a freed write is freed.
1883 CycleNode * cn = mo_graph->getNode_noCreate(write);
1885 queue->push_back(cn);
1886 while(!queue->empty()) {
1887 CycleNode * node = queue->back();
1889 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1890 CycleNode * prevnode = node->getInEdge(i);
1891 ModelAction * prevact = prevnode->getAction();
1892 if (prevact->get_type() != READY_FREE) {
1893 prevact->set_free();
1894 queue->push_back(prevnode);
1902 //We may need to remove read actions in the window we don't delete to preserve correctness.
// Pass 2: walk the kept (recent) window backwards, fixing reads that point
// at freed writes.
1904 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1905 ModelAction *act = it2->getVal();
1906 //Do iteration early in case we delete the act
// A thread's most recent action must be preserved while the thread lives.
1908 bool islastact = false;
1909 ModelAction *lastact = get_last_action(act->get_tid());
1910 if (act == lastact) {
1911 Thread * th = get_thread(act);
1912 islastact = !th->is_complete();
1915 if (act->is_read()) {
1916 if (act->get_reads_from()->is_free()) {
1917 if (act->is_rmw()) {
1918 //Weaken a RMW from a freed store to a write
1919 act->set_type(ATOMIC_WRITE);
1931 //If we don't delete the action, we should remove references to release fences
1933 const ModelAction *rel_fence =act->get_last_fence_release();
1934 if (rel_fence != NULL) {
1935 modelclock_t relfenceseq = rel_fence->get_seq_number();
1936 thread_id_t relfence_tid = rel_fence->get_tid();
1937 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1938 //Remove references to irrelevant release fences
1939 if (relfenceseq <= tid_clock)
1940 act->set_last_fence_release(NULL);
1943 //Now we are in the window of old actions that we remove if possible
// Pass 3: actually delete freeable actions from the old window, handling
// each action category separately.
1944 for (;it != NULL;) {
1945 ModelAction *act = it->getVal();
1946 //Do iteration early since we may delete node...
1948 bool islastact = false;
1949 ModelAction *lastact = get_last_action(act->get_tid());
1950 if (act == lastact) {
1951 Thread * th = get_thread(act);
1952 islastact = !th->is_complete();
1955 if (act->is_read()) {
1956 if (act->get_reads_from()->is_free()) {
1957 if (act->is_rmw()) {
// RMW whose source store is gone is demoted to a plain write.
1958 act->set_type(ATOMIC_WRITE);
1968 } else if (act->is_free()) {
1975 } else if (act->is_write()) {
1976 //Do nothing with write that hasn't been marked to be freed
1977 } else if (islastact) {
1978 //Keep the last action for non-read/write actions
1979 } else if (act->is_fence()) {
1980 //Note that acquire fences can always be safely
1981 //removed, but could incur extra overheads in
1982 //traversals. Removing them before the cvmin seems
1983 //like a good compromise.
1985 //Release fences before the cvmin don't do anything
1986 //because everyone has already synchronized.
1988 //Sequentially fences before cvmin are redundant
1989 //because happens-before will enforce same
1992 modelclock_t actseq = act->get_seq_number();
1993 thread_id_t act_tid = act->get_tid();
1994 modelclock_t tid_clock = cvmin->getClock(act_tid);
1995 if (actseq <= tid_clock) {
1997 // Remove reference to act from thrd_last_fence_release
1998 int thread_id = id_to_int( act->get_tid() );
1999 if (thrd_last_fence_release[thread_id] == act) {
2000 thrd_last_fence_release[thread_id] = NULL;
2006 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
2007 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
2008 //need to keep most recent unlock/wait for each lock
2009 if(act->is_unlock() || act->is_wait()) {
// Only the most recent unlock/wait per mutex must survive.
2010 ModelAction * lastlock = get_last_unlock(act);
2011 if (lastlock != act) {
2016 } else if (act->is_create()) {
// A create can go once the thread it spawned has finished.
2017 if (act->get_thread_operand()->is_complete()) {
2029 //If we don't delete the action, we should remove references to release fences
2030 const ModelAction *rel_fence =act->get_last_fence_release();
2031 if (rel_fence != NULL) {
2032 modelclock_t relfenceseq = rel_fence->get_seq_number();
2033 thread_id_t relfence_tid = rel_fence->get_tid();
2034 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2035 //Remove references to irrelevant release fences
2036 if (relfenceseq <= tid_clock)
2037 act->set_last_fence_release(NULL);
2045 Fuzzer * ModelExecution::getFuzzer() {