11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
// File-scope statistics: one counter per kind of atomic operation observed
// while executing; bumped by record_atomic_stats() and reported by
// print_atomic_accesses().  Deliberately plain statics (not snapshotted),
// so they accumulate across executions.
23 static unsigned int atomic_load_count = 0;
24 static unsigned int atomic_store_count = 0;
25 static unsigned int atomic_rmw_count = 0;
27 static unsigned int atomic_fence_count = 0;
28 static unsigned int atomic_lock_count = 0;
29 static unsigned int atomic_trylock_count = 0;
30 static unsigned int atomic_unlock_count = 0;
31 static unsigned int atomic_notify_count = 0;
32 static unsigned int atomic_wait_count = 0;
33 static unsigned int atomic_timedwait_count = 0;
37 * Structure for holding small ModelChecker members that should be snapshotted
// Execution state that must roll back together with each snapshot/replay:
// the thread-id allocator, the sequence-number counter, and the bug reports
// collected so far.
39 struct model_snapshot_members {
40 model_snapshot_members() :
41 /* First thread created will have id INITIAL_THREAD_ID */
42 next_thread_id(INITIAL_THREAD_ID),
43 used_sequence_numbers(0),
// Destructor frees every bug_message owned by `bugs`.
48 ~model_snapshot_members() {
49 for (unsigned int i = 0;i < bugs.size();i++)
// Next thread id to hand out; doubles as the count of threads created.
54 unsigned int next_thread_id;
// Last sequence number handed out to a ModelAction.
55 modelclock_t used_sequence_numbers;
// Bug reports recorded during this execution (owned by this struct).
56 SnapVector<bug_message *> bugs;
57 /** @brief Incorrectly-ordered synchronization was made */
63 /** @brief Constructor */
// Builds the execution state, creates the special model-checker thread,
// and cross-registers this execution with the fuzzer and scheduler.
64 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
68 thread_map(2), /* We'll always need at least 2 threads */
73 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 priv(new struct model_snapshot_members ()),
82 mo_graph(new CycleGraph()),
84 fuzzer(new NewFuzzer()),
90 /* Initialize a model-checker thread, for special ModelActions */
91 model_thread = new Thread(get_next_id());
92 add_thread(model_thread);
93 fuzzer->register_engine(m, this);
94 scheduler->register_engine(this);
// TLS key whose destructor runs per-thread cleanup on thread exit.
96 pthread_key_create(&pthreadkey, tlsdestructor);
100 /** @brief Destructor */
// Frees every Thread object this execution created (including the model thread).
101 ModelExecution::~ModelExecution()
103 for (unsigned int i = 0;i < get_num_threads();i++)
104 delete get_thread(int_to_id(i));
// @return the global execution number, delegated to the owning ModelChecker.
110 int ModelExecution::get_execution_number() const
112 return model->get_execution_number();
// Look up the per-location vector of per-thread action lists in `hash`,
// lazily allocating (and caching) an empty one on first access.
115 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
117 SnapVector<action_list_t> *tmp = hash->get(ptr);
119 tmp = new SnapVector<action_list_t>();
// Look up the per-location simple action list in `hash`, lazily allocating
// (and caching) an empty one on first access.
125 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
127 simple_action_list_t *tmp = hash->get(ptr);
129 tmp = new simple_action_list_t();
// Overload of get_safe_ptr_vect_action for vectors of simple action lists;
// same lazy-allocate-and-cache behavior as the action_list_t variant above.
135 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
137 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
139 tmp = new SnapVector<simple_action_list_t>();
146 * When vectors of action lists are reallocated due to resize, the root address of
147 * action lists may change. Hence we need to fix the parent pointer of the children
// Walks every list in `vec` after a resize so each list's internal parent
// pointers refer to the list's new storage address.
150 static void fixup_action_list(SnapVector<action_list_t> * vec)
152 for (uint i = 0;i < vec->size();i++) {
153 action_list_t * list = &(*vec)[i];
// Bump the file-scope counter that matches `act`'s operation type.
// NOTIFY_ONE/NOTIFY_ALL share one counter (visible fall-through below).
160 static inline void record_atomic_stats(ModelAction * act)
162 switch (act->get_type()) {
164 atomic_store_count++;
173 atomic_fence_count++;
179 atomic_trylock_count++;
182 atomic_unlock_count++;
184 case ATOMIC_NOTIFY_ONE:
185 case ATOMIC_NOTIFY_ALL:
186 atomic_notify_count++;
191 case ATOMIC_TIMEDWAIT:
192 atomic_timedwait_count++;
// Dump all atomic-operation counters accumulated by record_atomic_stats().
198 void print_atomic_accesses()
200 model_print("atomic store count: %u\n", atomic_store_count);
201 model_print("atomic load count: %u\n", atomic_load_count);
202 model_print("atomic rmw count: %u\n", atomic_rmw_count);
204 model_print("atomic fence count: %u\n", atomic_fence_count);
205 model_print("atomic lock count: %u\n", atomic_lock_count);
206 model_print("atomic trylock count: %u\n", atomic_trylock_count);
207 model_print("atomic unlock count: %u\n", atomic_unlock_count);
208 model_print("atomic notify count: %u\n", atomic_notify_count);
209 model_print("atomic wait count: %u\n", atomic_wait_count);
210 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
213 /** @return a thread ID for a new Thread */
// Post-increment: hands out the current id and advances the allocator.
214 thread_id_t ModelExecution::get_next_id()
216 return priv->next_thread_id++;
219 /** @return the number of user threads created during this execution */
// next_thread_id doubles as the count of ids handed out so far.
220 unsigned int ModelExecution::get_num_threads() const
222 return priv->next_thread_id;
225 /** @return a sequence number for a new ModelAction */
// Pre-increment: allocates and returns a fresh sequence number.
226 modelclock_t ModelExecution::get_next_seq_num()
228 return ++priv->used_sequence_numbers;
231 /** @return a sequence number for a new ModelAction */
// NOTE(review): unlike get_next_seq_num(), this only reads the current
// value without allocating — the doc comment above appears copy-pasted.
232 modelclock_t ModelExecution::get_curr_seq_num()
234 return priv->used_sequence_numbers;
237 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
// Undoes one get_next_seq_num() allocation.
238 void ModelExecution::restore_last_seq_num()
240 priv->used_sequence_numbers--;
244 * @brief Should the current action wake up a given thread?
246 * @param curr The current action
247 * @param thread The thread that we might wake up
248 * @return True, if we should wake up the sleeping thread; false otherwise
250 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
// The action the sleeping thread is blocked on.
252 const ModelAction *asleep = thread->get_pending();
253 /* Don't allow partial RMW to wake anyone up */
256 /* Synchronizing actions may have been backtracked */
257 if (asleep->could_synchronize_with(curr))
259 /* All acquire/release fences and fence-acquire/store-release */
260 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
262 /* Fence-release + store can awake load-acquire on the same location */
263 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
264 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only if the writer's last release fence came after the sleeper's
// last action (compared by the actions' ordering operator).
265 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
268 /* The sleep is literally sleeping */
269 if (asleep->is_sleep()) {
// Delegate the spurious-wakeup decision to the fuzzer.
270 if (fuzzer->shouldWake(asleep))
// Scan every thread in the scheduler's sleep set and wake those that
// should_wake_up() says `curr` can unblock; threads sleeping on an explicit
// SLEEP action additionally get their wakeup flag set.
277 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
279 for (unsigned int i = 0;i < get_num_threads();i++) {
280 Thread *thr = get_thread(int_to_id(i));
281 if (scheduler->is_sleep_set(thr)) {
282 if (should_wake_up(curr, thr)) {
283 /* Remove this thread from sleep set */
284 scheduler->remove_sleep(thr);
285 if (thr->get_pending()->is_sleep())
286 thr->set_wakeup_state(true);
// Record a bug report (message copied into a new bug_message) for this execution.
292 void ModelExecution::assert_bug(const char *msg)
294 priv->bugs.push_back(new bug_message(msg));
298 /** @return True, if any bugs have been reported for this execution */
299 bool ModelExecution::have_bug_reports() const
301 return priv->bugs.size() != 0;
304 SnapVector<bug_message *> * ModelExecution::get_bugs() const
310 * Check whether the current trace has triggered an assertion which should halt
313 * @return True, if the execution should be aborted; false otherwise
315 bool ModelExecution::has_asserted() const
317 return priv->asserted;
321 * Trigger a trace assertion which should cause this execution to be halted.
322 * This can be due to a detected bug or due to an infeasibility that should
325 void ModelExecution::set_assert()
327 priv->asserted = true;
331 * Check if we are in a deadlock. Should only be called at the end of an
332 * execution, although it should not give false positives in the middle of an
333 * execution (there should be some ENABLED thread).
335 * @return True if program is in a deadlock; false otherwise
337 bool ModelExecution::is_deadlocked() const
339 bool blocking_threads = false;
340 for (unsigned int i = 0;i < get_num_threads();i++) {
341 thread_id_t tid = int_to_id(i);
344 Thread *t = get_thread(tid);
// A user thread with a pending action here is blocked (cannot be scheduled).
345 if (!t->is_model_thread() && t->get_pending())
346 blocking_threads = true;
348 return blocking_threads;
352 * Check if this is a complete execution. That is, have all thread completed
353 * execution (rather than exiting because sleep sets have forced a redundant
356 * @return True if the execution is complete.
358 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the execution has not run to completion.
360 for (unsigned int i = 0;i < get_num_threads();i++)
361 if (is_enabled(int_to_id(i)))
// Materialize a ModelAction for a plain (non-atomic) store previously made
// to `location`, so atomic readers can read from it: recovers the storing
// thread/clock, marks the location as atomically accessed, back-dates the
// new NONATOMIC_WRITE's sequence number, and threads it into the write
// lists and modification order.
366 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
367 uint64_t value = *((const uint64_t *) location);
368 modelclock_t storeclock;
369 thread_id_t storethread;
370 getStoreThreadAndClock(location, &storethread, &storeclock);
371 setAtomicStoreFlag(location);
372 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
373 act->set_seq_number(storeclock);
374 add_normal_write_to_lists(act);
375 add_write_to_lists(act);
376 w_modification_order(act);
// Let the execution history observe the synthesized write.
378 model->get_history()->process_action(act, act->get_tid());
384 * Processes a read model action.
385 * @param curr is the read model action to process.
386 * @param rf_set is the set of model actions we can possibly read from
387 * @return True if the read can be pruned from the thread map list.
389 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
391 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
// If the location saw a plain store before becoming atomic, surface it as
// an extra reads-from candidate.
392 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
393 if (hasnonatomicstore) {
394 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
395 rf_set->push_back(nonatomicstore);
398 // Remove writes that violate read modification order
401 while (i < rf_set->size()) {
402 ModelAction * rf = (*rf_set)[i];
// check_only pass (last arg true): no priorset/canprune side effects.
403 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
// Swap-with-back removal; index i is re-examined next iteration.
404 (*rf_set)[i] = rf_set->back();
// Let the fuzzer pick which remaining write this read observes.
411 int index = fuzzer->selectWrite(curr, rf_set);
413 ModelAction *rf = (*rf_set)[index];
416 bool canprune = false;
417 if (r_modification_order(curr, rf, priorset, &canprune)) {
// Commit the mo edges implied by the chosen reads-from.
418 for(unsigned int i=0;i<priorset->size();i++) {
419 mo_graph->addEdge((*priorset)[i], rf);
422 get_thread(curr)->set_return_value(curr->get_return_value());
424 //Update acquire fence clock vector
425 ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
427 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
// Only plain atomic reads are prunable (not RMW reads).
428 return canprune && (curr->get_type() == ATOMIC_READ);
431 (*rf_set)[index] = rf_set->back();
437 * Processes a lock, trylock, or unlock model action. @param curr is
438 * the read model action to process.
440 * The try lock operation checks whether the lock is taken. If not,
441 * it falls to the normal lock operation case. If so, it returns
444 * The lock operation has already been checked that it is enabled, so
445 * it just grabs the lock and synchronizes with the previous unlock.
447 * The unlock operation has to re-enable all of the threads that are
448 * waiting on the lock.
450 * @return True if synchronization was updated; false otherwise
452 bool ModelExecution::process_mutex(ModelAction *curr)
454 cdsc::mutex *mutex = curr->get_mutex();
455 struct cdsc::mutex_state *state = NULL;
458 state = mutex->get_state();
460 switch (curr->get_type()) {
461 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently unowned.
462 bool success = !state->locked;
463 curr->set_try_lock(success);
465 get_thread(curr)->set_return_value(0);
468 get_thread(curr)->set_return_value(1);
470 //otherwise fall into the lock case
472 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
473 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
474 // assert_bug("Lock access before initialization");
476 // TODO: lock count for recursive mutexes
// Record the new owner; `locked` holds the owning Thread pointer.
477 state->locked = get_thread(curr);
478 ModelAction *unlock = get_last_unlock(curr);
479 //synchronize with the previous unlock statement
480 if (unlock != NULL) {
481 synchronize(unlock, curr);
487 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
// Wait path: release the mutex, park this thread on the condvar waiter list.
488 if (fuzzer->shouldWait(curr)) {
489 /* wake up the other threads */
490 for (unsigned int i = 0;i < get_num_threads();i++) {
491 Thread *t = get_thread(int_to_id(i));
492 Thread *curr_thrd = get_thread(curr);
493 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
497 /* unlock the lock - after checking who was waiting on it */
498 state->locked = NULL;
500 /* remove old wait action and disable this thread */
501 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
502 for (sllnode<ModelAction *> * it = waiters->begin();it != NULL;it = it->getNext()) {
503 ModelAction * wait = it->getVal();
504 if (wait->get_tid() == curr->get_tid()) {
510 waiters->push_back(curr);
511 scheduler->sleep(get_thread(curr));
516 case ATOMIC_TIMEDWAIT:
517 case ATOMIC_UNLOCK: {
518 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
519 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
520 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
521 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
522 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
523 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
524 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
525 //MUST EVENMTUALLY RELEASE...
527 // TODO: lock count for recursive mutexes
528 /* wake up the other threads */
529 for (unsigned int i = 0;i < get_num_threads();i++) {
530 Thread *t = get_thread(int_to_id(i));
531 Thread *curr_thrd = get_thread(curr);
532 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
536 /* unlock the lock - after checking who was waiting on it */
537 state->locked = NULL;
540 case ATOMIC_NOTIFY_ALL: {
541 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
542 //activate all the waiting threads
543 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
544 scheduler->wake(get_thread(rit->getVal()));
549 case ATOMIC_NOTIFY_ONE: {
550 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// The fuzzer chooses which single waiter gets woken.
551 if (waiters->size() != 0) {
552 Thread * thread = fuzzer->selectNotify(waiters);
553 scheduler->wake(thread);
565 * Process a write ModelAction
566 * @param curr The ModelAction to process
567 * @return True if the mo_graph was updated or promises were resolved
569 void ModelExecution::process_write(ModelAction *curr)
// Establish modification-order edges for the write, then report success
// (VALUE_NONE) to the issuing thread.
571 w_modification_order(curr);
572 get_thread(curr)->set_return_value(VALUE_NONE);
576 * Process a fence ModelAction
577 * @param curr The ModelAction to process
578 * @return True if synchronization was updated
580 void ModelExecution::process_fence(ModelAction *curr)
583 * fence-relaxed: no-op
584 * fence-release: only log the occurence (not in this function), for
585 * use in later synchronization
586 * fence-acquire (this function): search for hypothetical release
588 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
// Acquire fences pull in the clock vector accumulated from prior
// release-sequence observations by this thread.
590 if (curr->is_acquire()) {
591 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
596 * @brief Process the current action for thread-related activity
598 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
599 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
600 * synchronization, etc. This function is a no-op for non-THREAD actions
601 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
603 * @param curr The current action
604 * @return True if synchronization was updated or a thread completed
606 void ModelExecution::process_thread_action(ModelAction *curr)
608 switch (curr->get_type()) {
609 case THREAD_CREATE: {
// C11 threads create: spawn a Thread running params->func(params->arg).
610 thrd_t *thrd = (thrd_t *)curr->get_location();
611 struct thread_params *params = (struct thread_params *)curr->get_value();
612 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
613 curr->set_thread_operand(th);
615 th->set_creation(curr);
618 case PTHREAD_CREATE: {
// pthread create: the pthread_t out-param receives a fresh counter value
// that indexes into pthread_map.
619 (*(uint32_t *)curr->get_location()) = pthread_counter++;
621 struct pthread_params *params = (struct pthread_params *)curr->get_value();
622 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
623 curr->set_thread_operand(th);
625 th->set_creation(curr);
627 if ( pthread_map.size() < pthread_counter )
628 pthread_map.resize( pthread_counter );
629 pthread_map[ pthread_counter-1 ] = th;
// Join cases: synchronize the joiner with the finished thread's last action.
634 Thread *blocking = curr->get_thread_operand();
635 ModelAction *act = get_last_action(blocking->get_id());
636 synchronize(act, curr);
640 Thread *blocking = curr->get_thread_operand();
641 ModelAction *act = get_last_action(blocking->get_id());
642 synchronize(act, curr);
646 case THREADONLY_FINISH:
647 case THREAD_FINISH: {
648 Thread *th = get_thread(curr);
// The initial (main) thread finishing is handled specially (elided here).
649 if (curr->get_type() == THREAD_FINISH &&
650 th == model->getInitThread()) {
656 /* Wake up any joining threads */
657 for (unsigned int i = 0;i < get_num_threads();i++) {
658 Thread *waiting = get_thread(int_to_id(i));
659 if (waiting->waiting_on() == th &&
660 waiting->get_pending()->is_thread_join())
661 scheduler->wake(waiting);
// Sleep-style action: park the thread with the action still pending.
670 Thread *th = get_thread(curr);
671 th->set_pending(curr);
672 scheduler->add_sleep(th);
681 * Initialize the current action by performing one or more of the following
682 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
683 * manipulating backtracking sets, allocating and
684 * initializing clock vectors, and computing the promises to fulfill.
686 * @param curr The current action, as passed from the user context; may be
687 * freed/invalidated after the execution of this function, with a different
688 * action "returned" its place (pass-by-reference)
689 * @return True if curr is a newly-explored action; false otherwise
691 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// RMW completion: merge into the earlier RMWR action (process_rmw) instead
// of creating a brand-new action.
693 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
694 ModelAction *newcurr = process_rmw(*curr);
700 ModelAction *newcurr = *curr;
702 newcurr->set_seq_number(get_next_seq_num());
703 /* Always compute new clock vector */
704 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
706 /* Assign most recent release fence */
707 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
709 return true; /* This was a new ModelAction */
714 * @brief Establish reads-from relation between two actions
716 * Perform basic operations involved with establishing a concrete rf relation,
717 * including setting the ModelAction data and checking for release sequences.
719 * @param act The action that is reading (must be a read)
720 * @param rf The action from which we are reading (must be a write)
722 * @return True if this read established synchronization
725 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
728 ASSERT(rf->is_write());
730 act->set_read_from(rf);
// Acquire reads synchronize: merge the happens-before clock derived from
// the write's release sequence into the reader's clock vector.
731 if (act->is_acquire()) {
732 ClockVector *cv = get_hb_from_write(rf);
735 act->get_cv()->merge(cv);
740 * @brief Synchronizes two actions
742 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
743 * This function performs the synchronization as well as providing other hooks
744 * for other checks along with synchronization.
746 * @param first The left-hand side of the synchronizes-with relation
747 * @param second The right-hand side of the synchronizes-with relation
748 * @return True if the synchronization was successful (i.e., was consistent
749 * with the execution order); false otherwise
751 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A sync edge pointing backwards in execution order is a checker bug.
753 if (*second < *first) {
754 ASSERT(0); //This should not happend
757 return second->synchronize_with(first);
761 * @brief Check whether a model action is enabled.
763 * Checks whether an operation would be successful (i.e., is a lock already
764 * locked, or is the joined thread already complete).
766 * For yield-blocking, yields are never enabled.
768 * @param curr is the ModelAction to check whether it is enabled.
769 * @return a bool that indicates whether the action is enabled.
771 bool ModelExecution::check_action_enabled(ModelAction *curr) {
772 if (curr->is_lock()) {
773 cdsc::mutex *lock = curr->get_mutex();
774 struct cdsc::mutex_state *state = lock->get_state();
776 Thread *lock_owner = (Thread *)state->locked;
777 Thread *curr_thread = get_thread(curr);
// Re-locking a recursive mutex we already own is allowed.
778 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
784 } else if (curr->is_thread_join()) {
// Join blocks until the target thread has completed.
785 Thread *blocking = curr->get_thread_operand();
786 if (!blocking->is_complete()) {
789 } else if (curr->is_sleep()) {
// Fuzzer may decide the sleep completes immediately.
790 if (!fuzzer->shouldSleep(curr))
798 * This is the heart of the model checker routine. It performs model-checking
799 * actions corresponding to a given "current action." Among other processes, it
800 * calculates reads-from relationships, updates synchronization clock vectors,
801 * forms a memory_order constraints graph, and handles replay/backtrack
802 * execution when running permutations of previously-observed executions.
804 * @param curr The current action to process
805 * @return The ModelAction that is actually executed; may be different than
808 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// A completing RMW/RMWC was already counted/listed when its read half ran.
811 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
812 bool newly_explored = initialize_curr_action(&curr);
816 wake_up_sleeping_actions(curr);
818 SnapVector<ModelAction *> * rf_set = NULL;
819 /* Build may_read_from set for newly-created actions */
820 if (newly_explored && curr->is_read())
821 rf_set = build_may_read_from(curr);
823 bool canprune = false;
825 if (curr->is_read() && !second_part_of_rmw) {
826 canprune = process_read(curr, rf_set);
829 ASSERT(rf_set == NULL);
831 /* Add the action to lists */
832 if (!second_part_of_rmw) {
834 record_atomic_stats(curr);
836 add_action_to_lists(curr, canprune);
839 if (curr->is_write())
840 add_write_to_lists(curr);
842 process_thread_action(curr);
// Dispatch to the type-specific processors (write/fence/mutex ops).
844 if (curr->is_write())
847 if (curr->is_fence())
850 if (curr->is_mutex_op())
856 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
// Merges `act` into the thread's pending RMWR (its last action) and adds
// the RMW atomicity edge readfrom(lastread) --mo--> lastread.
857 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
858 ModelAction *lastread = get_last_action(act->get_tid());
859 lastread->process_rmw(act);
861 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
867 * @brief Updates the mo_graph with the constraints imposed from the current
870 * Basic idea is the following: Go through each other thread and find
871 * the last action that happened before our read. Two cases:
873 * -# The action is a write: that write must either occur before
874 * the write we read from or be the write we read from.
875 * -# The action is a read: the write that that action read from
876 * must occur before the write we read from or be the same write.
878 * @param curr The current action. Must be a read.
879 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
880 * @param check_only If true, then only check whether the current action satisfies
881 * read modification order or not, without modifiying priorset and canprune.
883 * @return True if modification order edges were added; false otherwise
886 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
887 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
889 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
890 ASSERT(curr->is_read());
892 /* Last SC fence in the current thread */
893 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
895 int tid = curr->get_tid();
897 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
898 if ((int)thrd_lists->size() <= tid) {
899 uint oldsize = thrd_lists->size();
900 thrd_lists->resize(priv->next_thread_id);
// Placement-new each fresh slot; resize alone does not construct lists
// correctly for this use (see fixup_action_list).
901 for(uint i = oldsize;i < priv->next_thread_id;i++)
902 new (&(*thrd_lists)[i]) action_list_t();
904 fixup_action_list(thrd_lists);
907 ModelAction *prev_same_thread = NULL;
908 /* Iterate over all threads */
// Round-robin thread order starting at the reader's own tid, wrapping at
// the vector size.
909 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
910 /* Last SC fence in thread tid */
911 ModelAction *last_sc_fence_thread_local = NULL;
913 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
915 /* Last SC fence in thread tid, before last SC fence in current thread */
916 ModelAction *last_sc_fence_thread_before = NULL;
917 if (last_sc_fence_local)
918 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
920 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
921 if (prev_same_thread != NULL &&
922 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
923 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
927 /* Iterate over actions in thread, starting from most recent */
928 action_list_t *list = &(*thrd_lists)[tid];
929 sllnode<ModelAction *> * rit;
930 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
931 ModelAction *act = rit->getVal();
936 /* Don't want to add reflexive edges on 'rf' */
937 if (act->equals(rf)) {
938 if (act->happens_before(curr))
944 if (act->is_write()) {
945 /* C++, Section 29.3 statement 5 */
946 if (curr->is_seqcst() && last_sc_fence_thread_local &&
947 *act < *last_sc_fence_thread_local) {
// A cycle (rf already reaches act) means this rf choice is infeasible.
948 if (mo_graph->checkReachable(rf, act))
951 priorset->push_back(act);
954 /* C++, Section 29.3 statement 4 */
955 else if (act->is_seqcst() && last_sc_fence_local &&
956 *act < *last_sc_fence_local) {
957 if (mo_graph->checkReachable(rf, act))
960 priorset->push_back(act);
963 /* C++, Section 29.3 statement 6 */
964 else if (last_sc_fence_thread_before &&
965 *act < *last_sc_fence_thread_before) {
966 if (mo_graph->checkReachable(rf, act))
969 priorset->push_back(act);
975 * Include at most one act per-thread that "happens
978 if (act->happens_before(curr)) {
980 if (last_sc_fence_local == NULL ||
981 (*last_sc_fence_local < *act)) {
982 prev_same_thread = act;
985 if (act->is_write()) {
986 if (mo_graph->checkReachable(rf, act))
989 priorset->push_back(act);
// Happened-before read: order its source write before rf instead.
991 ModelAction *prevrf = act->get_reads_from();
992 if (!prevrf->equals(rf)) {
993 if (mo_graph->checkReachable(rf, prevrf))
996 priorset->push_back(prevrf);
998 if (act->get_tid() == curr->get_tid()) {
999 //Can prune curr from obj list
1013 * Updates the mo_graph with the constraints imposed from the current write.
1015 * Basic idea is the following: Go through each other thread and find
1016 * the lastest action that happened before our write. Two cases:
1018 * (1) The action is a write => that write must occur before
1021 * (2) The action is a read => the write that that action read from
1022 * must occur before the current write.
1024 * This method also handles two other issues:
1026 * (I) Sequential Consistency: Making sure that if the current write is
1027 * seq_cst, that it occurs after the previous seq_cst write.
1029 * (II) Sending the write back to non-synchronizing reads.
1031 * @param curr The current action. Must be a write.
1032 * @param send_fv A vector for stashing reads to which we may pass our future
1033 * value. If NULL, then don't record any future values.
1034 * @return True if modification order edges were added; false otherwise
1036 void ModelExecution::w_modification_order(ModelAction *curr)
1038 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1040 ASSERT(curr->is_write());
// Collect all mo-predecessors of curr, committed in one batch at the end.
1042 SnapList<ModelAction *> edgeset;
1044 if (curr->is_seqcst()) {
1045 /* We have to at least see the last sequentially consistent write,
1046 so we are initialized. */
1047 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1048 if (last_seq_cst != NULL) {
1049 edgeset.push_back(last_seq_cst);
1051 //update map for next query
1052 obj_last_sc_map.put(curr->get_location(), curr);
1055 /* Last SC fence in the current thread */
1056 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1058 /* Iterate over all threads */
1059 for (i = 0;i < thrd_lists->size();i++) {
1060 /* Last SC fence in thread i, before last SC fence in current thread */
1061 ModelAction *last_sc_fence_thread_before = NULL;
1062 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1063 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1065 /* Iterate over actions in thread, starting from most recent */
1066 action_list_t *list = &(*thrd_lists)[i];
1067 sllnode<ModelAction*>* rit;
1068 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1069 ModelAction *act = rit->getVal();
1072 * 1) If RMW and it actually read from something, then we
1073 * already have all relevant edges, so just skip to next
1076 * 2) If RMW and it didn't read from anything, we should
1077 * whatever edge we can get to speed up convergence.
1079 * 3) If normal write, we need to look at earlier actions, so
1080 * continue processing list.
1082 if (curr->is_rmw()) {
1083 if (curr->get_reads_from() != NULL)
1091 /* C++, Section 29.3 statement 7 */
1092 if (last_sc_fence_thread_before && act->is_write() &&
1093 *act < *last_sc_fence_thread_before) {
1094 edgeset.push_back(act);
1099 * Include at most one act per-thread that "happens
1102 if (act->happens_before(curr)) {
1104 * Note: if act is RMW, just add edge:
1106 * The following edge should be handled elsewhere:
1107 * readfrom(act) --mo--> act
1109 if (act->is_write())
1110 edgeset.push_back(act);
1111 else if (act->is_read()) {
1112 //if previous read accessed a null, just keep going
1113 edgeset.push_back(act->get_reads_from());
// Commit every collected predecessor --mo--> curr in one call.
1119 mo_graph->addEdges(&edgeset, curr);
1124 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1125 * some constraints. This method checks one the following constraint (others
1126 * require compiler support):
1128 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1129 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1131 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1133 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1135 /* Iterate over all threads */
1136 for (i = 0;i < thrd_lists->size();i++) {
// Earliest write (per thread) that the reader happens-before, scanning
// newest-to-oldest so the last assignment wins.
1137 const ModelAction *write_after_read = NULL;
1139 /* Iterate over actions in thread, starting from most recent */
1140 action_list_t *list = &(*thrd_lists)[i];
1141 sllnode<ModelAction *>* rit;
1142 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1143 ModelAction *act = rit->getVal();
1145 /* Don't disallow due to act == reader */
1146 if (!reader->happens_before(act) || reader == act)
1148 else if (act->is_write())
1149 write_after_read = act;
1150 else if (act->is_read() && act->get_reads_from() != NULL)
1151 write_after_read = act->get_reads_from();
// Disallow if some write the reader happens-before is mo-ordered
// before the candidate writer (the forbidden hb/mo cycle above).
1154 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1161 * Computes the clock vector that happens before propagates from this write.
1163 * @param rf The action that might be part of a release sequence. Must be a
1165 * @return ClockVector of happens before relation.
1168 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// Phase 1: walk backwards through the RMW chain, collecting writes whose
// release-sequence clock vector (rfcv) has not been computed yet.  The walk
// stops at a non-RMW write, an acq+rel RMW, or a cached rfcv.
1169 SnapVector<ModelAction *> * processset = NULL;
1170 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1171 ASSERT(rf->is_write());
1172 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1174 if (processset == NULL)
1175 processset = new SnapVector<ModelAction *>();
1176 processset->push_back(rf);
// Phase 2: process collected writes oldest-first, building/merging the
// appropriate clock vector for each case.
1179 int i = (processset == NULL) ? 0 : processset->size();
1181 ClockVector * vec = NULL;
1183 if (rf->get_rfcv() != NULL) {
1184 vec = rf->get_rfcv();
1185 } else if (rf->is_acquire() && rf->is_release()) {
1187 } else if (rf->is_release() && !rf->is_rmw()) {
1189 } else if (rf->is_release()) {
1190 //have rmw that is release and doesn't have a rfcv
1191 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1194 //operation that isn't release
// Non-release op: fall back to the clock of the last release fence, if any.
1195 if (rf->get_last_fence_release()) {
1197 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1199 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1201 if (vec == NULL && rf->is_rmw()) {
1202 vec = new ClockVector(NULL, NULL);
// Step to the next (younger) collected write in the chain.
1209 rf = (*processset)[i];
1213 if (processset != NULL)
1219 * Performs various bookkeeping operations for the current ModelAction. For
1220 * instance, adds action to the per-object, per-thread action vector and to the
1221 * action trace list of all thread actions.
1223 * @param act is the ModelAction to add.
void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
int tid = id_to_int(act->get_tid());
// seq_cst fences and unlocks go into the per-location obj_map list; the
// returned list node is saved on the action for O(1) removal later.
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
act->setActionRef(list->add_back(act));
// Update action trace, a total order of all actions
action_trace.addAction(act);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
if ((int)vec->size() <= tid) {
// Grow to cover all thread ids allocated so far; each fresh slot is
// placement-new constructed (resize presumably does not construct
// action_list_t elements -- verify against SnapVector semantics).
uint oldsize = vec->size();
vec->resize(priv->next_thread_id);
for(uint i = oldsize;i < priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
fixup_action_list(vec);
// Prunable reads/writes are tracked elsewhere; only record them here
// when pruning is disabled for this action.
if (!canprune && (act->is_read() || act->is_write()))
(*vec)[tid].addAction(act);
// Update thrd_last_action, the last action taken by each thread
if ((int)thrd_last_action.size() <= tid)
thrd_last_action.resize(get_num_threads());
thrd_last_action[tid] = act;
// Update thrd_last_fence_release, the last release fence taken by each thread
if (act->is_fence() && act->is_release()) {
if ((int)thrd_last_fence_release.size() <= tid)
thrd_last_fence_release.resize(get_num_threads());
thrd_last_fence_release[tid] = act;
// A wait is also indexed under the mutex it waits on (the mutex address
// is stored in the action's value field).
if (act->is_wait()) {
void *mutex_loc = (void *) act->get_value();
act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
/** @brief Append an action to the given list (helper for lazy insertion). */
void insertIntoActionList(action_list_t *list, ModelAction *act) {
list->addAction(act);
/** @brief Append an action after giving it a freshly built clock vector
 *  (used for actions created lazily, which never received one). */
void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
// NULL parent: build the clock vector from the action itself.
act->create_cv(NULL);
list->addAction(act);
1278 * Performs various bookkeeping operations for a normal write. The
1279 * complication is that we are typically inserting a normal write
1280 * lazily, so we need to insert it into the middle of lists.
1282 * @param act is the ModelAction to add.
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
int tid = id_to_int(act->get_tid());
// The write was created lazily, so it needs a clock vector as well as a
// slot in the global trace.
insertIntoActionListAndSetCV(&action_trace, act);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
if (tid >= (int)vec->size()) {
// Grow and placement-new construct the newly exposed slots (same
// pattern as add_action_to_lists).
uint oldsize =vec->size();
vec->resize(priv->next_thread_id);
for(uint i=oldsize;i<priv->next_thread_id;i++)
new (&(*vec)[i]) action_list_t();
fixup_action_list(vec);
insertIntoActionList(&(*vec)[tid],act);
ModelAction * lastact = thrd_last_action[tid];
// Update thrd_last_action, the last action taken by each thread
if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
thrd_last_action[tid] = act;
/* Record a write in obj_wr_thrd_map: per-location, per-thread write lists. */
void ModelExecution::add_write_to_lists(ModelAction *write) {
SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
int tid = id_to_int(write->get_tid());
if (tid >= (int)vec->size()) {
// Grow and placement-new construct the newly exposed slots (same
// pattern as add_action_to_lists).
uint oldsize =vec->size();
vec->resize(priv->next_thread_id);
for(uint i=oldsize;i<priv->next_thread_id;i++)
new (&(*vec)[i]) simple_action_list_t();
// Save the list node on the action so it can be unlinked in O(1) later.
write->setActionRef((*vec)[tid].add_back(write));
1322 * @brief Get the last action performed by a particular Thread
1323 * @param tid The thread ID of the Thread in question
1324 * @return The last action in the thread
ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
int threadid = id_to_int(tid);
// A tid beyond the vector's size means the thread has recorded no action.
// NOTE(review): the fall-through NULL return is not visible in this excerpt.
if (threadid < (int)thrd_last_action.size())
return thrd_last_action[id_to_int(tid)];
1336 * @brief Get the last fence release performed by a particular Thread
1337 * @param tid The thread ID of the Thread in question
1338 * @return The last fence release in the thread, if one exists; NULL otherwise
ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
int threadid = id_to_int(tid);
// A tid beyond the vector's size means no release fence has been recorded.
// NOTE(review): the fall-through NULL return is not visible in this excerpt.
if (threadid < (int)thrd_last_fence_release.size())
return thrd_last_fence_release[id_to_int(tid)];
1350 * Gets the last memory_order_seq_cst write (in the total global sequence)
1351 * performed on a particular object (i.e., memory location), not including the
1353 * @param curr The current ModelAction; also denotes the object location to
1355 * @return The last seq_cst write
1357 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1359 void *location = curr->get_location();
1360 return obj_last_sc_map.get(location);
1364 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1365 * performed in a particular thread, prior to a particular fence.
1366 * @param tid The ID of the thread to check
1367 * @param before_fence The fence from which to begin the search; if NULL, then
1368 * search for the most recent fence in the thread.
1369 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
/* All fences should have location FENCE_LOCATION */
simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
// First, position the reverse iterator on before_fence (when supplied),
// so the search below only considers strictly earlier actions.
sllnode<ModelAction*>* rit = list->end();
for (;rit != NULL;rit=rit->getPrev())
if (rit->getVal() == before_fence)
ASSERT(rit->getVal() == before_fence);
// Then scan backwards for the nearest seq_cst fence executed by tid.
for (;rit != NULL;rit=rit->getPrev()) {
ModelAction *act = rit->getVal();
if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1399 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1400 * location). This function identifies the mutex according to the current
1401 * action, which is presumed to perform on the same mutex.
1402 * @param curr The current ModelAction; also denotes the object location to
1404 * @return The last unlock operation
ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
void *location = curr->get_location();
// All actions on this mutex share one obj_map list.
simple_action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
sllnode<ModelAction*>* rit;
// A wait releases the mutex too, so it qualifies as an "unlock" here.
for (rit = list->end();rit != NULL;rit=rit->getPrev())
if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
return rit->getVal();
/* The "parent" action of a thread: its most recent action, falling back to
 * the action that created the thread when it has not acted yet. */
ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
ModelAction *parent = get_last_action(tid);
// Fallback to the creation action.
// NOTE(review): the NULL guard and final return are not visible in this excerpt.
parent = get_thread(tid)->get_creation();
1431 * Returns the clock vector for a given thread.
1432 * @param tid The thread whose clock vector we want
1433 * @return Desired clock vector
1435 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1437 ModelAction *firstaction=get_parent_action(tid);
1438 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare two values at a given operand width.
 *
 * Atomic operations may act on 1-, 2-, 4-, or 8-byte locations, while the
 * model checker stores every value as a uint64_t; equality must therefore be
 * decided after truncating both operands to the operation's actual width.
 *
 * Defect fixed: the comparisons were written as consecutive return
 * statements with no dispatch on @a size, so the 16-/32-bit comparisons were
 * unreachable and 64-bit operands were never compared at full width.
 *
 * @param val1 The first value
 * @param val2 The second value
 * @param size Width of the atomic operand in bytes (1, 2, 4, or 8)
 * @return True if the two values are equal at the given width
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	case 8:
		return val1 == val2;
	default:
		// Unsupported operand size: treat as unequal rather than crash.
		return false;
	}
}
1458 * Build up an initial set of all past writes that this 'read' action may read
1459 * from, as well as any previously-observed future values that must still be valid.
1461 * @param curr is the current ModelAction that we are exploring; it must be a
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
// Per-thread lists of all writes to curr's memory location.
SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
ASSERT(curr->is_read());
ModelAction *last_sc_write = NULL;
// A seq_cst read is constrained by the most recent seq_cst write to the
// same location.
if (curr->is_seqcst())
last_sc_write = get_last_seq_cst_write(curr);
SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
/* Iterate over all threads */
if (thrd_lists != NULL)
for (i = 0;i < thrd_lists->size();i++) {
/* Iterate over actions in thread, starting from most recent */
simple_action_list_t *list = &(*thrd_lists)[i];
sllnode<ModelAction *> * rit;
for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
ModelAction *act = rit->getVal();
/* Don't consider more than one seq_cst write if we are a seq_cst read. */
// NOTE(review): the statement clearing allow_read is not visible in
// this excerpt.
bool allow_read = true;
if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
/* Need to check whether we will have two RMW reading from the same value */
if (curr->is_rmwr()) {
/* It is okay if we have a failing CAS */
if (!curr->is_rmwrcas() ||
valequals(curr->get_value(), act->get_value(), curr->getSize())) {
//Need to make sure we aren't the second RMW
CycleNode * node = mo_graph->getNode_noCreate(act);
if (node != NULL && node->getRMW() != NULL) {
//we are the second RMW
/* Only add feasible reads */
rf_set->push_back(act);
/* Include at most one act per-thread that "happens before" curr */
if (act->happens_before(curr))
if (DBG_ENABLED()) {
model_print("Reached read action:\n");
model_print("End printing read_from_past\n");
/* Dump a human-readable table of an action list, followed by a rolling hash
 * of the trace for quick comparison across executions. */
static void print_list(action_list_t *list)
sllnode<ModelAction*> *it;
model_print("------------------------------------------------------------------------------------\n");
model_print("# t Action type MO Location Value Rf CV\n");
model_print("------------------------------------------------------------------------------------\n");
unsigned int hash = 0;
for (it = list->begin();it != NULL;it=it->getNext()) {
const ModelAction *act = it->getVal();
// Only actions with a real (positive) sequence number are printed.
if (act->get_seq_number() > 0)
// Fold every action's hash into the running trace hash.
hash = hash^(hash<<3)^(it->getVal()->hash());
model_print("HASH %u\n", hash);
model_print("------------------------------------------------------------------------------------\n");
1548 #if SUPPORT_MOD_ORDER_DUMP
/* Write the modification-order graph, reads-from edges (red), and per-thread
 * sequenced-before edges (blue) to "<filename>.dot" in Graphviz format. */
void ModelExecution::dumpGraph(char *filename)
sprintf(buffer, "%s.dot", filename);
FILE *file = fopen(buffer, "w");
fprintf(file, "digraph %s {\n", filename);
mo_graph->dumpNodes(file);
// Tracks the most recent action seen per thread, to draw sb edges.
ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
ModelAction *act = it->getVal();
if (act->is_read()) {
mo_graph->dot_print_node(file, act);
// Edge from the store this read observed.
mo_graph->dot_print_edge(file,
act->get_reads_from(),
"label=\"rf\", color=red, weight=2");
// Connect to the previous action of the same thread, if any.
if (thread_array[act->get_tid()]) {
mo_graph->dot_print_edge(file,
thread_array[id_to_int(act->get_tid())],
"label=\"sb\", color=blue, weight=400");
thread_array[act->get_tid()] = act;
fprintf(file, "}\n");
model_free(thread_array);
1582 /** @brief Prints an execution trace summary. */
void ModelExecution::print_summary()
#if SUPPORT_MOD_ORDER_DUMP
// Also dump the modification-order graph for this execution.
char buffername[100];
sprintf(buffername, "exec%04u", get_execution_number());
mo_graph->dumpGraphToFile(buffername);
sprintf(buffername, "graph%04u", get_execution_number());
dumpGraph(buffername);
model_print("Execution trace %d:", get_execution_number());
// Annotate redundant (all threads sleeping) and buggy executions.
if (scheduler->all_threads_sleeping())
model_print(" SLEEP-SET REDUNDANT");
if (have_bug_reports())
model_print(" DETECTED BUG(S)");
print_list(&action_trace);
/* Print only the tail end of the execution trace (bounded by a length limit
 * presumably defined on a line not visible in this excerpt). */
void ModelExecution::print_tail()
model_print("Execution trace %d:\n", get_execution_number());
sllnode<ModelAction*> *it;
model_print("------------------------------------------------------------------------------------\n");
model_print("# t Action type MO Location Value Rf CV\n");
model_print("------------------------------------------------------------------------------------\n");
unsigned int hash = 0;
// Collect the last `length` actions by walking the trace backwards and
// prepending, so they print in forward order below.
SnapList<ModelAction *> list;
for (it = action_trace.end();it != NULL;it = it->getPrev()) {
if (counter > length)
ModelAction * act = it->getVal();
list.push_front(act);
for (it = list.begin();it != NULL;it=it->getNext()) {
const ModelAction *act = it->getVal();
// Only actions with a real (positive) sequence number are printed.
if (act->get_seq_number() > 0)
// Fold every action's hash into the running trace hash.
hash = hash^(hash<<3)^(it->getVal()->hash());
model_print("HASH %u\n", hash);
model_print("------------------------------------------------------------------------------------\n");
1641 * Add a Thread to the system for the first time. Should only be called once
1643 * @param t The Thread to add
void ModelExecution::add_thread(Thread *t)
// Thread IDs index densely into thread_map.
unsigned int i = id_to_int(t->get_id());
if (i >= thread_map.size())
thread_map.resize(i + 1);
// NOTE(review): the slot assignment (thread_map[i] = t) does not appear in
// this excerpt -- verify it was not accidentally dropped.
// The model-checker's own bookkeeping thread is never scheduled.
if (!t->is_model_thread())
scheduler->add_thread(t);
1656 * @brief Get a Thread reference by its ID
1657 * @param tid The Thread's ID
1658 * @return A Thread reference
Thread * ModelExecution::get_thread(thread_id_t tid) const
unsigned int i = id_to_int(tid);
// Unknown ids fall through.
// NOTE(review): the fall-through NULL return is not visible in this excerpt.
if (i < thread_map.size())
return thread_map[i];
1669 * @brief Get a reference to the Thread in which a ModelAction was executed
1670 * @param act The ModelAction
1671 * @return A Thread reference
1673 Thread * ModelExecution::get_thread(const ModelAction *act) const
1675 return get_thread(act->get_tid());
1679 * @brief Get a Thread reference by its pthread ID
1680 * @param index The pthread's ID
1681 * @return A Thread reference
Thread * ModelExecution::get_pthread(pthread_t pid) {
// pid 1 is reserved for the main thread, pthread ids should start from 2
return get_thread(pid);
// Reinterpret the opaque pthread_t as a 32-bit index into pthread_map.
// NOTE(review): the union declaration and the pid==1 guard are not visible
// in this excerpt.
uint32_t thread_id = x.v;
// pthread_counter is the highest pthread id handed out so far.
if (thread_id < pthread_counter + 1)
return pthread_map[thread_id];
1702 * @brief Check if a Thread is currently enabled
1703 * @param t The Thread to check
1704 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(Thread *t) const
// Delegate to the Scheduler, which owns the enabled-thread set.
return scheduler->is_enabled(t);
1712 * @brief Check if a Thread is currently enabled
1713 * @param tid The ID of the Thread to check
1714 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(thread_id_t tid) const
// Delegate to the Scheduler, which owns the enabled-thread set.
return scheduler->is_enabled(tid);
1722 * @brief Select the next thread to execute based on the curren action
1724 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1725 * actions should be followed by the execution of their child thread. In either
1726 * case, the current action should determine the next thread schedule.
1728 * @param curr The current action
1729 * @return The next thread to run, if the current action will determine this
1730 * selection; otherwise NULL
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
/* Do not split atomic RMW */
// The same thread must run again to complete the write half of the RMW.
if (curr->is_rmwr())
return get_thread(curr);
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
if (curr->get_type() == THREAD_CREATE)
return curr->get_thread_operand();
// pthread_create is handled the same way: schedule the new child next.
if (curr->get_type() == PTHREAD_CREATE) {
return curr->get_thread_operand();
1748 * Takes the next step in the execution, if possible.
1749 * @param curr The current step to take
1750 * @return Returns the next Thread to run, if any; NULL if this execution
Thread * ModelExecution::take_step(ModelAction *curr)
Thread *curr_thrd = get_thread(curr);
ASSERT(curr_thrd->get_state() == THREAD_READY);
ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action may substitute a different ModelAction; the
// returned pointer is authoritative from here on.
curr = check_current_action(curr);
/* Process this action in ModelHistory for records */
model->get_history()->process_action( curr, curr->get_tid() );
// A thread that blocked or finished must leave the runnable set.
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
return action_select_next_thread(curr);
1772 /** This method removes references to an Action before we delete it. */
void ModelExecution::removeAction(ModelAction *act) {
// Unlink from the global trace and from the per-location/per-thread list.
action_trace.removeAction(act);
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
(*vec)[act->get_tid()].removeAction(act);
// Depending on its kind, the action may also hold a saved list-node
// reference into one auxiliary structure; erase via that node in O(1).
if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
list->erase(listref);
} else if (act->is_wait()) {
// Waits are indexed under the mutex address stored in the value field.
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
void *mutex_loc = (void *) act->get_value();
get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
} else if (act->is_free()) {
sllnode<ModelAction *> * listref = act->getActionRef();
if (listref != NULL) {
SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
(*vec)[act->get_tid()].erase(listref);
//Clear it from last_sc_map
if (obj_last_sc_map.get(act->get_location()) == act) {
obj_last_sc_map.remove(act->get_location());
//Remove from Cyclegraph
mo_graph->freeAction(act);
1811 /** Computes clock vector that all running threads have already synchronized to. */
ClockVector * ModelExecution::computeMinimalCV() {
ClockVector *cvmin = NULL;
//Thread 0 isn't a real thread, so skip it..
for(unsigned int i = 1;i < thread_map.size();i++) {
Thread * t = thread_map[i];
// Completed threads no longer constrain what is visible.
if (t->get_state() == THREAD_COMPLETED)
thread_id_t tid = int_to_id(i);
ClockVector * cv = get_cv(tid);
// First live thread seeds cvmin; later threads min-merge into it so the
// result is the component-wise minimum over all running threads.
cvmin = new ClockVector(cv, NULL);
cvmin->minmerge(cv);
1831 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
void ModelExecution::fixupLastAct(ModelAction *act) {
// Replace act with a fresh NOP that inherits its synchronization state
// (clock vector, last release fence) so the thread keeps a valid
// most-recent action after act is removed.
ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
newact->set_seq_number(get_next_seq_num());
newact->create_cv(act);
newact->set_last_fence_release(act->get_last_fence_release());
// canprune=false: the replacement must stay in the per-thread lists.
add_action_to_lists(newact, false);
1841 /** Compute which actions to free. */
/* NOTE(review): many lines of this function (deletes, continues, closing
 * braces) are not visible in this excerpt; comments describe visible logic. */
void ModelExecution::collectActions() {
// Too few actions so far -- nothing worth collecting yet.
if (priv->used_sequence_numbers < params->traceminsize)
//Compute minimal clock vector for all live threads
ClockVector *cvmin = computeMinimalCV();
// Worklist of CycleGraph nodes whose mo-predecessors must be freed.
SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Keep at least traceminsize recent actions; only older ones may be freed.
modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
//Next walk action trace... When we hit an action, see if it is
//invisible (e.g., earlier than the first before the minimum
//clock for the thread... if so erase it and all previous
//actions in cyclegraph
sllnode<ModelAction*> * it;
for (it = action_trace.begin();it != NULL;it=it->getNext()) {
ModelAction *act = it->getVal();
modelclock_t actseq = act->get_seq_number();
//See if we are done
if (actseq > maxtofree)
thread_id_t act_tid = act->get_tid();
modelclock_t tid_clock = cvmin->getClock(act_tid);
//Free if it is invisible or we have set a flag to remove visible actions.
if (actseq <= tid_clock || params->removevisible) {
// Resolve the write to start the backward mo-graph marking from:
// the action itself for writes, its reads-from store for reads.
ModelAction * write;
if (act->is_write()) {
} else if (act->is_read()) {
write = act->get_reads_from();
//Mark everything earlier in MO graph to be freed
CycleNode * cn = mo_graph->getNode_noCreate(write);
queue->push_back(cn);
// Breadth-first walk over incoming mo edges, marking stores free.
while(!queue->empty()) {
CycleNode * node = queue->back();
for(unsigned int i=0;i<node->getNumInEdges();i++) {
CycleNode * prevnode = node->getInEdge(i);
ModelAction * prevact = prevnode->getAction();
if (prevact->get_type() != READY_FREE) {
prevact->set_free();
queue->push_back(prevnode);
//We may need to remove read actions in the window we don't delete to preserve correctness.
for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
ModelAction *act = it2->getVal();
//Do iteration early in case we delete the act
// An incomplete thread's most recent action must not be deleted.
bool islastact = false;
ModelAction *lastact = get_last_action(act->get_tid());
if (act == lastact) {
Thread * th = get_thread(act);
islastact = !th->is_complete();
if (act->is_read()) {
if (act->get_reads_from()->is_free()) {
if (act->is_rmw()) {
//Weaken a RMW from a freed store to a write
act->set_type(ATOMIC_WRITE);
//If we don't delete the action, we should remove references to release fences
const ModelAction *rel_fence =act->get_last_fence_release();
if (rel_fence != NULL) {
modelclock_t relfenceseq = rel_fence->get_seq_number();
thread_id_t relfence_tid = rel_fence->get_tid();
modelclock_t tid_clock = cvmin->getClock(relfence_tid);
//Remove references to irrelevant release fences
if (relfenceseq <= tid_clock)
act->set_last_fence_release(NULL);
//Now we are in the window of old actions that we remove if possible
for (;it != NULL;) {
ModelAction *act = it->getVal();
//Do iteration early since we may delete node...
// Same protection for a live thread's most recent action as above.
bool islastact = false;
ModelAction *lastact = get_last_action(act->get_tid());
if (act == lastact) {
Thread * th = get_thread(act);
islastact = !th->is_complete();
if (act->is_read()) {
if (act->get_reads_from()->is_free()) {
if (act->is_rmw()) {
// Weaken a RMW whose source store was freed into a plain write.
act->set_type(ATOMIC_WRITE);
} else if (act->is_free()) {
} else if (act->is_write()) {
//Do nothing with write that hasn't been marked to be freed
} else if (islastact) {
//Keep the last action for non-read/write actions
} else if (act->is_fence()) {
//Note that acquire fences can always be safely
//removed, but could incur extra overheads in
//traversals. Removing them before the cvmin seems
//like a good compromise.
//Release fences before the cvmin don't do anything
//because everyone has already synchronized.
//Sequentially fences before cvmin are redundant
//because happens-before will enforce same
modelclock_t actseq = act->get_seq_number();
thread_id_t act_tid = act->get_tid();
modelclock_t tid_clock = cvmin->getClock(act_tid);
if (actseq <= tid_clock) {
// Remove reference to act from thrd_last_fence_release
int thread_id = id_to_int( act->get_tid() );
if (thrd_last_fence_release[thread_id] == act) {
thrd_last_fence_release[thread_id] = NULL;
//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
//need to keep most recent unlock/wait for each lock
if(act->is_unlock() || act->is_wait()) {
ModelAction * lastlock = get_last_unlock(act);
if (lastlock != act) {
} else if (act->is_create()) {
// A create is only removable once its child thread has finished.
if (act->get_thread_operand()->is_complete()) {
//If we don't delete the action, we should remove references to release fences
const ModelAction *rel_fence =act->get_last_fence_release();
if (rel_fence != NULL) {
modelclock_t relfenceseq = rel_fence->get_seq_number();
thread_id_t relfence_tid = rel_fence->get_tid();
modelclock_t tid_clock = cvmin->getClock(relfence_tid);
//Remove references to irrelevant release fences
if (relfenceseq <= tid_clock)
act->set_last_fence_release(NULL);
2041 Fuzzer * ModelExecution::getFuzzer() {