11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
/* Per-run statistics: one counter per kind of atomic operation observed.
 * Plain (non-snapshot) statics, unlike the members of model_snapshot_members
 * below; they are reported by print_atomic_accesses(). */
23 static unsigned int atomic_load_count = 0;
24 static unsigned int atomic_store_count = 0;
25 static unsigned int atomic_rmw_count = 0;
27 static unsigned int atomic_fence_count = 0;
28 static unsigned int atomic_lock_count = 0;
29 static unsigned int atomic_trylock_count = 0;
30 static unsigned int atomic_unlock_count = 0;
31 static unsigned int atomic_notify_count = 0;
32 static unsigned int atomic_wait_count = 0;
33 static unsigned int atomic_timedwait_count = 0;
37 * Structure for holding small ModelChecker members that should be snapshotted
39 struct model_snapshot_members {
40 model_snapshot_members() :
41 /* First thread created will have id INITIAL_THREAD_ID */
42 next_thread_id(INITIAL_THREAD_ID),
43 used_sequence_numbers(0),
48 ~model_snapshot_members() {
/* Release every recorded bug_message; this struct owns the heap
 * objects pushed into 'bugs' by ModelExecution::assert_bug().
 * (Loop body elided in this view — presumably `delete bugs[i];`.) */
49 for (unsigned int i = 0;i < bugs.size();i++)
/* Next unassigned thread id; doubles as the user-visible thread count. */
54 unsigned int next_thread_id;
/* Highest sequence number handed out so far (see get_next_seq_num()). */
55 modelclock_t used_sequence_numbers;
56 SnapVector<bug_message *> bugs;
57 /** @brief Incorrectly-ordered synchronization was made */
63 /** @brief Constructor */
64 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
68 thread_map(2), /* We'll always need at least 2 threads */
73 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 priv(new struct model_snapshot_members ()),
82 mo_graph(new CycleGraph()),
84 fuzzer(new NewFuzzer()),
90 /* Initialize a model-checker thread, for special ModelActions */
91 model_thread = new Thread(get_next_id());
92 add_thread(model_thread);
/* Wire the fuzzer and scheduler back to this execution engine. */
93 fuzzer->register_engine(m, this);
94 scheduler->register_engine(this);
/* TLS key used for per-thread cleanup via tlsdestructor. */
96 pthread_key_create(&pthreadkey, tlsdestructor);
100 /** @brief Destructor */
101 ModelExecution::~ModelExecution()
/* Free every Thread created during this execution (including the
 * model-checker thread, which occupies a slot in the thread map). */
103 for (unsigned int i = 0;i < get_num_threads();i++)
104 delete get_thread(int_to_id(i));
/** @return the current execution number, delegated to the owning ModelChecker */
110 int ModelExecution::get_execution_number() const
112 return model->get_execution_number();
/* Look up the per-location vector of action lists, lazily constructing it
 * on a miss. (The hash->put() storing 'tmp' is elided from this view but
 * presumably follows — verify in full source.) */
115 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
117 SnapVector<action_list_t> *tmp = hash->get(ptr);
119 tmp = new SnapVector<action_list_t>();
/* Look up the per-location simple action list, lazily constructing it on a
 * miss (same get-or-create pattern as get_safe_ptr_vect_action above). */
125 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
127 simple_action_list_t *tmp = hash->get(ptr);
129 tmp = new simple_action_list_t();
/* Overload of get_safe_ptr_vect_action for vectors of simple action lists;
 * lazily constructs the vector on a hash miss. */
135 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
137 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
139 tmp = new SnapVector<simple_action_list_t>();
146 * When vectors of action lists are reallocated due to resize, the root address of
147 * action lists may change. Hence we need to fix the parent pointer of the children
/* Walk every list in the (possibly just-reallocated) vector and repair its
 * internal back-pointers. */
150 static void fixup_action_list(SnapVector<action_list_t> * vec)
152 for (uint i = 0;i < vec->size();i++) {
153 action_list_t * list = &(*vec)[i];
/* Bump the per-kind static counter matching this action's type. Several
 * case labels (load, rmw, lock, wait, …) are elided from this view. */
160 static inline void record_atomic_stats(ModelAction * act)
162 switch (act->get_type()) {
164 atomic_store_count++;
173 atomic_fence_count++;
179 atomic_trylock_count++;
182 atomic_unlock_count++;
184 case ATOMIC_NOTIFY_ONE:
185 case ATOMIC_NOTIFY_ALL:
186 atomic_notify_count++;
191 case ATOMIC_TIMEDWAIT:
192 atomic_timedwait_count++;
/** Dump the per-kind atomic-operation counters accumulated by
 *  record_atomic_stats() for this run. */
198 void print_atomic_accesses()
200 model_print("atomic store count: %u\n", atomic_store_count);
201 model_print("atomic load count: %u\n", atomic_load_count);
202 model_print("atomic rmw count: %u\n", atomic_rmw_count);
204 model_print("atomic fence count: %u\n", atomic_fence_count);
205 model_print("atomic lock count: %u\n", atomic_lock_count);
206 model_print("atomic trylock count: %u\n", atomic_trylock_count);
207 model_print("atomic unlock count: %u\n", atomic_unlock_count);
208 model_print("atomic notify count: %u\n", atomic_notify_count);
209 model_print("atomic wait count: %u\n", atomic_wait_count);
210 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
213 /** @return a thread ID for a new Thread */
214 thread_id_t ModelExecution::get_next_id()
/* Post-increment: hands out ids 0, 1, 2, … in creation order. */
216 return priv->next_thread_id++;
219 /** @return the number of user threads created during this execution */
220 unsigned int ModelExecution::get_num_threads() const
/* Ids are dense from 0, so the next unassigned id equals the count. */
222 return priv->next_thread_id;
225 /** @return a sequence number for a new ModelAction */
226 modelclock_t ModelExecution::get_next_seq_num()
/* Pre-increment: the first action gets sequence number 1. */
228 return ++priv->used_sequence_numbers;
231 /** @return the most recently issued sequence number, without advancing it */
232 modelclock_t ModelExecution::get_curr_seq_num()
234 return priv->used_sequence_numbers;
237 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
238 void ModelExecution::restore_last_seq_num()
/* Undo the ++ performed by get_next_seq_num() for the postponed action. */
240 priv->used_sequence_numbers--;
244 * @brief Should the current action wake up a given thread?
246 * @param curr The current action
247 * @param thread The thread that we might wake up
248 * @return True, if we should wake up the sleeping thread; false otherwise
250 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
252 const ModelAction *asleep = thread->get_pending();
253 /* Don't allow partial RMW to wake anyone up */
256 /* Synchronizing actions may have been backtracked */
257 if (asleep->could_synchronize_with(curr))
259 /* All acquire/release fences and fence-acquire/store-release */
260 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
262 /* Fence-release + store can awake load-acquire on the same location */
263 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
264 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
/* Wake only if the fence-release is ordered after the sleeper's last action. */
265 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
268 /* The sleep is literally sleeping */
269 if (asleep->is_sleep()) {
/* Let the fuzzer decide whether a timed sleep should end now. */
270 if (fuzzer->shouldWake(asleep))
/** Scan every thread in the scheduler's sleep set and remove those that
 *  should_wake_up() says the current action can wake. A thread whose
 *  pending action is a sleep is additionally flagged as woken. */
277 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
279 for (unsigned int i = 0;i < get_num_threads();i++) {
280 Thread *thr = get_thread(int_to_id(i));
281 if (scheduler->is_sleep_set(thr)) {
282 if (should_wake_up(curr, thr)) {
283 /* Remove this thread from sleep set */
284 scheduler->remove_sleep(thr);
285 if (thr->get_pending()->is_sleep())
286 thr->set_wakeup_state(true);
/** Record a bug report for this execution; ownership of the new
 *  bug_message passes to priv->bugs (freed in ~model_snapshot_members). */
292 void ModelExecution::assert_bug(const char *msg)
294 priv->bugs.push_back(new bug_message(msg));
298 /** @return True, if any bugs have been reported for this execution */
299 bool ModelExecution::have_bug_reports() const
301 return priv->bugs.size() != 0;
/** @return the list of bug reports recorded for this execution
 *  (body elided in this view — presumably returns &priv->bugs; verify). */
304 SnapVector<bug_message *> * ModelExecution::get_bugs() const
310 * Check whether the current trace has triggered an assertion which should halt
313 * @return True, if the execution should be aborted; false otherwise
315 bool ModelExecution::has_asserted() const
317 return priv->asserted;
321 * Trigger a trace assertion which should cause this execution to be halted.
322 * This can be due to a detected bug or due to an infeasibility that should
325 void ModelExecution::set_assert()
327 priv->asserted = true;
331 * Check if we are in a deadlock. Should only be called at the end of an
332 * execution, although it should not give false positives in the middle of an
333 * execution (there should be some ENABLED thread).
335 * @return True if program is in a deadlock; false otherwise
337 bool ModelExecution::is_deadlocked() const
339 bool blocking_threads = false;
340 for (unsigned int i = 0;i < get_num_threads();i++) {
341 thread_id_t tid = int_to_id(i);
344 Thread *t = get_thread(tid);
/* A user thread stuck with a pending action counts as blocked. */
345 if (!t->is_model_thread() && t->get_pending())
346 blocking_threads = true;
348 return blocking_threads;
352 * Check if this is a complete execution. That is, have all thread completed
353 * execution (rather than exiting because sleep sets have forced a redundant
356 * @return True if the execution is complete.
358 bool ModelExecution::is_complete_execution() const
/* Any still-enabled thread means the execution has not run to completion. */
360 for (unsigned int i = 0;i < get_num_threads();i++)
361 if (is_enabled(int_to_id(i)))
/** Wrap a plain (non-atomic) store that was observed at 'location' into a
 *  NONATOMIC_WRITE ModelAction so atomic reads can legally read from it.
 *  The store's originating thread and clock are recovered via
 *  getStoreThreadAndClock(); the action is inserted into the write lists
 *  and ordered via w_modification_order(). */
366 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
367 uint64_t value = *((const uint64_t *) location);
368 modelclock_t storeclock;
369 thread_id_t storethread;
370 getStoreThreadAndClock(location, &storethread, &storeclock);
/* Mark the location as now tracked atomically, so this conversion
 * happens at most once per store. */
371 setAtomicStoreFlag(location);
372 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
373 act->set_seq_number(storeclock);
374 add_normal_write_to_lists(act);
375 add_write_to_lists(act);
376 w_modification_order(act);
378 model->get_history()->process_action(act, act->get_tid());
384 * Processes a read model action.
385 * @param curr is the read model action to process.
386 * @param rf_set is the set of model actions we can possibly read from
387 * @return True if the read can be pruned from the thread map list.
389 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
391 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
/* If the location previously saw a plain store, convert it into a
 * NONATOMIC_WRITE action and make it a read-from candidate. */
392 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
393 if (hasnonatomicstore) {
394 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
395 rf_set->push_back(nonatomicstore);
398 // Remove writes that violate read modification order
401 while (i < rf_set->size()) {
402 ModelAction * rf = (*rf_set)[i];
/* check_only=true: test mo-coherence without mutating priorset. */
403 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
/* Swap-with-back removal keeps the scan O(n) overall. */
404 (*rf_set)[i] = rf_set->back();
/* Let the fuzzer pick which remaining write this read observes. */
411 int index = fuzzer->selectWrite(curr, rf_set);
413 ModelAction *rf = (*rf_set)[index];
416 bool canprune = false;
417 if (r_modification_order(curr, rf, priorset, &canprune)) {
/* Commit the mo edges implied by reading from 'rf'. */
418 for(unsigned int i=0;i<priorset->size();i++) {
419 mo_graph->addEdge((*priorset)[i], rf);
422 get_thread(curr)->set_return_value(curr->get_return_value());
424 //Update acquire fence clock vector
425 ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
427 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
428 return canprune && (curr->get_type() == ATOMIC_READ);
/* Selected write turned out infeasible: drop it and retry. */
431 (*rf_set)[index] = rf_set->back();
437 * Processes a lock, trylock, or unlock model action. @param curr is
438 * the read model action to process.
440 * The try lock operation checks whether the lock is taken. If not,
441 * it falls to the normal lock operation case. If so, it returns
444 * The lock operation has already been checked that it is enabled, so
445 * it just grabs the lock and synchronizes with the previous unlock.
447 * The unlock operation has to re-enable all of the threads that are
448 * waiting on the lock.
450 * @return True if synchronization was updated; false otherwise
452 bool ModelExecution::process_mutex(ModelAction *curr)
454 cdsc::mutex *mutex = curr->get_mutex();
455 struct cdsc::mutex_state *state = NULL;
458 state = mutex->get_state();
460 switch (curr->get_type()) {
461 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently free; report the
 * outcome to the user thread via its return value. */
462 bool success = !state->locked;
463 curr->set_try_lock(success);
465 get_thread(curr)->set_return_value(0);
468 get_thread(curr)->set_return_value(1);
470 //otherwise fall into the lock case
472 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
473 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
474 // assert_bug("Lock access before initialization");
476 // TODO: lock count for recursive mutexes
/* Record this thread as the lock owner. */
477 state->locked = get_thread(curr);
478 ModelAction *unlock = get_last_unlock(curr);
479 //synchronize with the previous unlock statement
480 if (unlock != NULL) {
481 synchronize(unlock, curr);
487 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
488 if (fuzzer->shouldWait(curr)) {
489 /* wake up the other threads */
490 for (unsigned int i = 0;i < get_num_threads();i++) {
491 Thread *t = get_thread(int_to_id(i));
492 Thread *curr_thrd = get_thread(curr);
493 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
497 /* unlock the lock - after checking who was waiting on it */
498 state->locked = NULL;
500 /* remove old wait action and disable this thread */
501 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
502 for (sllnode<ModelAction *> * it = waiters->begin();it != NULL;it = it->getNext()) {
503 ModelAction * wait = it->getVal();
504 if (wait->get_tid() == curr->get_tid()) {
/* Register this wait on the condvar and put the thread to sleep. */
510 waiters->push_back(curr);
511 scheduler->sleep(get_thread(curr));
516 case ATOMIC_TIMEDWAIT:
517 case ATOMIC_UNLOCK: {
518 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
519 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
520 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
521 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
522 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
523 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
524 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
525 //MUST EVENTUALLY RELEASE...
527 // TODO: lock count for recursive mutexes
528 /* wake up the other threads */
529 for (unsigned int i = 0;i < get_num_threads();i++) {
530 Thread *t = get_thread(int_to_id(i));
531 Thread *curr_thrd = get_thread(curr);
532 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
536 /* unlock the lock - after checking who was waiting on it */
537 state->locked = NULL;
540 case ATOMIC_NOTIFY_ALL: {
541 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
542 //activate all the waiting threads
543 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
544 scheduler->wake(get_thread(rit->getVal()));
549 case ATOMIC_NOTIFY_ONE: {
550 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
551 if (waiters->size() != 0) {
/* The fuzzer picks which single waiter gets notified. */
552 Thread * thread = fuzzer->selectNotify(waiters);
553 scheduler->wake(thread);
565 * Process a write ModelAction
566 * @param curr The ModelAction to process
567 * @return True if the mo_graph was updated or promises were resolved
569 void ModelExecution::process_write(ModelAction *curr)
/* Order the write in the modification-order graph; writes themselves
 * have no meaningful user-visible return value. */
571 w_modification_order(curr);
572 get_thread(curr)->set_return_value(VALUE_NONE);
576 * Process a fence ModelAction
577 * @param curr The ModelAction to process
578 * @return True if synchronization was updated
580 void ModelExecution::process_fence(ModelAction *curr)
583 * fence-relaxed: no-op
584 * fence-release: only log the occurrence (not in this function), for
585 * use in later synchronization
586 * fence-acquire (this function): search for hypothetical release
588 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
/* Acquire fence: fold the accumulated acquire-fence clock vector
 * (built up by prior reads, see process_read) into this fence's cv. */
590 if (curr->is_acquire()) {
591 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
596 * @brief Process the current action for thread-related activity
598 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
599 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
600 * synchronization, etc. This function is a no-op for non-THREAD actions
601 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
603 * @param curr The current action
604 * @return True if synchronization was updated or a thread completed
606 void ModelExecution::process_thread_action(ModelAction *curr)
608 switch (curr->get_type()) {
609 case THREAD_CREATE: {
/* C11 thrd_create: build the new Thread from the params stashed in
 * the action's value and link it to its creating action. */
610 thrd_t *thrd = (thrd_t *)curr->get_location();
611 struct thread_params *params = (struct thread_params *)curr->get_value();
612 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
613 curr->set_thread_operand(th);
615 th->set_creation(curr);
618 case PTHREAD_CREATE: {
/* pthread_create: hand out a dense pthread id and record the new
 * Thread in pthread_map (index = id - 1). */
619 (*(uint32_t *)curr->get_location()) = pthread_counter++;
621 struct pthread_params *params = (struct pthread_params *)curr->get_value();
622 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
623 curr->set_thread_operand(th);
625 th->set_creation(curr);
627 if ( pthread_map.size() < pthread_counter )
628 pthread_map.resize( pthread_counter );
629 pthread_map[ pthread_counter-1 ] = th;
/* Join cases (labels elided in this view): synchronize the joiner
 * with the last action of the finished thread. */
634 Thread *blocking = curr->get_thread_operand();
635 ModelAction *act = get_last_action(blocking->get_id());
636 synchronize(act, curr);
640 Thread *blocking = curr->get_thread_operand();
641 ModelAction *act = get_last_action(blocking->get_id());
642 synchronize(act, curr);
646 case THREADONLY_FINISH:
647 case THREAD_FINISH: {
648 Thread *th = get_thread(curr);
/* Special-case: the initial thread finishing (elided handling). */
649 if (curr->get_type() == THREAD_FINISH &&
650 th == model->getInitThread()) {
656 /* Wake up any joining threads */
657 for (unsigned int i = 0;i < get_num_threads();i++) {
658 Thread *waiting = get_thread(int_to_id(i));
659 if (waiting->waiting_on() == th &&
660 waiting->get_pending()->is_thread_join())
661 scheduler->wake(waiting);
/* Sleep-like case (label elided): park the thread with this action
 * still pending and add it to the scheduler's sleep set. */
670 Thread *th = get_thread(curr);
671 th->set_pending(curr);
672 scheduler->add_sleep(th);
681 * Initialize the current action by performing one or more of the following
682 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
683 * manipulating backtracking sets, allocating and
684 * initializing clock vectors, and computing the promises to fulfill.
686 * @param curr The current action, as passed from the user context; may be
687 * freed/invalidated after the execution of this function, with a different
688 * action "returned" its place (pass-by-reference)
689 * @return True if curr is a newly-explored action; false otherwise
691 bool ModelExecution::initialize_curr_action(ModelAction **curr)
/* Second half of an RMW: fold it into the earlier RMWR action instead
 * of treating it as a fresh action. */
693 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
694 ModelAction *newcurr = process_rmw(*curr);
700 ModelAction *newcurr = *curr;
702 newcurr->set_seq_number(get_next_seq_num());
703 /* Always compute new clock vector */
704 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
706 /* Assign most recent release fence */
707 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
709 return true; /* This was a new ModelAction */
714 * @brief Establish reads-from relation between two actions
716 * Perform basic operations involved with establishing a concrete rf relation,
717 * including setting the ModelAction data and checking for release sequences.
719 * @param act The action that is reading (must be a read)
720 * @param rf The action from which we are reading (must be a write)
722 * @return True if this read established synchronization
725 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
728 ASSERT(rf->is_write());
730 act->set_read_from(rf);
/* An acquire read synchronizes with the release sequence headed by rf:
 * merge the happens-before clock vector propagated from the write. */
731 if (act->is_acquire()) {
732 ClockVector *cv = get_hb_from_write(rf);
735 act->get_cv()->merge(cv);
740 * @brief Synchronizes two actions
742 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
743 * This function performs the synchronization as well as providing other hooks
744 * for other checks along with synchronization.
746 * @param first The left-hand side of the synchronizes-with relation
747 * @param second The right-hand side of the synchronizes-with relation
748 * @return True if the synchronization was successful (i.e., was consistent
749 * with the execution order); false otherwise
751 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* Sanity check: sw edges must agree with sequence-number order. */
753 if (*second < *first) {
754 ASSERT(0); //This should not happen
757 return second->synchronize_with(first);
761 * @brief Check whether a model action is enabled.
763 * Checks whether an operation would be successful (i.e., is a lock already
764 * locked, or is the joined thread already complete).
766 * For yield-blocking, yields are never enabled.
768 * @param curr is the ModelAction to check whether it is enabled.
769 * @return a bool that indicates whether the action is enabled.
771 bool ModelExecution::check_action_enabled(ModelAction *curr) {
772 if (curr->is_lock()) {
773 cdsc::mutex *lock = curr->get_mutex();
774 struct cdsc::mutex_state *state = lock->get_state();
776 Thread *lock_owner = (Thread *)state->locked;
777 Thread *curr_thread = get_thread(curr);
/* Recursive mutex re-locked by its owner: handled specially
 * (branch body elided in this view). */
778 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
784 } else if (curr->is_thread_join()) {
/* Join blocks until the target thread completes. */
785 Thread *blocking = curr->get_thread_operand();
786 if (!blocking->is_complete()) {
789 } else if (curr->is_sleep()) {
/* The fuzzer may elide the sleep entirely. */
790 if (!fuzzer->shouldSleep(curr))
798 * This is the heart of the model checker routine. It performs model-checking
799 * actions corresponding to a given "current action." Among other processes, it
800 * calculates reads-from relationships, updates synchronization clock vectors,
801 * forms a memory_order constraints graph, and handles replay/backtrack
802 * execution when running permutations of previously-observed executions.
804 * @param curr The current action to process
805 * @return The ModelAction that is actually executed; may be different than
808 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* RMW/RMWC here means this action is the completion half of an
 * atomic read-modify-write whose read half was already processed. */
811 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
812 bool newly_explored = initialize_curr_action(&curr);
816 wake_up_sleeping_actions(curr);
818 SnapVector<ModelAction *> * rf_set = NULL;
819 /* Build may_read_from set for newly-created actions */
820 if (newly_explored && curr->is_read())
821 rf_set = build_may_read_from(curr);
823 bool canprune = false;
825 if (curr->is_read() && !second_part_of_rmw) {
826 canprune = process_read(curr, rf_set);
829 ASSERT(rf_set == NULL);
831 /* Add the action to lists */
832 if (!second_part_of_rmw) {
834 record_atomic_stats(curr);
836 add_action_to_lists(curr, canprune);
839 if (curr->is_write())
840 add_write_to_lists(curr);
842 process_thread_action(curr);
/* Dispatch to the per-kind processors (bodies elided in this view). */
844 if (curr->is_write())
847 if (curr->is_fence())
850 if (curr->is_mutex_op())
856 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
857 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
/* Merge this completion into the thread's previous (read) half, then
 * add the atomicity edge: the write read-from immediately precedes the
 * RMW in modification order. */
858 ModelAction *lastread = get_last_action(act->get_tid());
859 lastread->process_rmw(act);
861 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
867 * @brief Updates the mo_graph with the constraints imposed from the current
870 * Basic idea is the following: Go through each other thread and find
871 * the last action that happened before our read. Two cases:
873 * -# The action is a write: that write must either occur before
874 * the write we read from or be the write we read from.
875 * -# The action is a read: the write that that action read from
876 * must occur before the write we read from or be the same write.
878 * @param curr The current action. Must be a read.
879 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
880 * @param check_only If true, then only check whether the current action satisfies
881 * read modification order or not, without modifying priorset and canprune.
883 * @return True if modification order edges were added; false otherwise
886 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
887 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
889 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
890 ASSERT(curr->is_read());
892 /* Last SC fence in the current thread */
893 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
895 int tid = curr->get_tid();
897 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
898 if ((int)thrd_lists->size() <= tid) {
899 uint oldsize = thrd_lists->size();
900 thrd_lists->resize(priv->next_thread_id);
/* Placement-new the fresh slots, then repair child back-pointers
 * invalidated by the realloc (see fixup_action_list). */
901 for(uint i = oldsize;i < priv->next_thread_id;i++)
902 new (&(*thrd_lists)[i]) action_list_t();
904 fixup_action_list(thrd_lists);
907 ModelAction *prev_same_thread = NULL;
908 /* Iterate over all threads */
909 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
910 /* Last SC fence in thread tid */
911 ModelAction *last_sc_fence_thread_local = NULL;
913 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
915 /* Last SC fence in thread tid, before last SC fence in current thread */
916 ModelAction *last_sc_fence_thread_before = NULL;
917 if (last_sc_fence_local)
918 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
920 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
921 if (prev_same_thread != NULL &&
922 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
923 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
927 /* Iterate over actions in thread, starting from most recent */
928 action_list_t *list = &(*thrd_lists)[tid];
929 sllnode<ModelAction *> * rit;
930 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
931 ModelAction *act = rit->getVal();
936 /* Don't want to add reflexive edges on 'rf' */
937 if (act->equals(rf)) {
938 if (act->happens_before(curr))
944 if (act->is_write()) {
945 /* C++, Section 29.3 statement 5 */
946 if (curr->is_seqcst() && last_sc_fence_thread_local &&
947 *act < *last_sc_fence_thread_local) {
/* A cycle (rf reachable back to act) means this rf choice is
 * infeasible; otherwise act must precede rf in mo. */
948 if (mo_graph->checkReachable(rf, act))
951 priorset->push_back(act);
954 /* C++, Section 29.3 statement 4 */
955 else if (act->is_seqcst() && last_sc_fence_local &&
956 *act < *last_sc_fence_local) {
957 if (mo_graph->checkReachable(rf, act))
960 priorset->push_back(act);
963 /* C++, Section 29.3 statement 6 */
964 else if (last_sc_fence_thread_before &&
965 *act < *last_sc_fence_thread_before) {
966 if (mo_graph->checkReachable(rf, act))
969 priorset->push_back(act);
975 * Include at most one act per-thread that "happens
978 if (act->happens_before(curr)) {
980 if (last_sc_fence_local == NULL ||
981 (*last_sc_fence_local < *act)) {
982 prev_same_thread = act;
985 if (act->is_write()) {
986 if (mo_graph->checkReachable(rf, act))
989 priorset->push_back(act);
/* act is a read: order its source write before rf instead. */
991 ModelAction *prevrf = act->get_reads_from();
992 if (!prevrf->equals(rf)) {
993 if (mo_graph->checkReachable(rf, prevrf))
996 priorset->push_back(prevrf);
998 if (act->get_tid() == curr->get_tid()) {
999 //Can prune curr from obj list
1013 * Updates the mo_graph with the constraints imposed from the current write.
1015 * Basic idea is the following: Go through each other thread and find
1016 * the latest action that happened before our write. Two cases:
1018 * (1) The action is a write => that write must occur before
1021 * (2) The action is a read => the write that that action read from
1022 * must occur before the current write.
1024 * This method also handles two other issues:
1026 * (I) Sequential Consistency: Making sure that if the current write is
1027 * seq_cst, that it occurs after the previous seq_cst write.
1029 * (II) Sending the write back to non-synchronizing reads.
1031 * @param curr The current action. Must be a write.
1032 * @param send_fv A vector for stashing reads to which we may pass our future
1033 * value. If NULL, then don't record any future values.
1034 * @return True if modification order edges were added; false otherwise
1036 void ModelExecution::w_modification_order(ModelAction *curr)
1038 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1040 ASSERT(curr->is_write());
/* Writes that must precede curr in modification order; committed in a
 * single mo_graph->addEdges() call at the end. */
1042 SnapList<ModelAction *> edgeset;
1044 if (curr->is_seqcst()) {
1045 /* We have to at least see the last sequentially consistent write,
1046 so we are initialized. */
1047 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1048 if (last_seq_cst != NULL) {
1049 edgeset.push_back(last_seq_cst);
1051 //update map for next query
1052 obj_last_sc_map.put(curr->get_location(), curr);
1055 /* Last SC fence in the current thread */
1056 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1058 /* Iterate over all threads */
1059 for (i = 0;i < thrd_lists->size();i++) {
1060 /* Last SC fence in thread i, before last SC fence in current thread */
1061 ModelAction *last_sc_fence_thread_before = NULL;
1062 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1063 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1065 /* Iterate over actions in thread, starting from most recent */
1066 action_list_t *list = &(*thrd_lists)[i];
1067 sllnode<ModelAction*>* rit;
1068 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1069 ModelAction *act = rit->getVal();
1072 * 1) If RMW and it actually read from something, then we
1073 * already have all relevant edges, so just skip to next
1076 * 2) If RMW and it didn't read from anything, we should
1077 * whatever edge we can get to speed up convergence.
1079 * 3) If normal write, we need to look at earlier actions, so
1080 * continue processing list.
1082 if (curr->is_rmw()) {
1083 if (curr->get_reads_from() != NULL)
1091 /* C++, Section 29.3 statement 7 */
1092 if (last_sc_fence_thread_before && act->is_write() &&
1093 *act < *last_sc_fence_thread_before) {
1094 edgeset.push_back(act);
1099 * Include at most one act per-thread that "happens
1102 if (act->happens_before(curr)) {
1104 * Note: if act is RMW, just add edge:
1106 * The following edge should be handled elsewhere:
1107 * readfrom(act) --mo--> act
1109 if (act->is_write())
1110 edgeset.push_back(act);
1111 else if (act->is_read()) {
1112 //if previous read accessed a null, just keep going
1113 edgeset.push_back(act->get_reads_from());
/* Commit all collected mo constraints pointing at curr. */
1119 mo_graph->addEdges(&edgeset, curr);
1124 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1125 * some constraints. This method checks one the following constraint (others
1126 * require compiler support):
1128 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1129 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1131 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1133 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1135 /* Iterate over all threads */
1136 for (i = 0;i < thrd_lists->size();i++) {
/* Track the earliest write (or read-from source) in thread i that
 * happens after 'reader'; this plays the role of Y/A above. */
1137 const ModelAction *write_after_read = NULL;
1139 /* Iterate over actions in thread, starting from most recent */
1140 action_list_t *list = &(*thrd_lists)[i];
1141 sllnode<ModelAction *>* rit;
1142 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1143 ModelAction *act = rit->getVal();
1145 /* Don't disallow due to act == reader */
1146 if (!reader->happens_before(act) || reader == act)
1148 else if (act->is_write())
1149 write_after_read = act;
1150 else if (act->is_read() && act->get_reads_from() != NULL)
1151 write_after_read = act->get_reads_from();
/* Reading 'writer' is illegal if some hb-later write is mo-before it. */
1154 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1161 * Computes the clock vector that happens before propagates from this write.
1163 * @param rf The action that might be part of a release sequence. Must be a
1165 * @return ClockVector of happens before relation.
1168 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
/* Phase 1: walk back through the RMW chain, collecting writes whose
 * release-sequence clock vector (rfcv) has not been computed yet. The
 * walk stops at a non-RMW write, an acq+rel RMW, or a cached rfcv. */
1169 SnapVector<ModelAction *> * processset = NULL;
1170 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1171 ASSERT(rf->is_write());
1172 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1174 if (processset == NULL)
1175 processset = new SnapVector<ModelAction *>();
1176 processset->push_back(rf);
/* Phase 2: replay the collected writes oldest-first, building/merging
 * the clock vector and caching it as each write's rfcv. */
1179 int i = (processset == NULL) ? 0 : processset->size();
1181 ClockVector * vec = NULL;
1183 if (rf->get_rfcv() != NULL) {
1184 vec = rf->get_rfcv();
1185 } else if (rf->is_acquire() && rf->is_release()) {
1187 } else if (rf->is_release() && !rf->is_rmw()) {
1189 } else if (rf->is_release()) {
1190 //have rmw that is release and doesn't have a rfcv
1191 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1194 //operation that isn't release
1195 if (rf->get_last_fence_release()) {
1197 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1199 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1205 rf = (*processset)[i];
1209 if (processset != NULL)
1215 * Performs various bookkeeping operations for the current ModelAction. For
1216 * instance, adds action to the per-object, per-thread action vector and to the
1217 * action trace list of all thread actions.
1219 * @param act is the ModelAction to add.
// @param canprune if true, reads/writes are NOT recorded in the per-location,
//        per-thread list (they are candidates for later pruning).
1221 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1223 int tid = id_to_int(act->get_tid());
// SC fences and unlocks are indexed per-location in obj_map so that
// get_last_seq_cst_fence()/get_last_unlock() can find them; the returned
// list node is cached on the action for O(1) removal in removeAction().
1224 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1225 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1226 act->setActionRef(list->add_back(act));
1229 // Update action trace, a total order of all actions
1230 action_trace.addAction(act);
1233 // Update obj_thrd_map, a per location, per thread, order of actions
1234 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector to cover all thread ids allocated so far;
// placement-new initializes only the newly exposed slots.
1235 if ((int)vec->size() <= tid) {
1236 uint oldsize = vec->size();
1237 vec->resize(priv->next_thread_id);
1238 for(uint i = oldsize;i < priv->next_thread_id;i++)
1239 new (&(*vec)[i]) action_list_t();
1241 fixup_action_list(vec);
// Prunable reads/writes are deliberately kept out of the per-thread list.
1243 if (!canprune && (act->is_read() || act->is_write()))
1244 (*vec)[tid].addAction(act);
1246 // Update thrd_last_action, the last action taken by each thread
1247 if ((int)thrd_last_action.size() <= tid)
1248 thrd_last_action.resize(get_num_threads());
1249 thrd_last_action[tid] = act;
1251 // Update thrd_last_fence_release, the last release fence taken by each thread
1252 if (act->is_fence() && act->is_release()) {
1253 if ((int)thrd_last_fence_release.size() <= tid)
1254 thrd_last_fence_release.resize(get_num_threads());
1255 thrd_last_fence_release[tid] = act;
1258 if (act->is_wait()) {
// A wait's value field carries the associated mutex address; index the wait
// under the mutex location so get_last_unlock() can also see it.
1259 void *mutex_loc = (void *) act->get_value();
1260 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
// Append an action to a lazily-maintained action list (no clock-vector work).
1264 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1265 list->addAction(act);
// Append an action after creating a fresh clock vector for it (no parent);
// used when inserting actions that bypassed the normal bookkeeping path.
1268 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1269 act->create_cv(NULL);
1270 list->addAction(act);
1274 * Performs various bookkeeping operations for a normal write. The
1275 * complication is that we are typically inserting a normal write
1276 * lazily, so we need to insert it into the middle of lists.
1278 * @param act is the ModelAction to add.
1281 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1283 int tid = id_to_int(act->get_tid());
// Give the lazily-inserted write a clock vector and add it to the global trace.
1284 insertIntoActionListAndSetCV(&action_trace, act);
1286 // Update obj_thrd_map, a per location, per thread, order of actions
1287 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector on demand, constructing new slots in place.
1288 if (tid >= (int)vec->size()) {
1289 uint oldsize =vec->size();
1290 vec->resize(priv->next_thread_id);
1291 for(uint i=oldsize;i<priv->next_thread_id;i++)
1292 new (&(*vec)[i]) action_list_t();
1294 fixup_action_list(vec);
1296 insertIntoActionList(&(*vec)[tid],act);
1298 ModelAction * lastact = thrd_last_action[tid];
1299 // Update thrd_last_action, the last action taken by each thread
// Sequence-number equality handles lazy insertion: the write only becomes the
// thread's last action if it shares the current latest sequence number.
1300 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1301 thrd_last_action[tid] = act;
// Records an atomic write in obj_wr_thrd_map, the per-location, per-thread
// index of writes consulted when building may-read-from sets.
1305 void ModelExecution::add_write_to_lists(ModelAction *write) {
1306 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1307 int tid = id_to_int(write->get_tid());
// Grow the per-thread vector on demand, constructing new slots in place.
1308 if (tid >= (int)vec->size()) {
1309 uint oldsize =vec->size();
1310 vec->resize(priv->next_thread_id);
1311 for(uint i=oldsize;i<priv->next_thread_id;i++)
1312 new (&(*vec)[i]) simple_action_list_t();
// Cache the list node on the action so removeAction() can erase it in O(1).
1314 write->setActionRef((*vec)[tid].add_back(write));
1318 * @brief Get the last action performed by a particular Thread
1319 * @param tid The thread ID of the Thread in question
1320 * @return The last action in the thread
1322 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1324 int threadid = id_to_int(tid);
// Ids beyond the vector fall through to the NULL return (on an elided line).
1325 if (threadid < (int)thrd_last_action.size())
1326 return thrd_last_action[id_to_int(tid)];
1332 * @brief Get the last fence release performed by a particular Thread
1333 * @param tid The thread ID of the Thread in question
1334 * @return The last fence release in the thread, if one exists; NULL otherwise
1336 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1338 int threadid = id_to_int(tid);
// Same bounds-check pattern as get_last_action(); NULL fallback is elided.
1339 if (threadid < (int)thrd_last_fence_release.size())
1340 return thrd_last_fence_release[id_to_int(tid)];
1346 * Gets the last memory_order_seq_cst write (in the total global sequence)
1347 * performed on a particular object (i.e., memory location), not including the
1349 * @param curr The current ModelAction; also denotes the object location to
1351 * @return The last seq_cst write
1353 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1355 void *location = curr->get_location();
// obj_last_sc_map caches the most recent SC write per location; the map
// lookup yields NULL when no SC write to this location has been recorded.
1356 return obj_last_sc_map.get(location);
1360 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1361 * performed in a particular thread, prior to a particular fence.
1362 * @param tid The ID of the thread to check
1363 * @param before_fence The fence from which to begin the search; if NULL, then
1364 * search for the most recent fence in the thread.
1365 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1367 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1369 /* All fences should have location FENCE_LOCATION */
1370 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
// Phase 1: when a starting fence is given, rewind the iterator to it so the
// scan below only sees strictly earlier actions.
1375 sllnode<ModelAction*>* rit = list->end();
1378 for (;rit != NULL;rit=rit->getPrev())
1379 if (rit->getVal() == before_fence)
// before_fence must be present in the fence list.
1382 ASSERT(rit->getVal() == before_fence);
// Phase 2: scan backwards for the nearest SC fence of the requested thread.
1386 for (;rit != NULL;rit=rit->getPrev()) {
1387 ModelAction *act = rit->getVal();
1388 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1395 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1396 * location). This function identifies the mutex according to the current
1397 * action, which is presumed to perform on the same mutex.
1398 * @param curr The current ModelAction; also denotes the object location to
1400 * @return The last unlock operation
1402 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1404 void *location = curr->get_location();
1406 simple_action_list_t *list = obj_map.get(location);
1410 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Wait operations also release the mutex, so they count as unlocks here.
1411 sllnode<ModelAction*>* rit;
1412 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1413 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1414 return rit->getVal();
// Returns the action a thread's clock vector derives from: its own last
// action, or its creation action when it has not executed anything yet.
1418 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1420 ModelAction *parent = get_last_action(tid);
// Fallback to the THREAD_CREATE action (guard condition is elided here).
1422 parent = get_thread(tid)->get_creation();
1427 * Returns the clock vector for a given thread.
1428 * @param tid The thread whose clock vector we want
1429 * @return Desired clock vector
1431 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1433 ModelAction *firstaction=get_parent_action(tid);
1434 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compare two values truncated to the access size in bytes, so narrower
// atomics (1/2/4 bytes) compare only their low-order bytes.
1437 bool valequals(uint64_t val1, uint64_t val2, int size) {
1440 return ((uint8_t)val1) == ((uint8_t)val2);
1442 return ((uint16_t)val1) == ((uint16_t)val2);
1444 return ((uint32_t)val1) == ((uint32_t)val2);
1454 * Build up an initial set of all past writes that this 'read' action may read
1455 * from, as well as any previously-observed future values that must still be valid.
1457 * @param curr is the current ModelAction that we are exploring; it must be a
1460 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1462 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1464 ASSERT(curr->is_read());
1466 ModelAction *last_sc_write = NULL;
// An SC read is restricted with respect to the most recent SC write to
// this same location (see the seq_cst filter below).
1468 if (curr->is_seqcst())
1469 last_sc_write = get_last_seq_cst_write(curr);
// The caller takes ownership of the returned vector.
1471 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1473 /* Iterate over all threads */
1474 if (thrd_lists != NULL)
1475 for (i = 0;i < thrd_lists->size();i++) {
1476 /* Iterate over actions in thread, starting from most recent */
1477 simple_action_list_t *list = &(*thrd_lists)[i];
1478 sllnode<ModelAction *> * rit;
1479 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1480 ModelAction *act = rit->getVal();
1485 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1486 bool allow_read = true;
// Disallow SC writes other than the latest one, and any write that
// happens before that latest SC write.
1488 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1491 /* Need to check whether we will have two RMW reading from the same value */
1492 if (curr->is_rmwr()) {
1493 /* It is okay if we have a failing CAS */
1494 if (!curr->is_rmwrcas() ||
1495 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1496 //Need to make sure we aren't the second RMW
1497 CycleNode * node = mo_graph->getNode_noCreate(act);
// Another RMW already reads from this write; atomicity forbids a second.
1498 if (node != NULL && node->getRMW() != NULL) {
1499 //we are the second RMW
1506 /* Only add feasible reads */
1507 rf_set->push_back(act);
1510 /* Include at most one act per-thread that "happens before" curr */
1511 if (act->happens_before(curr))
1516 if (DBG_ENABLED()) {
1517 model_print("Reached read action:\n");
1519 model_print("End printing read_from_past\n");
// Pretty-prints an action list as a table, then prints a trace checksum
// folded from each action's hash (useful for comparing executions).
1524 static void print_list(action_list_t *list)
1526 sllnode<ModelAction*> *it;
1528 model_print("------------------------------------------------------------------------------------\n");
1529 model_print("# t Action type MO Location Value Rf CV\n");
1530 model_print("------------------------------------------------------------------------------------\n");
1532 unsigned int hash = 0;
1534 for (it = list->begin();it != NULL;it=it->getNext()) {
1535 const ModelAction *act = it->getVal();
// Only actions with a real sequence number are printed (print call elided).
1536 if (act->get_seq_number() > 0)
// Fold every action's hash into the running trace checksum.
1538 hash = hash^(hash<<3)^(it->getVal()->hash());
1540 model_print("HASH %u\n", hash);
1541 model_print("------------------------------------------------------------------------------------\n");
1544 #if SUPPORT_MOD_ORDER_DUMP
// Writes the modification-order graph plus reads-from ("rf", red) and
// sequenced-before ("sb", blue) edges to "<filename>.dot" for Graphviz.
1545 void ModelExecution::dumpGraph(char *filename)
1548 sprintf(buffer, "%s.dot", filename);
1549 FILE *file = fopen(buffer, "w");
1550 fprintf(file, "digraph %s {\n", filename);
1551 mo_graph->dumpNodes(file);
// thread_array[tid] remembers each thread's previous action so consecutive
// actions in a thread can be linked with an "sb" edge.
1552 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1554 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1555 ModelAction *act = it->getVal();
1556 if (act->is_read()) {
1557 mo_graph->dot_print_node(file, act);
// Edge from the write this read observes to the read itself.
1558 mo_graph->dot_print_edge(file,
1559 act->get_reads_from(),
1561 "label=\"rf\", color=red, weight=2");
1563 if (thread_array[act->get_tid()]) {
1564 mo_graph->dot_print_edge(file,
1565 thread_array[id_to_int(act->get_tid())],
1567 "label=\"sb\", color=blue, weight=400");
1570 thread_array[act->get_tid()] = act;
1572 fprintf(file, "}\n");
1573 model_free(thread_array);
1578 /** @brief Prints an execution trace summary. */
1579 void ModelExecution::print_summary()
1581 #if SUPPORT_MOD_ORDER_DUMP
// Optionally dump this execution's modification-order graph to numbered files.
1582 char buffername[100];
1583 sprintf(buffername, "exec%04u", get_execution_number());
1584 mo_graph->dumpGraphToFile(buffername);
1585 sprintf(buffername, "graph%04u", get_execution_number());
1586 dumpGraph(buffername);
1589 model_print("Execution trace %d:", get_execution_number());
// Annotate the header with notable execution-wide conditions.
1590 if (scheduler->all_threads_sleeping())
1591 model_print(" SLEEP-SET REDUNDANT");
1592 if (have_bug_reports())
1593 model_print(" DETECTED BUG(S)");
// Print the full action trace in tabular form.
1597 print_list(&action_trace);
// Prints only the tail of the execution trace, using the same table format
// and checksum scheme as print_list().  The number of actions shown is
// bounded by `length` (declared on an elided line — presumably a fixed
// tail size; confirm against full source).
1602 void ModelExecution::print_tail()
1604 model_print("Execution trace %d:\n", get_execution_number());
1606 sllnode<ModelAction*> *it;
1608 model_print("------------------------------------------------------------------------------------\n");
1609 model_print("# t Action type MO Location Value Rf CV\n");
1610 model_print("------------------------------------------------------------------------------------\n");
1612 unsigned int hash = 0;
// Walk backwards collecting up to `length` actions, pushing to the front so
// the second loop prints them in forward (program) order.
1616 SnapList<ModelAction *> list;
1617 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1618 if (counter > length)
1621 ModelAction * act = it->getVal();
1622 list.push_front(act);
1626 for (it = list.begin();it != NULL;it=it->getNext()) {
1627 const ModelAction *act = it->getVal();
// Only actions with a real sequence number are printed (print call elided).
1628 if (act->get_seq_number() > 0)
1630 hash = hash^(hash<<3)^(it->getVal()->hash());
1632 model_print("HASH %u\n", hash);
1633 model_print("------------------------------------------------------------------------------------\n");
1637 * Add a Thread to the system for the first time. Should only be called once
1639 * @param t The Thread to add
1641 void ModelExecution::add_thread(Thread *t)
1643 unsigned int i = id_to_int(t->get_id());
// Grow the thread map so the new thread's id is a valid index.
1644 if (i >= thread_map.size())
1645 thread_map.resize(i + 1);
// The model-checker's internal bookkeeping thread is never scheduled.
1647 if (!t->is_model_thread())
1648 scheduler->add_thread(t);
1652 * @brief Get a Thread reference by its ID
1653 * @param tid The Thread's ID
1654 * @return A Thread reference
1656 Thread * ModelExecution::get_thread(thread_id_t tid) const
1658 unsigned int i = id_to_int(tid);
// Out-of-range ids fall through to the NULL return (on an elided line).
1659 if (i < thread_map.size())
1660 return thread_map[i];
1665 * @brief Get a reference to the Thread in which a ModelAction was executed
1666 * @param act The ModelAction
1667 * @return A Thread reference
1669 Thread * ModelExecution::get_thread(const ModelAction *act) const
1671 return get_thread(act->get_tid());
1675 * @brief Get a Thread reference by its pthread ID
1676 * @param index The pthread's ID
1677 * @return A Thread reference
1679 Thread * ModelExecution::get_pthread(pthread_t pid) {
1680 // pid 1 is reserved for the main thread, pthread ids should start from 2
1682 return get_thread(pid);
// Non-main pthreads: decode the pthread_t into its numeric id and look it up
// in pthread_map (the decode step is partially elided in this view).
1689 uint32_t thread_id = x.v;
1691 if (thread_id < pthread_counter + 1)
1692 return pthread_map[thread_id];
1698 * @brief Check if a Thread is currently enabled
1699 * @param t The Thread to check
1700 * @return True if the Thread is currently enabled
1702 bool ModelExecution::is_enabled(Thread *t) const
1704 return scheduler->is_enabled(t);
1708 * @brief Check if a Thread is currently enabled
1709 * @param tid The ID of the Thread to check
1710 * @return True if the Thread is currently enabled
1712 bool ModelExecution::is_enabled(thread_id_t tid) const
1714 return scheduler->is_enabled(tid);
1718 * @brief Select the next thread to execute based on the current action
1720 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1721 * actions should be followed by the execution of their child thread. In either
1722 * case, the current action should determine the next thread schedule.
1724 * @param curr The current action
1725 * @return The next thread to run, if the current action will determine this
1726 * selection; otherwise NULL
1728 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1730 /* Do not split atomic RMW */
// The same thread must run again to complete the write half of the RMW.
1731 if (curr->is_rmwr())
1732 return get_thread(curr);
1733 /* Follow CREATE with the created thread */
1734 /* which is not needed, because model.cc takes care of this */
1735 if (curr->get_type() == THREAD_CREATE)
1736 return curr->get_thread_operand();
1737 if (curr->get_type() == PTHREAD_CREATE) {
1738 return curr->get_thread_operand();
1744 * Takes the next step in the execution, if possible.
1745 * @param curr The current step to take
1746 * @return Returns the next Thread to run, if any; NULL if this execution
1749 Thread * ModelExecution::take_step(ModelAction *curr)
1751 Thread *curr_thrd = get_thread(curr);
1752 ASSERT(curr_thrd->get_state() == THREAD_READY);
1754 ASSERT(check_action_enabled(curr));	/* May have side effects? */
// check_current_action() may replace curr with a different ModelAction
// (e.g., a recycled/merged one), so re-bind the pointer.
1755 curr = check_current_action(curr);
1758 /* Process this action in ModelHistory for records */
1760 model->get_history()->process_action( curr, curr->get_tid() );
// Threads that blocked or completed during this step leave the scheduler.
1762 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1763 scheduler->remove_thread(curr_thrd);
// Let the action dictate the next thread (RMW/CREATE), or NULL for free choice.
1765 return action_select_next_thread(curr);
1768 /** This method removes references to an Action before we delete it. */
1770 void ModelExecution::removeAction(ModelAction *act) {
// Drop the action from the global trace and its per-location, per-thread list.
1772 action_trace.removeAction(act);
1775 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1776 (*vec)[act->get_tid()].removeAction(act);
// Erase the cached list node, mirroring where add_action_to_lists() /
// add_write_to_lists() inserted it: SC fences and unlocks live in obj_map
// keyed by location, waits in obj_map keyed by mutex, freed writes in
// obj_wr_thrd_map.
1778 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1779 sllnode<ModelAction *> * listref = act->getActionRef();
1780 if (listref != NULL) {
1781 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1782 list->erase(listref);
1784 } else if (act->is_wait()) {
1785 sllnode<ModelAction *> * listref = act->getActionRef();
1786 if (listref != NULL) {
1787 void *mutex_loc = (void *) act->get_value();
1788 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1790 } else if (act->is_free()) {
1791 sllnode<ModelAction *> * listref = act->getActionRef();
1792 if (listref != NULL) {
1793 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1794 (*vec)[act->get_tid()].erase(listref);
1797 //Clear it from last_sc_map
1798 if (obj_last_sc_map.get(act->get_location()) == act) {
1799 obj_last_sc_map.remove(act->get_location());
1802 //Remove from Cyclegraph
1803 mo_graph->freeAction(act);
1807 /** Computes clock vector that all running threads have already synchronized to. */
1809 ClockVector * ModelExecution::computeMinimalCV() {
1810 ClockVector *cvmin = NULL;
1811 //Thread 0 isn't a real thread, so skip it..
1812 for(unsigned int i = 1;i < thread_map.size();i++) {
1813 Thread * t = thread_map[i];
// Completed threads no longer constrain the minimum (skip is elided).
1814 if (t->get_state() == THREAD_COMPLETED)
1816 thread_id_t tid = int_to_id(i);
1817 ClockVector * cv = get_cv(tid);
// First live thread seeds the minimum; later threads are min-merged in.
1819 cvmin = new ClockVector(cv, NULL);
1821 cvmin->minmerge(cv);
1827 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
1829 void ModelExecution::fixupLastAct(ModelAction *act) {
// The NOP stands in as the thread's newest action, inheriting the removed
// action's clock vector and last release fence.
1830 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1831 newact->set_seq_number(get_next_seq_num());
1832 newact->create_cv(act);
1833 newact->set_last_fence_release(act->get_last_fence_release());
1834 add_action_to_lists(newact, false);
1837 /** Compute which actions to free. */
1839 void ModelExecution::collectActions() {
// Bail out while the trace is still shorter than the configured minimum.
1840 if (priv->used_sequence_numbers < params->traceminsize)
1843 //Compute minimal clock vector for all live threads
1844 ClockVector *cvmin = computeMinimalCV();
1845 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with sequence numbers at or below this bound may be freed,
// so at least `traceminsize` recent actions always survive.
1846 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1848 //Next walk action trace... When we hit an action, see if it is
1849 //invisible (e.g., earlier than the first before the minimum
1850 //clock for the thread... if so erase it and all previous
1851 //actions in cyclegraph
1852 sllnode<ModelAction*> * it;
// PHASE 1: mark candidate writes (and everything mo-before them) as free.
1853 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1854 ModelAction *act = it->getVal();
1855 modelclock_t actseq = act->get_seq_number();
1857 //See if we are done
1858 if (actseq > maxtofree)
1861 thread_id_t act_tid = act->get_tid();
1862 modelclock_t tid_clock = cvmin->getClock(act_tid);
1864 //Free if it is invisible or we have set a flag to remove visible actions.
1865 if (actseq <= tid_clock || params->removevisible) {
// Normalize to a write: a read contributes the write it reads from.
1866 ModelAction * write;
1867 if (act->is_write()) {
1869 } else if (act->is_read()) {
1870 write = act->get_reads_from();
1874 //Mark everything earlier in MO graph to be freed
// BFS over incoming mo edges, marking each not-yet-free predecessor.
1875 CycleNode * cn = mo_graph->getNode_noCreate(write);
1877 queue->push_back(cn);
1878 while(!queue->empty()) {
1879 CycleNode * node = queue->back();
1881 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1882 CycleNode * prevnode = node->getInEdge(i);
1883 ModelAction * prevact = prevnode->getAction();
1884 if (prevact->get_type() != READY_FREE) {
1885 prevact->set_free();
1886 queue->push_back(prevnode);
1894 //We may need to remove read actions in the window we don't delete to preserve correctness.
// PHASE 2: fix up the recent (kept) window, scanning backwards from the end
// down to the cutoff reached by phase 1.
1896 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1897 ModelAction *act = it2->getVal();
1898 //Do iteration early in case we delete the act
// A live thread's most recent action must be preserved (or replaced via
// fixupLastAct) so per-thread bookkeeping stays valid.
1900 bool islastact = false;
1901 ModelAction *lastact = get_last_action(act->get_tid());
1902 if (act == lastact) {
1903 Thread * th = get_thread(act);
1904 islastact = !th->is_complete();
1907 if (act->is_read()) {
1908 if (act->get_reads_from()->is_free()) {
1909 if (act->is_rmw()) {
1910 //Weaken a RMW from a freed store to a write
1911 act->set_type(ATOMIC_WRITE);
1923 //If we don't delete the action, we should remove references to release fences
1925 const ModelAction *rel_fence =act->get_last_fence_release();
1926 if (rel_fence != NULL) {
1927 modelclock_t relfenceseq = rel_fence->get_seq_number();
1928 thread_id_t relfence_tid = rel_fence->get_tid();
1929 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1930 //Remove references to irrelevant release fences
// Everyone has already synchronized with fences at or below cvmin.
1931 if (relfenceseq <= tid_clock)
1932 act->set_last_fence_release(NULL);
1935 //Now we are in the window of old actions that we remove if possible
// PHASE 3: continue forward from the phase-1 cutoff, actually deleting
// actions in the old window where safe.
1936 for (;it != NULL;) {
1937 ModelAction *act = it->getVal();
1938 //Do iteration early since we may delete node...
1940 bool islastact = false;
1941 ModelAction *lastact = get_last_action(act->get_tid());
1942 if (act == lastact) {
1943 Thread * th = get_thread(act);
1944 islastact = !th->is_complete();
1947 if (act->is_read()) {
1948 if (act->get_reads_from()->is_free()) {
1949 if (act->is_rmw()) {
// Weaken a RMW whose source store was freed into a plain write.
1950 act->set_type(ATOMIC_WRITE);
1960 } else if (act->is_free()) {
1967 } else if (act->is_write()) {
1968 //Do nothing with write that hasn't been marked to be freed
1969 } else if (islastact) {
1970 //Keep the last action for non-read/write actions
1971 } else if (act->is_fence()) {
1972 //Note that acquire fences can always be safely
1973 //removed, but could incur extra overheads in
1974 //traversals. Removing them before the cvmin seems
1975 //like a good compromise.
1977 //Release fences before the cvmin don't do anything
1978 //because everyone has already synchronized.
1980 //Sequentially fences before cvmin are redundant
1981 //because happens-before will enforce same
1984 modelclock_t actseq = act->get_seq_number();
1985 thread_id_t act_tid = act->get_tid();
1986 modelclock_t tid_clock = cvmin->getClock(act_tid);
1987 if (actseq <= tid_clock) {
1989 // Remove reference to act from thrd_last_fence_release
1990 int thread_id = id_to_int( act->get_tid() );
1991 if (thrd_last_fence_release[thread_id] == act) {
1992 thrd_last_fence_release[thread_id] = NULL;
1998 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1999 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
2000 //need to keep most recent unlock/wait for each lock
2001 if(act->is_unlock() || act->is_wait()) {
// Only the most recent unlock/wait per mutex is still needed.
2002 ModelAction * lastlock = get_last_unlock(act);
2003 if (lastlock != act) {
2008 } else if (act->is_create()) {
// A CREATE is only removable once its child thread has completed.
2009 if (act->get_thread_operand()->is_complete()) {
2021 //If we don't delete the action, we should remove references to release fences
2022 const ModelAction *rel_fence =act->get_last_fence_release();
2023 if (rel_fence != NULL) {
2024 modelclock_t relfenceseq = rel_fence->get_seq_number();
2025 thread_id_t relfence_tid = rel_fence->get_tid();
2026 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2027 //Remove references to irrelevant release fences
2028 if (relfenceseq <= tid_clock)
2029 act->set_last_fence_release(NULL);
2037 Fuzzer * ModelExecution::getFuzzer() {