11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
/* NOTE(review): this view is elided — the struct's closing brace and some
 * members/initializers are not visible here. */
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
/* Destructor frees each heap-allocated bug_message accumulated via assert_bug(). */
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
// next_thread_id doubles as the count of threads created so far (see get_num_threads()).
40 unsigned int next_thread_id;
// Last sequence number handed out to a ModelAction (see get_next_seq_num()).
41 modelclock_t used_sequence_numbers;
// Bug reports collected during this execution; owned by this struct.
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
49 /** @brief Constructor */
/* Sets up the per-execution state: thread map, snapshotted members,
 * modification-order graph, and the fuzzer used to pick schedules/writes.
 * NOTE(review): view is elided — additional member initializers and the
 * initializer-list tail are not visible here. */
50 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
54 thread_map(2), /* We'll always need at least 2 threads */
59 condvar_waiters_map(),
66 thrd_last_fence_release(),
67 priv(new struct model_snapshot_members ()),
68 mo_graph(new CycleGraph()),
70 fuzzer(new NewFuzzer()),
76 /* Initialize a model-checker thread, for special ModelActions */
77 model_thread = new Thread(get_next_id());
78 add_thread(model_thread);
/* Let the fuzzer and scheduler call back into this execution/model. */
79 fuzzer->register_engine(m, this);
80 scheduler->register_engine(this);
/* TLS key used so each user thread's storage is torn down via tlsdestructor. */
82 pthread_key_create(&pthreadkey, tlsdestructor);
86 /** @brief Destructor */
/* Frees every Thread created during this execution (including the model thread).
 * NOTE(review): view is elided — deletion of other owned members (priv,
 * mo_graph, fuzzer) is not visible here; confirm against full source. */
87 ModelExecution::~ModelExecution()
89 for (unsigned int i = 0;i < get_num_threads();i++)
90 delete get_thread(int_to_id(i));
/** @return the current execution number, delegated to the owning ModelChecker */
96 int ModelExecution::get_execution_number() const
98 return model->get_execution_number();
/**
 * Look up the action list for @a ptr in @a hash, creating (and inserting) an
 * empty list on first use so callers always get a non-NULL list.
 * NOTE(review): the hash->put(...) for the newly created list is elided from
 * this view — confirm in full source.
 */
101 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
103 action_list_t *tmp = hash->get(ptr);
105 tmp = new action_list_t();
/**
 * Same get-or-create pattern as get_safe_ptr_action(), but for the per-location
 * vector of per-thread action lists (used by obj_thrd_map).
 */
111 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
113 SnapVector<action_list_t> *tmp = hash->get(ptr);
115 tmp = new SnapVector<action_list_t>();
121 /** @return a thread ID for a new Thread */
// Post-increment: hands out IDs 0,1,2,... and keeps next_thread_id == thread count.
122 thread_id_t ModelExecution::get_next_id()
124 return priv->next_thread_id++;
127 /** @return the number of user threads created during this execution */
// Works because thread IDs are allocated densely starting at INITIAL_THREAD_ID.
128 unsigned int ModelExecution::get_num_threads() const
130 return priv->next_thread_id;
133 /** @return a sequence number for a new ModelAction */
// Pre-increment: sequence numbers start at 1 (0 is reserved/initial).
134 modelclock_t ModelExecution::get_next_seq_num()
136 return ++priv->used_sequence_numbers;
139 /** @return the most recently issued sequence number (no allocation) */
140 modelclock_t ModelExecution::get_curr_seq_num()
142 return priv->used_sequence_numbers;
145 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
// Undoes one get_next_seq_num() so the postponed action's number can be reused.
146 void ModelExecution::restore_last_seq_num()
148 priv->used_sequence_numbers--;
152 * @brief Should the current action wake up a given thread?
154 * @param curr The current action
155 * @param thread The thread that we might wake up
156 * @return True, if we should wake up the sleeping thread; false otherwise
/* NOTE(review): view is elided — the early-return bodies and final return are
 * not visible; each test below presumably returns true on match. */
158 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
160 const ModelAction *asleep = thread->get_pending();
161 /* Don't allow partial RMW to wake anyone up */
164 /* Synchronizing actions may have been backtracked */
165 if (asleep->could_synchronize_with(curr))
167 /* All acquire/release fences and fence-acquire/store-release */
168 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
170 /* Fence-release + store can awake load-acquire on the same location */
171 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
172 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only if the sleeper's last action precedes the fence-release in sequence order.
173 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
176 /* The sleep is literally sleeping */
177 if (asleep->is_sleep()) {
// Let the fuzzer decide whether a thrd_sleep spuriously wakes.
178 if (fuzzer->shouldWake(asleep))
/**
 * @brief Remove from the sleep set any thread that @a curr should wake.
 *
 * Scans all threads; for each one currently in the scheduler's sleep set,
 * consults should_wake_up() and, on a match, removes it from the sleep set.
 * Threads blocked on an explicit thrd_sleep additionally get their wakeup
 * state flagged so the sleep call can observe it was (spuriously) woken.
 */
185 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
187 for (unsigned int i = 0;i < get_num_threads();i++) {
188 Thread *thr = get_thread(int_to_id(i));
189 if (scheduler->is_sleep_set(thr)) {
190 if (should_wake_up(curr, thr)) {
191 /* Remove this thread from sleep set */
192 scheduler->remove_sleep(thr);
193 if (thr->get_pending()->is_sleep())
194 thr->set_wakeup_state(true);
/**
 * @brief Record a bug report for this execution.
 * @param msg Human-readable description; copied into a new bug_message
 *            owned by (and freed with) the snapshotted members.
 */
200 void ModelExecution::assert_bug(const char *msg)
202 priv->bugs.push_back(new bug_message(msg));
206 /** @return True, if any bugs have been reported for this execution */
207 bool ModelExecution::have_bug_reports() const
209 return priv->bugs.size() != 0;
/** @return the (non-owning) list of bug reports for this execution;
 *  body elided from this view — presumably returns &priv->bugs. */
212 SnapVector<bug_message *> * ModelExecution::get_bugs() const
218 * Check whether the current trace has triggered an assertion which should halt
221 * @return True, if the execution should be aborted; false otherwise
223 bool ModelExecution::has_asserted() const
225 return priv->asserted;
229 * Trigger a trace assertion which should cause this execution to be halted.
230 * This can be due to a detected bug or due to an infeasibility that should
/* Snapshotted flag, so the assertion is naturally reset on rollback. */
233 void ModelExecution::set_assert()
235 priv->asserted = true;
239 * Check if we are in a deadlock. Should only be called at the end of an
240 * execution, although it should not give false positives in the middle of an
241 * execution (there should be some ENABLED thread).
243 * @return True if program is in a deadlock; false otherwise
/* NOTE(review): view is elided — presumably an enabled thread short-circuits
 * the result to false; confirm in full source. */
245 bool ModelExecution::is_deadlocked() const
247 bool blocking_threads = false;
248 for (unsigned int i = 0;i < get_num_threads();i++) {
249 thread_id_t tid = int_to_id(i);
252 Thread *t = get_thread(tid);
// A user thread with a pending (un-runnable) action is considered blocked.
253 if (!t->is_model_thread() && t->get_pending())
254 blocking_threads = true;
256 return blocking_threads;
260 * Check if this is a complete execution. That is, have all thread completed
261 * execution (rather than exiting because sleep sets have forced a redundant
264 * @return True if the execution is complete.
/* Any still-enabled thread means the trace ended early (e.g. sleep-set cut). */
266 bool ModelExecution::is_complete_execution() const
268 for (unsigned int i = 0;i < get_num_threads();i++)
269 if (is_enabled(int_to_id(i)))
/**
 * @brief Lazily materialize a plain (non-atomic) store as a ModelAction.
 *
 * Reads the current raw value at @a location, recovers which thread wrote it
 * and at what clock, and builds a NONATOMIC_WRITE action backdated to that
 * clock so it can participate in rf/mo reasoning. Also marks the location
 * atomic so this conversion happens at most once per store.
 * @param location address of the non-atomic store (assumed 64-bit readable)
 */
274 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
275 uint64_t value = *((const uint64_t *) location);
276 modelclock_t storeclock;
277 thread_id_t storethread;
278 getStoreThreadAndClock(location, &storethread, &storeclock);
279 setAtomicStoreFlag(location);
280 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
// Backdate the action to the clock of the original store.
281 act->set_seq_number(storeclock);
// Insert into trace/per-thread lists (lazily, in seq-number order) and mo-graph.
282 add_normal_write_to_lists(act);
283 add_write_to_lists(act);
284 w_modification_order(act);
286 model->get_history()->process_action(act, act->get_tid());
292 * Processes a read model action.
293 * @param curr is the read model action to process.
294 * @param rf_set is the set of model actions we can possibly read from
295 * @return True if the read can be pruned from the thread map list.
/* NOTE(review): view is elided — the loop structure that retries after
 * discarding an infeasible rf, and cleanup of priorset, are not fully visible. */
297 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
299 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
300 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
301 if (hasnonatomicstore) {
// Make the last plain store at this location visible as a candidate write.
302 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
303 rf_set->push_back(nonatomicstore);
306 // Remove writes that violate read modification order
309 while (i < rf_set->size()) {
310 ModelAction * rf = (*rf_set)[i];
// check_only=true: probe mo-consistency without mutating priorset/canprune.
311 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
// Swap-with-back removal (order of rf_set is not significant).
312 (*rf_set)[i] = rf_set->back();
// Fuzzer chooses which of the remaining feasible writes this read observes.
319 int index = fuzzer->selectWrite(curr, rf_set);
321 ModelAction *rf = (*rf_set)[index];
324 bool canprune = false;
325 if (r_modification_order(curr, rf, priorset, &canprune)) {
// Commit the collected mo edges: every prior write precedes rf.
326 for(unsigned int i=0;i<priorset->size();i++) {
327 mo_graph->addEdge((*priorset)[i], rf);
330 get_thread(curr)->set_return_value(curr->get_return_value());
// Only plain atomic reads are prunable from the per-location thread map.
332 return canprune && (curr->get_type() == ATOMIC_READ);
// Selected write turned out infeasible: drop it and (presumably) retry.
335 (*rf_set)[index] = rf_set->back();
341 * Processes a lock, trylock, or unlock model action. @param curr is
342 * the read model action to process.
344 * The try lock operation checks whether the lock is taken. If not,
345 * it falls to the normal lock operation case. If so, it returns
348 * The lock operation has already been checked that it is enabled, so
349 * it just grabs the lock and synchronizes with the previous unlock.
351 * The unlock operation has to re-enable all of the threads that are
352 * waiting on the lock.
354 * @return True if synchronization was updated; false otherwise
/* NOTE(review): view is elided — case labels (ATOMIC_LOCK/ATOMIC_WAIT), break
 * statements, scheduler wake calls in the loops, and the final return are not
 * all visible here. */
356 bool ModelExecution::process_mutex(ModelAction *curr)
358 cdsc::mutex *mutex = curr->get_mutex();
359 struct cdsc::mutex_state *state = NULL;
362 state = mutex->get_state();
364 switch (curr->get_type()) {
365 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently unowned.
366 bool success = !state->locked;
367 curr->set_try_lock(success);
// Failure: return 0 to the caller and stop here.
369 get_thread(curr)->set_return_value(0);
// Success: return 1 and fall through to the lock case below.
372 get_thread(curr)->set_return_value(1);
374 //otherwise fall into the lock case
376 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
377 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
378 //	assert_bug("Lock access before initialization");
380 // TODO: lock count for recursive mutexes
// Record the owner as the acquiring Thread.
381 state->locked = get_thread(curr);
382 ModelAction *unlock = get_last_unlock(curr);
383 //synchronize with the previous unlock statement
384 if (unlock != NULL) {
385 synchronize(unlock, curr);
391 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
// Wait path: fuzzer decides whether this wait actually blocks.
392 if (fuzzer->shouldWait(curr)) {
393 /* wake up the other threads */
394 for (unsigned int i = 0;i < get_num_threads();i++) {
395 Thread *t = get_thread(int_to_id(i));
396 Thread *curr_thrd = get_thread(curr);
// Threads blocked trying to lock this mutex become runnable once we release it.
397 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
401 /* unlock the lock - after checking who was waiting on it */
402 state->locked = NULL;
404 /* remove old wait action and disable this thread */
405 action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
406 for (sllnode<ModelAction *> * it = waiters->begin(); it != NULL; it = it->getNext()) {
407 ModelAction * wait = it->getVal();
// Drop any stale wait by the same thread before enqueueing this one.
408 if (wait->get_tid() == curr->get_tid()) {
// Park this thread on the condvar's waiter list and put it to sleep.
414 waiters->push_back(curr);
415 scheduler->sleep(get_thread(curr));
420 case ATOMIC_TIMEDWAIT:
421 case ATOMIC_UNLOCK: {
422 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
423 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
424 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
425 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
426 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
427 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
428 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
429 //MUST EVENMTUALLY RELEASE...
431 // TODO: lock count for recursive mutexes
432 /* wake up the other threads */
433 for (unsigned int i = 0;i < get_num_threads();i++) {
434 Thread *t = get_thread(int_to_id(i));
435 Thread *curr_thrd = get_thread(curr);
436 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
440 /* unlock the lock - after checking who was waiting on it */
441 state->locked = NULL;
444 case ATOMIC_NOTIFY_ALL: {
445 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
446 //activate all the waiting threads
447 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
448 scheduler->wake(get_thread(rit->getVal()));
453 case ATOMIC_NOTIFY_ONE: {
454 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
455 if (waiters->size() != 0) {
// Fuzzer picks which single waiter gets the notification.
456 Thread * thread = fuzzer->selectNotify(waiters);
457 scheduler->wake(thread);
469 * Process a write ModelAction
470 * @param curr The ModelAction to process
471 * @return True if the mo_graph was updated or promises were resolved
/* Adds modification-order edges for the write, then reports VALUE_NONE as the
 * store's "return value" to the issuing thread. */
473 void ModelExecution::process_write(ModelAction *curr)
475 w_modification_order(curr);
476 get_thread(curr)->set_return_value(VALUE_NONE);
480 * Process a fence ModelAction
481 * @param curr The ModelAction to process
482 * @return True if synchronization was updated
484 bool ModelExecution::process_fence(ModelAction *curr)
487 * fence-relaxed: no-op
488 * fence-release: only log the occurence (not in this function), for
489 * use in later synchronization
490 * fence-acquire (this function): search for hypothetical release
492 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
494 bool updated = false;
495 if (curr->is_acquire()) {
496 action_list_t *list = &action_trace;
497 sllnode<ModelAction *> * rit;
498 /* Find X : is_read(X) && X --sb-> curr */
// Walk the global trace backwards, considering only this thread's actions.
499 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
500 ModelAction *act = rit->getVal();
// Skip actions from other threads (not sequenced-before curr).
503 if (act->get_tid() != curr->get_tid())
505 /* Stop at the beginning of the thread */
506 if (act->is_thread_start())
508 /* Stop once we reach a prior fence-acquire */
509 if (act->is_fence() && act->is_acquire())
513 /* read-acquire will find its own release sequences */
514 if (act->is_acquire())
517 /* Establish hypothetical release sequences */
// Merge the happens-before clock propagated by the write this read observed.
518 ClockVector *cv = get_hb_from_write(act->get_reads_from());
519 if (cv != NULL && curr->get_cv()->merge(cv))
527 * @brief Process the current action for thread-related activity
529 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
530 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
531 * synchronization, etc. This function is a no-op for non-THREAD actions
532 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
534 * @param curr The current action
535 * @return True if synchronization was updated or a thread completed
/* NOTE(review): view is elided — case labels for the join variants and several
 * break statements are not visible here. */
537 void ModelExecution::process_thread_action(ModelAction *curr)
539 switch (curr->get_type()) {
540 case THREAD_CREATE: {
541 thrd_t *thrd = (thrd_t *)curr->get_location();
542 struct thread_params *params = (struct thread_params *)curr->get_value();
// Child thread inherits its start function/argument from the create call.
543 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
544 curr->set_thread_operand(th);
546 th->set_creation(curr);
549 case PTHREAD_CREATE: {
// Hand the caller a dense pthread id (index+1 into pthread_map).
550 (*(uint32_t *)curr->get_location()) = pthread_counter++;
552 struct pthread_params *params = (struct pthread_params *)curr->get_value();
553 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
554 curr->set_thread_operand(th);
556 th->set_creation(curr);
558 if ( pthread_map.size() < pthread_counter )
559 pthread_map.resize( pthread_counter );
560 pthread_map[ pthread_counter-1 ] = th;
// Join: synchronize with the last action of the joined (finished) thread.
565 Thread *blocking = curr->get_thread_operand();
566 ModelAction *act = get_last_action(blocking->get_id());
567 synchronize(act, curr);
// Second join variant (pthread join path) — same synchronization.
571 Thread *blocking = curr->get_thread_operand();
572 ModelAction *act = get_last_action(blocking->get_id());
573 synchronize(act, curr);
574 break;	// WL: to be add (modified)
577 case THREADONLY_FINISH:
578 case THREAD_FINISH: {
579 Thread *th = get_thread(curr);
// Special handling when the initial (main) thread finishes.
580 if (curr->get_type() == THREAD_FINISH &&
581 th == model->getInitThread()) {
587 /* Wake up any joining threads */
588 for (unsigned int i = 0;i < get_num_threads();i++) {
589 Thread *waiting = get_thread(int_to_id(i));
590 if (waiting->waiting_on() == th &&
591 waiting->get_pending()->is_thread_join())
592 scheduler->wake(waiting);
// Sleep action: leave it pending and park the thread in the sleep set.
601 Thread *th = get_thread(curr);
602 th->set_pending(curr);
603 scheduler->add_sleep(th);
612 * Initialize the current action by performing one or more of the following
613 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
614 * manipulating backtracking sets, allocating and
615 * initializing clock vectors, and computing the promises to fulfill.
617 * @param curr The current action, as passed from the user context; may be
618 * freed/invalidated after the execution of this function, with a different
619 * action "returned" its place (pass-by-reference)
620 * @return True if curr is a newly-explored action; false otherwise
/* NOTE(review): view is elided — the RMW branch's replacement of *curr and its
 * return are not visible here. */
622 bool ModelExecution::initialize_curr_action(ModelAction **curr)
624 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
// Second half of an RMW: merge into the earlier RMWR action instead.
625 ModelAction *newcurr = process_rmw(*curr);
631 ModelAction *newcurr = *curr;
633 newcurr->set_seq_number(get_next_seq_num());
634 /* Always compute new clock vector */
635 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
637 /* Assign most recent release fence */
638 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
640 return true;	/* This was a new ModelAction */
645 * @brief Establish reads-from relation between two actions
647 * Perform basic operations involved with establishing a concrete rf relation,
648 * including setting the ModelAction data and checking for release sequences.
650 * @param act The action that is reading (must be a read)
651 * @param rf The action from which we are reading (must be a write)
653 * @return True if this read established synchronization
656 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
659 ASSERT(rf->is_write());
661 act->set_read_from(rf);
662 if (act->is_acquire()) {
// Acquire read: pull in the happens-before clock released by the write('s
// release sequence) and merge it into the reader's clock vector.
663 ClockVector *cv = get_hb_from_write(rf);
666 act->get_cv()->merge(cv);
671 * @brief Synchronizes two actions
673 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
674 * This function performs the synchronization as well as providing other hooks
675 * for other checks along with synchronization.
677 * @param first The left-hand side of the synchronizes-with relation
678 * @param second The right-hand side of the synchronizes-with relation
679 * @return True if the synchronization was successful (i.e., was consistent
680 * with the execution order); false otherwise
682 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// Sanity: sw edges must agree with sequence order (second may not precede first).
684 if (*second < *first) {
685 ASSERT(0);	//This should not happend
688 return second->synchronize_with(first);
692 * @brief Check whether a model action is enabled.
694 * Checks whether an operation would be successful (i.e., is a lock already
695 * locked, or is the joined thread already complete).
697 * For yield-blocking, yields are never enabled.
699 * @param curr is the ModelAction to check whether it is enabled.
700 * @return a bool that indicates whether the action is enabled.
/* NOTE(review): view is elided — the disabled-path bodies (return false /
 * blocking bookkeeping) and the final return are not visible here. */
702 bool ModelExecution::check_action_enabled(ModelAction *curr) {
703 if (curr->is_lock()) {
704 cdsc::mutex *lock = curr->get_mutex();
705 struct cdsc::mutex_state *state = lock->get_state();
707 Thread *lock_owner = (Thread *)state->locked;
708 Thread *curr_thread = get_thread(curr);
// Recursive mutexes allow re-locking by the current owner.
709 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
715 } else if (curr->is_thread_join()) {
// Join is disabled until the target thread has completed.
716 Thread *blocking = curr->get_thread_operand();
717 if (!blocking->is_complete()) {
720 } else if (curr->is_sleep()) {
// Fuzzer may elect to skip the sleep entirely.
721 if (!fuzzer->shouldSleep(curr))
729 * This is the heart of the model checker routine. It performs model-checking
730 * actions corresponding to a given "current action." Among other processes, it
731 * calculates reads-from relationships, updates synchronization clock vectors,
732 * forms a memory_order constraints graph, and handles replay/backtrack
733 * execution when running permutations of previously-observed executions.
735 * @param curr The current action to process
736 * @return The ModelAction that is actually executed; may be different than
/* NOTE(review): view is elided — the process_write/process_fence/process_mutex
 * call bodies and the final return are not visible here. */
739 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// RMWC/RMW here means curr is the completion half of an earlier RMWR.
742 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
743 bool newly_explored = initialize_curr_action(&curr);
747 wake_up_sleeping_actions(curr);
749 SnapVector<ModelAction *> * rf_set = NULL;
750 /* Build may_read_from set for newly-created actions */
751 if (newly_explored && curr->is_read())
752 rf_set = build_may_read_from(curr);
754 bool canprune = false;
756 if (curr->is_read() && !second_part_of_rmw) {
757 canprune = process_read(curr, rf_set);
// Non-reads (or RMW completions) must not have produced an rf set.
760 ASSERT(rf_set == NULL);
762 /* Add the action to lists */
763 if (!second_part_of_rmw)
764 add_action_to_lists(curr, canprune);
766 if (curr->is_write())
767 add_write_to_lists(curr);
769 process_thread_action(curr);
771 if (curr->is_write())
774 if (curr->is_fence())
777 if (curr->is_mutex_op())
783 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
/* Merges the completion action @a act into the thread's pending RMWR (the last
 * action of that thread), and records the atomicity edge rf --mo--> rmw. */
784 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
785 ModelAction *lastread = get_last_action(act->get_tid());
786 lastread->process_rmw(act);
788 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
794 * @brief Updates the mo_graph with the constraints imposed from the current
797 * Basic idea is the following: Go through each other thread and find
798 * the last action that happened before our read. Two cases:
800 * -# The action is a write: that write must either occur before
801 * the write we read from or be the write we read from.
802 * -# The action is a read: the write that that action read from
803 * must occur before the write we read from or be the same write.
805 * @param curr The current action. Must be a read.
806 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
807 * @param check_only If true, then only check whether the current action satisfies
808 *        read modification order or not, without modifiying priorset and canprune.
810 * @return True if modification order edges were added; false otherwise
/* NOTE(review): view is elided — the "return false" infeasibility paths after
 * each checkReachable(rf, ...) test and the final return are not visible; the
 * check_only guards around priorset->push_back are also presumably elided. */
813 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
814 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
816 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
817 ASSERT(curr->is_read());
819 /* Last SC fence in the current thread */
820 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
822 int tid = curr->get_tid();
824 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet.  */
825 if ((int)thrd_lists->size() <= tid) {
826 uint oldsize = thrd_lists->size();
827 thrd_lists->resize(priv->next_thread_id);
// Placement-new the newly exposed slots so they are valid empty lists.
828 for(uint i = oldsize;i < priv->next_thread_id;i++)
829 new (&(*thrd_lists)[i]) action_list_t();
832 ModelAction *prev_same_thread = NULL;
833 /* Iterate over all threads */
// Starts at curr's own thread, then wraps around through the rest.
834 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
835 /* Last SC fence in thread tid */
836 ModelAction *last_sc_fence_thread_local = NULL;
838 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
840 /* Last SC fence in thread tid, before last SC fence in current thread */
841 ModelAction *last_sc_fence_thread_before = NULL;
842 if (last_sc_fence_local)
843 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
845 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
// Skip this thread if nothing new (no hb growth, no later SC fence) since the
// previous read's scan — an incremental-scan optimization.
846 if (prev_same_thread != NULL &&
847 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
848 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
852 /* Iterate over actions in thread, starting from most recent */
853 action_list_t *list = &(*thrd_lists)[tid];
854 sllnode<ModelAction *> * rit;
855 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
856 ModelAction *act = rit->getVal();
861 /* Don't want to add reflexive edges on 'rf' */
862 if (act->equals(rf)) {
863 if (act->happens_before(curr))
869 if (act->is_write()) {
870 /* C++, Section 29.3 statement 5 */
871 if (curr->is_seqcst() && last_sc_fence_thread_local &&
872 *act < *last_sc_fence_thread_local) {
// rf reachable from act would make act --mo--> rf cyclic: infeasible.
873 if (mo_graph->checkReachable(rf, act))
876 priorset->push_back(act);
879 /* C++, Section 29.3 statement 4 */
880 else if (act->is_seqcst() && last_sc_fence_local &&
881 *act < *last_sc_fence_local) {
882 if (mo_graph->checkReachable(rf, act))
885 priorset->push_back(act);
888 /* C++, Section 29.3 statement 6 */
889 else if (last_sc_fence_thread_before &&
890 *act < *last_sc_fence_thread_before) {
891 if (mo_graph->checkReachable(rf, act))
894 priorset->push_back(act);
900 * Include at most one act per-thread that "happens
903 if (act->happens_before(curr)) {
// Remember the newest hb-prior action for the incremental-scan skip above.
905 if (last_sc_fence_local == NULL ||
906 (*last_sc_fence_local < *act)) {
907 prev_same_thread = act;
910 if (act->is_write()) {
911 if (mo_graph->checkReachable(rf, act))
914 priorset->push_back(act);
// act is a read: order the write it observed before rf instead.
916 ModelAction *prevrf = act->get_reads_from();
917 if (!prevrf->equals(rf)) {
918 if (mo_graph->checkReachable(rf, prevrf))
921 priorset->push_back(prevrf);
// A same-thread hb-prior access means curr can be pruned from the obj list.
923 if (act->get_tid() == curr->get_tid()) {
924 //Can prune curr from obj list
938 * Updates the mo_graph with the constraints imposed from the current write.
940 * Basic idea is the following: Go through each other thread and find
941 * the lastest action that happened before our write. Two cases:
943 * (1) The action is a write => that write must occur before
946 * (2) The action is a read => the write that that action read from
947 * must occur before the current write.
949 * This method also handles two other issues:
951 * (I) Sequential Consistency: Making sure that if the current write is
952 * seq_cst, that it occurs after the previous seq_cst write.
954 * (II) Sending the write back to non-synchronizing reads.
956 * @param curr The current action. Must be a write.
957 * @param send_fv A vector for stashing reads to which we may pass our future
958 * value. If NULL, then don't record any future values.
959 * @return True if modification order edges were added; false otherwise
/* NOTE(review): view is elided — declaration of loop index i, several break
 * statements, and parts of the RMW early-exit logic are not visible here. */
961 void ModelExecution::w_modification_order(ModelAction *curr)
963 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
965 ASSERT(curr->is_write());
// Collected mo-predecessors; committed in one batch via addEdges() below.
967 SnapList<ModelAction *> edgeset;
969 if (curr->is_seqcst()) {
970 /* We have to at least see the last sequentially consistent write,
971 so we are initialized. */
972 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
973 if (last_seq_cst != NULL) {
974 edgeset.push_back(last_seq_cst);
976 //update map for next query
977 obj_last_sc_map.put(curr->get_location(), curr);
980 /* Last SC fence in the current thread */
981 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
983 /* Iterate over all threads */
984 for (i = 0;i < thrd_lists->size();i++) {
985 /* Last SC fence in thread i, before last SC fence in current thread */
986 ModelAction *last_sc_fence_thread_before = NULL;
987 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
988 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
990 /* Iterate over actions in thread, starting from most recent */
991 action_list_t *list = &(*thrd_lists)[i];
992 sllnode<ModelAction*>* rit;
993 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
994 ModelAction *act = rit->getVal();
997 * 1) If RMW and it actually read from something, then we
998 * already have all relevant edges, so just skip to next
1001 * 2) If RMW and it didn't read from anything, we should
1002 * whatever edge we can get to speed up convergence.
1004 * 3) If normal write, we need to look at earlier actions, so
1005 * continue processing list.
1007 if (curr->is_rmw()) {
// RMW atomicity edge (rf --mo--> curr) is added in process_rmw already.
1008 if (curr->get_reads_from() != NULL)
1016 /* C++, Section 29.3 statement 7 */
1017 if (last_sc_fence_thread_before && act->is_write() &&
1018 *act < *last_sc_fence_thread_before) {
1019 edgeset.push_back(act);
1024 * Include at most one act per-thread that "happens
1027 if (act->happens_before(curr)) {
1029 * Note: if act is RMW, just add edge:
1031 * The following edge should be handled elsewhere:
1032 * readfrom(act) --mo--> act
1034 if (act->is_write())
1035 edgeset.push_back(act);
1036 else if (act->is_read()) {
1037 //if previous read accessed a null, just keep going
1038 edgeset.push_back(act->get_reads_from());
// Batch-insert all collected predecessors as mo edges into curr.
1044 mo_graph->addEdges(&edgeset, curr);
1049 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1050 * some constraints. This method checks one the following constraint (others
1051 * require compiler support):
1053 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1054 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
/* @param writer candidate write (Z above); @param reader the read (X above).
 * Returns false when the candidate is ruled out by the constraint.
 * NOTE(review): loop-index declaration and the final return are elided here. */
1056 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1058 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1060 /* Iterate over all threads */
1061 for (i = 0;i < thrd_lists->size();i++) {
// Earliest (oldest) write hb-after the reader in this thread, i.e. "Y"/rf(Y).
1062 const ModelAction *write_after_read = NULL;
1064 /* Iterate over actions in thread, starting from most recent */
1065 action_list_t *list = &(*thrd_lists)[i];
1066 sllnode<ModelAction *>* rit;
1067 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1068 ModelAction *act = rit->getVal();
1070 /* Don't disallow due to act == reader */
1071 if (!reader->happens_before(act) || reader == act)
1073 else if (act->is_write())
1074 write_after_read = act;
1075 else if (act->is_read() && act->get_reads_from() != NULL)
1076 write_after_read = act->get_reads_from();
// If that write is mo-before the candidate writer, reading is disallowed.
1079 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1086 * Computes the clock vector that happens before propagates from this write.
1088 * @param rf The action that might be part of a release sequence. Must be a
1090 * @return ClockVector of happens before relation.
/* Two phases: (1) walk the rf chain backwards collecting RMWs that lack a
 * cached release-sequence clock (rfcv), stopping at a non-RMW, an acq+rel RMW,
 * or a cached rfcv; (2) replay the collected chain forward, building/merging
 * clock vectors and (presumably) caching rfcv on each visited action.
 * NOTE(review): view is elided — the forward-replay loop header, set_rfcv
 * calls, and the return are not visible here. */
1093 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1094 SnapVector<ModelAction *> * processset = NULL;
1095 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1096 ASSERT(rf->is_write());
// Chain terminators: plain write, acquire+release RMW, or already-cached rfcv.
1097 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1099 if (processset == NULL)
1100 processset = new SnapVector<ModelAction *>();
1101 processset->push_back(rf);
1104 int i = (processset == NULL) ? 0 : processset->size();
1106 ClockVector * vec = NULL;
1108 if (rf->get_rfcv() != NULL) {
// Reuse the cached release-sequence clock.
1109 vec = rf->get_rfcv();
1110 } else if (rf->is_acquire() && rf->is_release()) {
1112 } else if (rf->is_release() && !rf->is_rmw()) {
1114 } else if (rf->is_release()) {
1115 //have rmw that is release and doesn't have a rfcv
1116 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1119 //operation that isn't release
1120 if (rf->get_last_fence_release()) {
// Fall back to the clock of the writer's last release fence.
1122 vec = rf->get_last_fence_release()->get_cv();
1124 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1130 rf = (*processset)[i];
1134 if (processset != NULL)
1140 * Performs various bookkeeping operations for the current ModelAction. For
1141 * instance, adds action to the per-object, per-thread action vector and to the
1142 * action trace list of all thread actions.
1144 * @param act is the ModelAction to add.
/* @param canprune when true, a read/write is left out of the per-thread
 * per-location list (it was shown redundant by r_modification_order). */
1146 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1148 int tid = id_to_int(act->get_tid());
// SC fences and unlocks also go on the per-location obj_map list.
1149 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1150 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1151 act->setActionRef(list->add_back(act));
1154 // Update action trace, a total order of all actions
1155 act->setTraceRef(action_trace.add_back(act));
1158 // Update obj_thrd_map, a per location, per thread, order of actions
1159 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector lazily; placement-new initializes new slots.
1160 if ((int)vec->size() <= tid) {
1161 uint oldsize = vec->size();
1162 vec->resize(priv->next_thread_id);
1163 for(uint i = oldsize;i < priv->next_thread_id;i++)
1164 new (&(*vec)[i]) action_list_t();
1166 if (!canprune && (act->is_read() || act->is_write()))
1167 act->setThrdMapRef((*vec)[tid].add_back(act));
1169 // Update thrd_last_action, the last action taken by each thread
1170 if ((int)thrd_last_action.size() <= tid)
1171 thrd_last_action.resize(get_num_threads());
1172 thrd_last_action[tid] = act;
1174 // Update thrd_last_fence_release, the last release fence taken by each thread
1175 if (act->is_fence() && act->is_release()) {
1176 if ((int)thrd_last_fence_release.size() <= tid)
1177 thrd_last_fence_release.resize(get_num_threads());
1178 thrd_last_fence_release[tid] = act;
1181 if (act->is_wait()) {
// For waits, act->get_value() carries the associated mutex address.
1182 void *mutex_loc = (void *) act->get_value();
1183 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
1187 sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
1188 sllnode<ModelAction*> * rit = list->end();
1189 modelclock_t next_seq = act->get_seq_number();
1190 if (rit == NULL || (rit->getVal()->get_seq_number() <= next_seq))
1191 return list->add_back(act);
1193 for(;rit != NULL;rit=rit->getPrev()) {
1194 if (rit->getVal()->get_seq_number() <= next_seq) {
1195 return list->insertAfter(rit, act);
1198 return list->add_front(act);
1202 sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1203 sllnode<ModelAction*> * rit = list->end();
1204 modelclock_t next_seq = act->get_seq_number();
1206 act->create_cv(NULL);
1207 return list->add_back(act);
1208 } else if (rit->getVal()->get_seq_number() <= next_seq) {
1209 act->create_cv(rit->getVal());
1210 return list->add_back(act);
1212 for(;rit != NULL;rit=rit->getPrev()) {
1213 if (rit->getVal()->get_seq_number() <= next_seq) {
1214 act->create_cv(rit->getVal());
1215 return list->insertAfter(rit, act);
1218 return list->add_front(act);
1223 * Performs various bookkeeping operations for a normal write. The
1224 * complication is that we are typically inserting a normal write
1225 * lazily, so we need to insert it into the middle of lists.
1227 * @param act is the ModelAction to add.
1230 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1232 int tid = id_to_int(act->get_tid());
1233 act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
1235 // Update obj_thrd_map, a per location, per thread, order of actions
1236 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1237 if (tid >= (int)vec->size()) {
1238 uint oldsize =vec->size();
1239 vec->resize(priv->next_thread_id);
1240 for(uint i=oldsize;i<priv->next_thread_id;i++)
1241 new (&(*vec)[i]) action_list_t();
1243 act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
1245 ModelAction * lastact = thrd_last_action[tid];
1246 // Update thrd_last_action, the last action taken by each thrad
1247 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1248 thrd_last_action[tid] = act;
1252 void ModelExecution::add_write_to_lists(ModelAction *write) {
1253 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1254 int tid = id_to_int(write->get_tid());
1255 if (tid >= (int)vec->size()) {
1256 uint oldsize =vec->size();
1257 vec->resize(priv->next_thread_id);
1258 for(uint i=oldsize;i<priv->next_thread_id;i++)
1259 new (&(*vec)[i]) action_list_t();
1261 write->setActionRef((*vec)[tid].add_back(write));
1265 * @brief Get the last action performed by a particular Thread
1266 * @param tid The thread ID of the Thread in question
1267 * @return The last action in the thread
1269 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1271 int threadid = id_to_int(tid);
1272 if (threadid < (int)thrd_last_action.size())
1273 return thrd_last_action[id_to_int(tid)];
1279 * @brief Get the last fence release performed by a particular Thread
1280 * @param tid The thread ID of the Thread in question
1281 * @return The last fence release in the thread, if one exists; NULL otherwise
1283 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1285 int threadid = id_to_int(tid);
1286 if (threadid < (int)thrd_last_fence_release.size())
1287 return thrd_last_fence_release[id_to_int(tid)];
1293 * Gets the last memory_order_seq_cst write (in the total global sequence)
1294 * performed on a particular object (i.e., memory location), not including the
1296 * @param curr The current ModelAction; also denotes the object location to
1298 * @return The last seq_cst write
1300 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1302 void *location = curr->get_location();
1303 return obj_last_sc_map.get(location);
1307 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1308 * performed in a particular thread, prior to a particular fence.
1309 * @param tid The ID of the thread to check
1310 * @param before_fence The fence from which to begin the search; if NULL, then
1311 * search for the most recent fence in the thread.
1312 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1314 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1316 /* All fences should have location FENCE_LOCATION */
1317 action_list_t *list = obj_map.get(FENCE_LOCATION);
1322 sllnode<ModelAction*>* rit = list->end();
1325 for (;rit != NULL;rit=rit->getPrev())
1326 if (rit->getVal() == before_fence)
1329 ASSERT(rit->getVal() == before_fence);
1333 for (;rit != NULL;rit=rit->getPrev()) {
1334 ModelAction *act = rit->getVal();
1335 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1342 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1343 * location). This function identifies the mutex according to the current
1344 * action, which is presumed to perform on the same mutex.
1345 * @param curr The current ModelAction; also denotes the object location to
1347 * @return The last unlock operation
1349 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1351 void *location = curr->get_location();
1353 action_list_t *list = obj_map.get(location);
1357 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1358 sllnode<ModelAction*>* rit;
1359 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1360 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1361 return rit->getVal();
1365 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1367 ModelAction *parent = get_last_action(tid);
1369 parent = get_thread(tid)->get_creation();
1374 * Returns the clock vector for a given thread.
1375 * @param tid The thread whose clock vector we want
1376 * @return Desired clock vector
1378 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1380 ModelAction *firstaction=get_parent_action(tid);
1381 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * Compare two raw 64-bit values for equality at a given access width,
 * truncating both operands to the low `size` bytes before comparing.
 *
 * @param val1 First raw value
 * @param val2 Second raw value
 * @param size Access size in bytes (1, 2, 4, or 8)
 * @return true iff the low `size` bytes of the two values are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
    switch (size) {
    case 1:
        return ((uint8_t)val1) == ((uint8_t)val2);
    case 2:
        return ((uint16_t)val1) == ((uint16_t)val2);
    case 4:
        return ((uint32_t)val1) == ((uint32_t)val2);
    case 8:
        return val1 == val2;
    default:
        /* Unexpected access size; fall back to full-width comparison. */
        return val1 == val2;
    }
}
1401 * Build up an initial set of all past writes that this 'read' action may read
1402 * from, as well as any previously-observed future values that must still be valid.
1404 * @param curr is the current ModelAction that we are exploring; it must be a
1407 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1409 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1411 ASSERT(curr->is_read());
1413 ModelAction *last_sc_write = NULL;
1415 if (curr->is_seqcst())
1416 last_sc_write = get_last_seq_cst_write(curr);
1418 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1420 /* Iterate over all threads */
1421 if (thrd_lists != NULL)
1422 for (i = 0;i < thrd_lists->size();i++) {
1423 /* Iterate over actions in thread, starting from most recent */
1424 action_list_t *list = &(*thrd_lists)[i];
1425 sllnode<ModelAction *> * rit;
1426 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1427 ModelAction *act = rit->getVal();
1432 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1433 bool allow_read = true;
1435 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1438 /* Need to check whether we will have two RMW reading from the same value */
1439 if (curr->is_rmwr()) {
1440 /* It is okay if we have a failing CAS */
1441 if (!curr->is_rmwrcas() ||
1442 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1443 //Need to make sure we aren't the second RMW
1444 CycleNode * node = mo_graph->getNode_noCreate(act);
1445 if (node != NULL && node->getRMW() != NULL) {
1446 //we are the second RMW
1453 /* Only add feasible reads */
1454 rf_set->push_back(act);
1457 /* Include at most one act per-thread that "happens before" curr */
1458 if (act->happens_before(curr))
1463 if (DBG_ENABLED()) {
1464 model_print("Reached read action:\n");
1466 model_print("End printing read_from_past\n");
1471 static void print_list(action_list_t *list)
1473 sllnode<ModelAction*> *it;
1475 model_print("------------------------------------------------------------------------------------\n");
1476 model_print("# t Action type MO Location Value Rf CV\n");
1477 model_print("------------------------------------------------------------------------------------\n");
1479 unsigned int hash = 0;
1481 for (it = list->begin();it != NULL;it=it->getNext()) {
1482 const ModelAction *act = it->getVal();
1483 if (act->get_seq_number() > 0)
1485 hash = hash^(hash<<3)^(it->getVal()->hash());
1487 model_print("HASH %u\n", hash);
1488 model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph plus reads-from ("rf") and
 * sequenced-before ("sb") edges to `<filename>.dot` for Graphviz rendering.
 * @param filename Base name of the output file (".dot" is appended)
 */
void ModelExecution::dumpGraph(char *filename)
{
    char buffer[200];
    sprintf(buffer, "%s.dot", filename);
    FILE *file = fopen(buffer, "w");
    fprintf(file, "digraph %s {\n", filename);
    mo_graph->dumpNodes(file);
    /* Per-thread previous action, used to draw sequenced-before edges. */
    ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

    for (sllnode<ModelAction*>* it = action_trace.begin(); it != NULL; it = it->getNext()) {
        ModelAction *act = it->getVal();
        if (act->is_read()) {
            mo_graph->dot_print_node(file, act);
            mo_graph->dot_print_edge(file,
                                     act->get_reads_from(),
                                     act,
                                     "label=\"rf\", color=red, weight=2");
        }
        if (thread_array[act->get_tid()]) {
            mo_graph->dot_print_edge(file,
                                     thread_array[id_to_int(act->get_tid())],
                                     act,
                                     "label=\"sb\", color=blue, weight=400");
        }
        thread_array[act->get_tid()] = act;
    }
    fprintf(file, "}\n");
    model_free(thread_array);
    fclose(file);
}
#endif
1525 /** @brief Prints an execution trace summary. */
1526 void ModelExecution::print_summary()
1528 #if SUPPORT_MOD_ORDER_DUMP
1529 char buffername[100];
1530 sprintf(buffername, "exec%04u", get_execution_number());
1531 mo_graph->dumpGraphToFile(buffername);
1532 sprintf(buffername, "graph%04u", get_execution_number());
1533 dumpGraph(buffername);
1536 model_print("Execution trace %d:", get_execution_number());
1537 if (scheduler->all_threads_sleeping())
1538 model_print(" SLEEP-SET REDUNDANT");
1539 if (have_bug_reports())
1540 model_print(" DETECTED BUG(S)");
1544 print_list(&action_trace);
1549 void ModelExecution::print_tail()
1551 model_print("Execution trace %d:\n", get_execution_number());
1553 sllnode<ModelAction*> *it;
1555 model_print("------------------------------------------------------------------------------------\n");
1556 model_print("# t Action type MO Location Value Rf CV\n");
1557 model_print("------------------------------------------------------------------------------------\n");
1559 unsigned int hash = 0;
1563 SnapList<ModelAction *> list;
1564 for (it = action_trace.end(); it != NULL; it = it->getPrev()) {
1565 if (counter > length)
1568 ModelAction * act = it->getVal();
1569 list.push_front(act);
1573 for (it = list.begin();it != NULL;it=it->getNext()) {
1574 const ModelAction *act = it->getVal();
1575 if (act->get_seq_number() > 0)
1577 hash = hash^(hash<<3)^(it->getVal()->hash());
1579 model_print("HASH %u\n", hash);
1580 model_print("------------------------------------------------------------------------------------\n");
1584 * Add a Thread to the system for the first time. Should only be called once
1586 * @param t The Thread to add
1588 void ModelExecution::add_thread(Thread *t)
1590 unsigned int i = id_to_int(t->get_id());
1591 if (i >= thread_map.size())
1592 thread_map.resize(i + 1);
1594 if (!t->is_model_thread())
1595 scheduler->add_thread(t);
1599 * @brief Get a Thread reference by its ID
1600 * @param tid The Thread's ID
1601 * @return A Thread reference
1603 Thread * ModelExecution::get_thread(thread_id_t tid) const
1605 unsigned int i = id_to_int(tid);
1606 if (i < thread_map.size())
1607 return thread_map[i];
1612 * @brief Get a reference to the Thread in which a ModelAction was executed
1613 * @param act The ModelAction
1614 * @return A Thread reference
1616 Thread * ModelExecution::get_thread(const ModelAction *act) const
1618 return get_thread(act->get_tid());
1622 * @brief Get a Thread reference by its pthread ID
1623 * @param index The pthread's ID
1624 * @return A Thread reference
1626 Thread * ModelExecution::get_pthread(pthread_t pid) {
1627 // pid 1 is reserved for the main thread, pthread ids should start from 2
1629 return get_thread(pid);
1636 uint32_t thread_id = x.v;
1638 if (thread_id < pthread_counter + 1)
1639 return pthread_map[thread_id];
1645 * @brief Check if a Thread is currently enabled
1646 * @param t The Thread to check
1647 * @return True if the Thread is currently enabled
1649 bool ModelExecution::is_enabled(Thread *t) const
1651 return scheduler->is_enabled(t);
1655 * @brief Check if a Thread is currently enabled
1656 * @param tid The ID of the Thread to check
1657 * @return True if the Thread is currently enabled
1659 bool ModelExecution::is_enabled(thread_id_t tid) const
1661 return scheduler->is_enabled(tid);
1665 * @brief Select the next thread to execute based on the curren action
1667 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1668 * actions should be followed by the execution of their child thread. In either
1669 * case, the current action should determine the next thread schedule.
1671 * @param curr The current action
1672 * @return The next thread to run, if the current action will determine this
1673 * selection; otherwise NULL
1675 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1677 /* Do not split atomic RMW */
1678 if (curr->is_rmwr())
1679 return get_thread(curr);
1680 /* Follow CREATE with the created thread */
1681 /* which is not needed, because model.cc takes care of this */
1682 if (curr->get_type() == THREAD_CREATE)
1683 return curr->get_thread_operand();
1684 if (curr->get_type() == PTHREAD_CREATE) {
1685 return curr->get_thread_operand();
1691 * Takes the next step in the execution, if possible.
1692 * @param curr The current step to take
1693 * @return Returns the next Thread to run, if any; NULL if this execution
1696 Thread * ModelExecution::take_step(ModelAction *curr)
1698 Thread *curr_thrd = get_thread(curr);
1699 ASSERT(curr_thrd->get_state() == THREAD_READY);
1701 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1702 curr = check_current_action(curr);
1705 /* Process this action in ModelHistory for records */
1707 model->get_history()->process_action( curr, curr->get_tid() );
1709 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1710 scheduler->remove_thread(curr_thrd);
1712 return action_select_next_thread(curr);
1715 /** This method removes references to an Action before we delete it. */
1717 void ModelExecution::removeAction(ModelAction *act) {
1719 sllnode<ModelAction *> * listref = act->getTraceRef();
1720 if (listref != NULL) {
1721 action_trace.erase(listref);
1725 sllnode<ModelAction *> * listref = act->getThrdMapRef();
1726 if (listref != NULL) {
1727 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1728 (*vec)[act->get_tid()].erase(listref);
1731 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1732 sllnode<ModelAction *> * listref = act->getActionRef();
1733 if (listref != NULL) {
1734 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1735 list->erase(listref);
1737 } else if (act->is_wait()) {
1738 sllnode<ModelAction *> * listref = act->getActionRef();
1739 if (listref != NULL) {
1740 void *mutex_loc = (void *) act->get_value();
1741 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1743 } else if (act->is_free()) {
1744 sllnode<ModelAction *> * listref = act->getActionRef();
1745 if (listref != NULL) {
1746 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1747 (*vec)[act->get_tid()].erase(listref);
1749 //Clear it from last_sc_map
1750 if (obj_last_sc_map.get(act->get_location()) == act) {
1751 obj_last_sc_map.remove(act->get_location());
1754 //Remove from Cyclegraph
1755 mo_graph->freeAction(act);
1759 /** Computes clock vector that all running threads have already synchronized to. */
1761 ClockVector * ModelExecution::computeMinimalCV() {
1762 ClockVector *cvmin = NULL;
1763 //Thread 0 isn't a real thread, so skip it..
1764 for(unsigned int i = 1;i < thread_map.size();i++) {
1765 Thread * t = thread_map[i];
1766 if (t->get_state() == THREAD_COMPLETED)
1768 thread_id_t tid = int_to_id(i);
1769 ClockVector * cv = get_cv(tid);
1771 cvmin = new ClockVector(cv, NULL);
1773 cvmin->minmerge(cv);
1779 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
1781 void ModelExecution::fixupLastAct(ModelAction *act) {
1782 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1783 newact->set_seq_number(get_next_seq_num());
1784 newact->create_cv(act);
1785 newact->set_last_fence_release(act->get_last_fence_release());
1786 add_action_to_lists(newact, false);
1789 /** Compute which actions to free. */
1791 void ModelExecution::collectActions() {
1792 if (priv->used_sequence_numbers < params->traceminsize)
1795 //Compute minimal clock vector for all live threads
1796 ClockVector *cvmin = computeMinimalCV();
1797 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
1798 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1800 //Next walk action trace... When we hit an action, see if it is
1801 //invisible (e.g., earlier than the first before the minimum
1802 //clock for the thread... if so erase it and all previous
1803 //actions in cyclegraph
1804 sllnode<ModelAction*> * it;
1805 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1806 ModelAction *act = it->getVal();
1807 modelclock_t actseq = act->get_seq_number();
1809 //See if we are done
1810 if (actseq > maxtofree)
1813 thread_id_t act_tid = act->get_tid();
1814 modelclock_t tid_clock = cvmin->getClock(act_tid);
1816 //Free if it is invisible or we have set a flag to remove visible actions.
1817 if (actseq <= tid_clock || params->removevisible) {
1818 ModelAction * write;
1819 if (act->is_write()) {
1821 } else if (act->is_read()) {
1822 write = act->get_reads_from();
1826 //Mark everything earlier in MO graph to be freed
1827 CycleNode * cn = mo_graph->getNode_noCreate(write);
1829 queue->push_back(cn);
1830 while(!queue->empty()) {
1831 CycleNode * node = queue->back();
1833 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1834 CycleNode * prevnode = node->getInEdge(i);
1835 ModelAction * prevact = prevnode->getAction();
1836 if (prevact->get_type() != READY_FREE) {
1837 prevact->set_free();
1838 queue->push_back(prevnode);
1846 //We may need to remove read actions in the window we don't delete to preserve correctness.
1848 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1849 ModelAction *act = it2->getVal();
1850 //Do iteration early in case we delete the act
1852 bool islastact = false;
1853 ModelAction *lastact = get_last_action(act->get_tid());
1854 if (act == lastact) {
1855 Thread * th = get_thread(act);
1856 islastact = !th->is_complete();
1859 if (act->is_read()) {
1860 if (act->get_reads_from()->is_free()) {
1861 if (act->is_rmw()) {
1862 //Weaken a RMW from a freed store to a write
1863 act->set_type(ATOMIC_WRITE);
1875 //If we don't delete the action, we should remove references to release fences
1877 const ModelAction *rel_fence =act->get_last_fence_release();
1878 if (rel_fence != NULL) {
1879 modelclock_t relfenceseq = rel_fence->get_seq_number();
1880 thread_id_t relfence_tid = rel_fence->get_tid();
1881 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1882 //Remove references to irrelevant release fences
1883 if (relfenceseq <= tid_clock)
1884 act->set_last_fence_release(NULL);
1887 //Now we are in the window of old actions that we remove if possible
1888 for (;it != NULL;) {
1889 ModelAction *act = it->getVal();
1890 //Do iteration early since we may delete node...
1892 bool islastact = false;
1893 ModelAction *lastact = get_last_action(act->get_tid());
1894 if (act == lastact) {
1895 Thread * th = get_thread(act);
1896 islastact = !th->is_complete();
1899 if (act->is_read()) {
1900 if (act->get_reads_from()->is_free()) {
1901 if (act->is_rmw()) {
1902 act->set_type(ATOMIC_WRITE);
1912 } else if (act->is_free()) {
1919 } else if (act->is_write()) {
1920 //Do nothing with write that hasn't been marked to be freed
1921 } else if (islastact) {
1922 //Keep the last action for non-read/write actions
1923 } else if (act->is_fence()) {
1924 //Note that acquire fences can always be safely
1925 //removed, but could incur extra overheads in
1926 //traversals. Removing them before the cvmin seems
1927 //like a good compromise.
1929 //Release fences before the cvmin don't do anything
1930 //because everyone has already synchronized.
1932 //Sequentially fences before cvmin are redundant
1933 //because happens-before will enforce same
1936 modelclock_t actseq = act->get_seq_number();
1937 thread_id_t act_tid = act->get_tid();
1938 modelclock_t tid_clock = cvmin->getClock(act_tid);
1939 if (actseq <= tid_clock) {
1941 // Remove reference to act from thrd_last_fence_release
1942 int thread_id = id_to_int( act->get_tid() );
1943 if (thrd_last_fence_release[thread_id] == act) {
1944 thrd_last_fence_release[thread_id] = NULL;
1950 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1951 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1952 //need to keep most recent unlock/wait for each lock
1953 if(act->is_unlock() || act->is_wait()) {
1954 ModelAction * lastlock = get_last_unlock(act);
1955 if (lastlock != act) {
1960 } else if (act->is_create()) {
1961 if (act->get_thread_operand()->is_complete()) {
1973 //If we don't delete the action, we should remove references to release fences
1974 const ModelAction *rel_fence =act->get_last_fence_release();
1975 if (rel_fence != NULL) {
1976 modelclock_t relfenceseq = rel_fence->get_seq_number();
1977 thread_id_t relfence_tid = rel_fence->get_tid();
1978 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1979 //Remove references to irrelevant release fences
1980 if (relfenceseq <= tid_clock)
1981 act->set_last_fence_release(NULL);
1989 Fuzzer * ModelExecution::getFuzzer() {