11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
// Small per-execution state that is rolled back (snapshotted) between
// model-checking executions. NOTE(review): struct body is only partially
// visible here; remaining members/initializers live in elided lines.
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
34 ~model_snapshot_members() {
// Free the heap-allocated bug_message objects accumulated in 'bugs'
35 for (unsigned int i = 0;i < bugs.size();i++)
40 unsigned int next_thread_id;
41 modelclock_t used_sequence_numbers;
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
49 /** @brief Constructor */
// Builds one execution: allocates the snapshotted member struct, the
// modification-order graph, and the fuzzer, then registers this execution
// with both the fuzzer and the scheduler.
50 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
54 thread_map(2), /* We'll always need at least 2 threads */
59 condvar_waiters_map(),
66 thrd_last_fence_release(),
67 priv(new struct model_snapshot_members ()),
68 mo_graph(new CycleGraph()),
70 fuzzer(new NewFuzzer()),
76 /* Initialize a model-checker thread, for special ModelActions */
// model_thread consumes INITIAL_THREAD_ID, so user threads start at id 1
77 model_thread = new Thread(get_next_id());
78 add_thread(model_thread);
79 fuzzer->register_engine(m, this);
80 scheduler->register_engine(this);
// TLS key whose destructor runs per-thread cleanup on thread exit
82 pthread_key_create(&pthreadkey, tlsdestructor);
86 /** @brief Destructor */
// Deletes every Thread object created during this execution (including the
// special model thread at index 0).
87 ModelExecution::~ModelExecution()
89 for (unsigned int i = 0;i < get_num_threads();i++)
90 delete get_thread(int_to_id(i));
// @return the global execution counter, delegated to the owning ModelChecker
96 int ModelExecution::get_execution_number() const
98 return model->get_execution_number();
// Lazily-initializing lookup: returns the SnapVector<action_list_t> for
// 'ptr' in 'hash', creating (and presumably inserting) an empty vector on
// first access. NOTE(review): the insert-into-hash line is elided here.
101 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
103 SnapVector<action_list_t> *tmp = hash->get(ptr);
105 tmp = new SnapVector<action_list_t>();
// Lazily-initializing lookup: returns the simple_action_list_t stored for
// 'ptr', allocating an empty list if none exists yet (insertion elided).
111 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
113 simple_action_list_t *tmp = hash->get(ptr);
115 tmp = new simple_action_list_t();
// Overload of the lazy lookup above for vectors of simple_action_list_t
// (used for the per-location, per-thread write lists).
121 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
123 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
125 tmp = new SnapVector<simple_action_list_t>();
132 * When vectors of action lists are reallocated due to resize, the root address of
133 * action lists may change. Hence we need to fix the parent pointer of the children
// Walks every list in the (possibly just-reallocated) vector so that each
// list's internal parent/back-pointers can be repaired (fixup call elided).
136 static void fixup_action_list (SnapVector<action_list_t> * vec)
138 for (uint i = 0; i < vec->size(); i++) {
139 action_list_t * list = &(*vec)[i];
145 /** @return a thread ID for a new Thread */
// Post-increments the snapshotted counter, so ids are dense from 0 upward.
146 thread_id_t ModelExecution::get_next_id()
148 return priv->next_thread_id++;
151 /** @return the number of user threads created during this execution */
// next_thread_id doubles as the thread count because ids are allocated densely.
152 unsigned int ModelExecution::get_num_threads() const
154 return priv->next_thread_id;
157 /** @return a sequence number for a new ModelAction */
// Pre-increment: the first sequence number handed out is 1, not 0.
158 modelclock_t ModelExecution::get_next_seq_num()
160 return ++priv->used_sequence_numbers;
163 /** @return a sequence number for a new ModelAction */
// Read-only peek at the most recently issued sequence number.
164 modelclock_t ModelExecution::get_curr_seq_num()
166 return priv->used_sequence_numbers;
169 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
// Undoes one get_next_seq_num() so the postponed action's number is reusable.
170 void ModelExecution::restore_last_seq_num()
172 priv->used_sequence_numbers--;
176 * @brief Should the current action wake up a given thread?
178 * @param curr The current action
179 * @param thread The thread that we might wake up
180 * @return True, if we should wake up the sleeping thread; false otherwise
182 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
184 const ModelAction *asleep = thread->get_pending();
185 /* Don't allow partial RMW to wake anyone up */
188 /* Synchronizing actions may have been backtracked */
189 if (asleep->could_synchronize_with(curr))
191 /* All acquire/release fences and fence-acquire/store-release */
192 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
194 /* Fence-release + store can awake load-acquire on the same location */
195 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
// Only wake if the writer's last release fence comes after the sleeper's
// last action (compared by the ModelAction ordering operator).
196 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
197 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
200 /* The sleep is literally sleeping */
201 if (asleep->is_sleep()) {
// Defer the wake/no-wake decision for real sleeps to the fuzzer policy
202 if (fuzzer->shouldWake(asleep))
// Scans every thread in the scheduler's sleep set and wakes the ones that
// should_wake_up() approves for the current action 'curr'.
209 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
211 for (unsigned int i = 0;i < get_num_threads();i++) {
212 Thread *thr = get_thread(int_to_id(i));
213 if (scheduler->is_sleep_set(thr)) {
214 if (should_wake_up(curr, thr)) {
215 /* Remove this thread from sleep set */
216 scheduler->remove_sleep(thr);
// Record spurious-wakeup state for threads pending on an actual sleep op
217 if (thr->get_pending()->is_sleep())
218 thr->set_wakeup_state(true);
// Records a bug report for this execution; ownership of the new
// bug_message passes to priv->bugs (freed in ~model_snapshot_members).
224 void ModelExecution::assert_bug(const char *msg)
226 priv->bugs.push_back(new bug_message(msg));
230 /** @return True, if any bugs have been reported for this execution */
231 bool ModelExecution::have_bug_reports() const
233 return priv->bugs.size() != 0;
// @return the accumulated bug reports for this execution (body elided here)
236 SnapVector<bug_message *> * ModelExecution::get_bugs() const
242 * Check whether the current trace has triggered an assertion which should halt
245 * @return True, if the execution should be aborted; false otherwise
247 bool ModelExecution::has_asserted() const
249 return priv->asserted;
253 * Trigger a trace assertion which should cause this execution to be halted.
254 * This can be due to a detected bug or due to an infeasibility that should
257 void ModelExecution::set_assert()
259 priv->asserted = true;
263 * Check if we are in a deadlock. Should only be called at the end of an
264 * execution, although it should not give false positives in the middle of an
265 * execution (there should be some ENABLED thread).
267 * @return True if program is in a deadlock; false otherwise
269 bool ModelExecution::is_deadlocked() const
271 bool blocking_threads = false;
272 for (unsigned int i = 0;i < get_num_threads();i++) {
273 thread_id_t tid = int_to_id(i);
276 Thread *t = get_thread(tid);
// A non-model thread with a pending action is blocked waiting to run
277 if (!t->is_model_thread() && t->get_pending())
278 blocking_threads = true;
// NOTE(review): an early-return for enabled threads appears to be elided;
// as visible, this reports deadlock iff some user thread is blocked.
280 return blocking_threads;
284 * Check if this is a complete execution. That is, have all thread completed
285 * execution (rather than exiting because sleep sets have forced a redundant
288 * @return True if the execution is complete.
290 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the execution did not run to completion
292 for (unsigned int i = 0;i < get_num_threads();i++)
293 if (is_enabled(int_to_id(i)))
// Retroactively converts a plain (non-atomic) store at 'location' into a
// NONATOMIC_WRITE ModelAction so atomic reads may read from it. Recovers the
// writing thread and clock from shadow memory, stamps the action with that
// clock, inserts it into the write/trace lists, and applies write
// modification order. @return the newly created ModelAction.
298 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
299 uint64_t value = *((const uint64_t *) location);
300 modelclock_t storeclock;
301 thread_id_t storethread;
302 getStoreThreadAndClock(location, &storethread, &storeclock);
// Mark the location atomic so the conversion is performed only once
303 setAtomicStoreFlag(location);
304 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
305 act->set_seq_number(storeclock);
306 add_normal_write_to_lists(act);
307 add_write_to_lists(act);
308 w_modification_order(act);
310 model->get_history()->process_action(act, act->get_tid());
316 * Processes a read model action.
317 * @param curr is the read model action to process.
318 * @param rf_set is the set of model actions we can possibly read from
319 * @return True if the read can be pruned from the thread map list.
321 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
323 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
// If the location was last written non-atomically, materialize that store
// and add it as a read-from candidate.
324 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
325 if (hasnonatomicstore) {
326 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
327 rf_set->push_back(nonatomicstore);
330 // Remove writes that violate read modification order
// Swap-with-back removal keeps the scan O(n) without preserving order
333 while (i < rf_set->size()) {
334 ModelAction * rf = (*rf_set)[i];
335 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
336 (*rf_set)[i] = rf_set->back();
// The fuzzer picks which remaining write this read observes
343 int index = fuzzer->selectWrite(curr, rf_set);
345 ModelAction *rf = (*rf_set)[index];
348 bool canprune = false;
// Second, mutating pass: collect mo-prior writes and add mo edges to rf
349 if (r_modification_order(curr, rf, priorset, &canprune)) {
350 for(unsigned int i=0;i<priorset->size();i++) {
351 mo_graph->addEdge((*priorset)[i], rf);
354 get_thread(curr)->set_return_value(curr->get_return_value());
356 //Update acquire fence clock vector
357 ClockVector * hbcv = get_hb_from_write(curr->get_reads_from());
359 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
// Only plain atomic reads are prunable (not RMWs)
360 return canprune && (curr->get_type() == ATOMIC_READ);
// Chosen rf failed the mutating check: discard it and retry (loop elided)
363 (*rf_set)[index] = rf_set->back();
369 * Processes a lock, trylock, or unlock model action. @param curr is
370 * the read model action to process.
372 * The try lock operation checks whether the lock is taken. If not,
373 * it falls to the normal lock operation case. If so, it returns
376 * The lock operation has already been checked that it is enabled, so
377 * it just grabs the lock and synchronizes with the previous unlock.
379 * The unlock operation has to re-enable all of the threads that are
380 * waiting on the lock.
382 * @return True if synchronization was updated; false otherwise
384 bool ModelExecution::process_mutex(ModelAction *curr)
386 cdsc::mutex *mutex = curr->get_mutex();
387 struct cdsc::mutex_state *state = NULL;
390 state = mutex->get_state();
392 switch (curr->get_type()) {
393 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently unowned
394 bool success = !state->locked;
395 curr->set_try_lock(success);
397 get_thread(curr)->set_return_value(0);
400 get_thread(curr)->set_return_value(1);
402 //otherwise fall into the lock case
404 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
405 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
406 // assert_bug("Lock access before initialization");
408 // TODO: lock count for recursive mutexes
// Ownership is recorded as the Thread* that holds the lock
409 state->locked = get_thread(curr);
410 ModelAction *unlock = get_last_unlock(curr);
411 //synchronize with the previous unlock statement
412 if (unlock != NULL) {
413 synchronize(unlock, curr);
419 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
// Wait case: release the mutex, enqueue this thread as a condvar waiter,
// and put it to sleep (unless the fuzzer elects not to wait).
420 if (fuzzer->shouldWait(curr)) {
421 /* wake up the other threads */
422 for (unsigned int i = 0;i < get_num_threads();i++) {
423 Thread *t = get_thread(int_to_id(i));
424 Thread *curr_thrd = get_thread(curr);
425 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
429 /* unlock the lock - after checking who was waiting on it */
430 state->locked = NULL;
432 /* remove old wait action and disable this thread */
433 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
434 for (sllnode<ModelAction *> * it = waiters->begin(); it != NULL; it = it->getNext()) {
435 ModelAction * wait = it->getVal();
436 if (wait->get_tid() == curr->get_tid()) {
442 waiters->push_back(curr);
443 scheduler->sleep(get_thread(curr));
448 case ATOMIC_TIMEDWAIT:
449 case ATOMIC_UNLOCK: {
450 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
451 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
452 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
453 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
454 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
455 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
456 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
457 //MUST EVENMTUALLY RELEASE...
459 // TODO: lock count for recursive mutexes
460 /* wake up the other threads */
461 for (unsigned int i = 0;i < get_num_threads();i++) {
462 Thread *t = get_thread(int_to_id(i));
463 Thread *curr_thrd = get_thread(curr);
464 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
468 /* unlock the lock - after checking who was waiting on it */
469 state->locked = NULL;
472 case ATOMIC_NOTIFY_ALL: {
473 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
474 //activate all the waiting threads
475 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
476 scheduler->wake(get_thread(rit->getVal()));
481 case ATOMIC_NOTIFY_ONE: {
482 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
483 if (waiters->size() != 0) {
// The fuzzer chooses which single waiter gets notified
484 Thread * thread = fuzzer->selectNotify(waiters);
485 scheduler->wake(thread);
497 * Process a write ModelAction
498 * @param curr The ModelAction to process
499 * @return True if the mo_graph was updated or promises were resolved
501 void ModelExecution::process_write(ModelAction *curr)
// Writes only contribute modification-order edges; VALUE_NONE marks the
// thread's return value as meaningless for a store.
503 w_modification_order(curr);
504 get_thread(curr)->set_return_value(VALUE_NONE);
508 * Process a fence ModelAction
509 * @param curr The ModelAction to process
510 * @return True if synchronization was updated
512 void ModelExecution::process_fence(ModelAction *curr)
515 * fence-relaxed: no-op
516 * fence-release: only log the occurence (not in this function), for
517 * use in later synchronization
518 * fence-acquire (this function): search for hypothetical release
520 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
// An acquire fence absorbs the happens-before accumulated by this
// thread's prior acquire-qualifying reads (tracked in acq_fence_cv).
522 if (curr->is_acquire()) {
523 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
528 * @brief Process the current action for thread-related activity
530 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
531 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
532 * synchronization, etc. This function is a no-op for non-THREAD actions
533 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
535 * @param curr The current action
536 * @return True if synchronization was updated or a thread completed
538 void ModelExecution::process_thread_action(ModelAction *curr)
540 switch (curr->get_type()) {
541 case THREAD_CREATE: {
// C11 thrd_create: thread params are smuggled through the action's value
542 thrd_t *thrd = (thrd_t *)curr->get_location();
543 struct thread_params *params = (struct thread_params *)curr->get_value();
544 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
545 curr->set_thread_operand(th);
547 th->set_creation(curr);
550 case PTHREAD_CREATE: {
// pthreads variant: the location receives a synthetic pthread id, and the
// new Thread is registered in pthread_map under that id.
551 (*(uint32_t *)curr->get_location()) = pthread_counter++;
553 struct pthread_params *params = (struct pthread_params *)curr->get_value();
554 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
555 curr->set_thread_operand(th);
557 th->set_creation(curr);
559 if ( pthread_map.size() < pthread_counter )
560 pthread_map.resize( pthread_counter );
561 pthread_map[ pthread_counter-1 ] = th;
// Join cases: synchronize with the joined thread's last action
566 Thread *blocking = curr->get_thread_operand();
567 ModelAction *act = get_last_action(blocking->get_id());
568 synchronize(act, curr);
572 Thread *blocking = curr->get_thread_operand();
573 ModelAction *act = get_last_action(blocking->get_id());
574 synchronize(act, curr);
575 break; // WL: to be add (modified)
578 case THREADONLY_FINISH:
579 case THREAD_FINISH: {
580 Thread *th = get_thread(curr);
// Special handling when the initial (main) thread finishes (body elided)
581 if (curr->get_type() == THREAD_FINISH &&
582 th == model->getInitThread()) {
588 /* Wake up any joining threads */
589 for (unsigned int i = 0;i < get_num_threads();i++) {
590 Thread *waiting = get_thread(int_to_id(i));
591 if (waiting->waiting_on() == th &&
592 waiting->get_pending()->is_thread_join())
593 scheduler->wake(waiting);
// Sleep-like case: park the thread with its pending action preserved
602 Thread *th = get_thread(curr);
603 th->set_pending(curr);
604 scheduler->add_sleep(th);
613 * Initialize the current action by performing one or more of the following
614 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
615 * manipulating backtracking sets, allocating and
616 * initializing clock vectors, and computing the promises to fulfill.
618 * @param curr The current action, as passed from the user context; may be
619 * freed/invalidated after the execution of this function, with a different
620 * action "returned" its place (pass-by-reference)
621 * @return True if curr is a newly-explored action; false otherwise
623 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// RMW continuation: fold this action into the earlier RMWR via process_rmw
625 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
626 ModelAction *newcurr = process_rmw(*curr);
632 ModelAction *newcurr = *curr;
634 newcurr->set_seq_number(get_next_seq_num());
635 /* Always compute new clock vector */
636 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
638 /* Assign most recent release fence */
639 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
641 return true; /* This was a new ModelAction */
646 * @brief Establish reads-from relation between two actions
648 * Perform basic operations involved with establishing a concrete rf relation,
649 * including setting the ModelAction data and checking for release sequences.
651 * @param act The action that is reading (must be a read)
652 * @param rf The action from which we are reading (must be a write)
654 * @return True if this read established synchronization
657 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
660 ASSERT(rf->is_write());
662 act->set_read_from(rf);
// Acquire reads synchronize: merge the write's release-sequence clock
// vector into the reader's clock vector.
663 if (act->is_acquire()) {
664 ClockVector *cv = get_hb_from_write(rf);
667 act->get_cv()->merge(cv);
672 * @brief Synchronizes two actions
674 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
675 * This function performs the synchronization as well as providing other hooks
676 * for other checks along with synchronization.
678 * @param first The left-hand side of the synchronizes-with relation
679 * @param second The right-hand side of the synchronizes-with relation
680 * @return True if the synchronization was successful (i.e., was consistent
681 * with the execution order); false otherwise
683 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// Sanity check: sw edges must agree with the execution order
685 if (*second < *first) {
686 ASSERT(0); //This should not happend
689 return second->synchronize_with(first);
693 * @brief Check whether a model action is enabled.
695 * Checks whether an operation would be successful (i.e., is a lock already
696 * locked, or is the joined thread already complete).
698 * For yield-blocking, yields are never enabled.
700 * @param curr is the ModelAction to check whether it is enabled.
701 * @return a bool that indicates whether the action is enabled.
703 bool ModelExecution::check_action_enabled(ModelAction *curr) {
704 if (curr->is_lock()) {
705 cdsc::mutex *lock = curr->get_mutex();
706 struct cdsc::mutex_state *state = lock->get_state();
708 Thread *lock_owner = (Thread *)state->locked;
709 Thread *curr_thread = get_thread(curr);
// Relocking a recursive mutex we already own is permitted
710 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
716 } else if (curr->is_thread_join()) {
// Join blocks until the target thread has completed
717 Thread *blocking = curr->get_thread_operand();
718 if (!blocking->is_complete()) {
721 } else if (curr->is_sleep()) {
// Fuzzer may elect to skip the sleep entirely
722 if (!fuzzer->shouldSleep(curr))
730 * This is the heart of the model checker routine. It performs model-checking
731 * actions corresponding to a given "current action." Among other processes, it
732 * calculates reads-from relationships, updates synchronization clock vectors,
733 * forms a memory_order constraints graph, and handles replay/backtrack
734 * execution when running permutations of previously-observed executions.
736 * @param curr The current action to process
737 * @return The ModelAction that is actually executed; may be different than
740 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// A second part of an RMW reuses the RMWR's list entries; don't re-add it
743 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
744 bool newly_explored = initialize_curr_action(&curr);
748 wake_up_sleeping_actions(curr);
750 SnapVector<ModelAction *> * rf_set = NULL;
751 /* Build may_read_from set for newly-created actions */
752 if (newly_explored && curr->is_read())
753 rf_set = build_may_read_from(curr);
755 bool canprune = false;
757 if (curr->is_read() && !second_part_of_rmw) {
758 canprune = process_read(curr, rf_set);
// Non-reads must not have received a may-read-from set
761 ASSERT(rf_set == NULL);
763 /* Add the action to lists */
764 if (!second_part_of_rmw)
765 add_action_to_lists(curr, canprune);
767 if (curr->is_write())
768 add_write_to_lists(curr);
// Dispatch type-specific processing (thread/write/fence/mutex)
770 process_thread_action(curr);
772 if (curr->is_write())
775 if (curr->is_fence())
778 if (curr->is_mutex_op())
784 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
// Merges 'act' into this thread's preceding RMWR action and records the
// mandatory mo edge from the value read to the RMW itself.
785 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
786 ModelAction *lastread = get_last_action(act->get_tid());
787 lastread->process_rmw(act);
789 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
795 * @brief Updates the mo_graph with the constraints imposed from the current
798 * Basic idea is the following: Go through each other thread and find
799 * the last action that happened before our read. Two cases:
801 * -# The action is a write: that write must either occur before
802 * the write we read from or be the write we read from.
803 * -# The action is a read: the write that that action read from
804 * must occur before the write we read from or be the same write.
806 * @param curr The current action. Must be a read.
807 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
808 * @param check_only If true, then only check whether the current action satisfies
809 * read modification order or not, without modifiying priorset and canprune.
811 * @return True if modification order edges were added; false otherwise
814 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
815 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
817 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
818 ASSERT(curr->is_read());
820 /* Last SC fence in the current thread */
821 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
823 int tid = curr->get_tid();
825 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
826 if ((int)thrd_lists->size() <= tid) {
827 uint oldsize = thrd_lists->size();
828 thrd_lists->resize(priv->next_thread_id);
// Placement-new each new slot: SnapVector::resize does not construct them
829 for(uint i = oldsize;i < priv->next_thread_id;i++)
830 new (&(*thrd_lists)[i]) action_list_t();
// Resize may have reallocated; repair the lists' internal pointers
832 fixup_action_list(thrd_lists);
835 ModelAction *prev_same_thread = NULL;
836 /* Iterate over all threads */
// Round-robin starting at curr's own tid, wrapping back to 0
837 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
838 /* Last SC fence in thread tid */
839 ModelAction *last_sc_fence_thread_local = NULL;
841 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
843 /* Last SC fence in thread tid, before last SC fence in current thread */
844 ModelAction *last_sc_fence_thread_before = NULL;
845 if (last_sc_fence_local)
846 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
848 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
849 if (prev_same_thread != NULL &&
850 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
851 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
855 /* Iterate over actions in thread, starting from most recent */
856 action_list_t *list = &(*thrd_lists)[tid];
857 sllnode<ModelAction *> * rit;
858 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
859 ModelAction *act = rit->getVal();
864 /* Don't want to add reflexive edges on 'rf' */
865 if (act->equals(rf)) {
866 if (act->happens_before(curr))
872 if (act->is_write()) {
873 /* C++, Section 29.3 statement 5 */
// SC-read vs write before this thread's last SC fence: write --mo--> rf,
// unless that would create a cycle (rf already reaches act).
874 if (curr->is_seqcst() && last_sc_fence_thread_local &&
875 *act < *last_sc_fence_thread_local) {
876 if (mo_graph->checkReachable(rf, act))
879 priorset->push_back(act);
882 /* C++, Section 29.3 statement 4 */
883 else if (act->is_seqcst() && last_sc_fence_local &&
884 *act < *last_sc_fence_local) {
885 if (mo_graph->checkReachable(rf, act))
888 priorset->push_back(act);
891 /* C++, Section 29.3 statement 6 */
892 else if (last_sc_fence_thread_before &&
893 *act < *last_sc_fence_thread_before) {
894 if (mo_graph->checkReachable(rf, act))
897 priorset->push_back(act);
903 * Include at most one act per-thread that "happens
906 if (act->happens_before(curr)) {
908 if (last_sc_fence_local == NULL ||
909 (*last_sc_fence_local < *act)) {
910 prev_same_thread = act;
913 if (act->is_write()) {
914 if (mo_graph->checkReachable(rf, act))
917 priorset->push_back(act);
// hb-prior read: the write it observed must be mo-before rf
919 ModelAction *prevrf = act->get_reads_from();
920 if (!prevrf->equals(rf)) {
921 if (mo_graph->checkReachable(rf, prevrf))
924 priorset->push_back(prevrf);
926 if (act->get_tid() == curr->get_tid()) {
927 //Can prune curr from obj list
941 * Updates the mo_graph with the constraints imposed from the current write.
943 * Basic idea is the following: Go through each other thread and find
944 * the lastest action that happened before our write. Two cases:
946 * (1) The action is a write => that write must occur before
949 * (2) The action is a read => the write that that action read from
950 * must occur before the current write.
952 * This method also handles two other issues:
954 * (I) Sequential Consistency: Making sure that if the current write is
955 * seq_cst, that it occurs after the previous seq_cst write.
957 * (II) Sending the write back to non-synchronizing reads.
959 * @param curr The current action. Must be a write.
960 * @param send_fv A vector for stashing reads to which we may pass our future
961 * value. If NULL, then don't record any future values.
962 * @return True if modification order edges were added; false otherwise
964 void ModelExecution::w_modification_order(ModelAction *curr)
966 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
968 ASSERT(curr->is_write());
// All discovered mo-predecessors are batched into edgeset and committed
// with a single addEdges() call at the end.
970 SnapList<ModelAction *> edgeset;
972 if (curr->is_seqcst()) {
973 /* We have to at least see the last sequentially consistent write,
974 so we are initialized. */
975 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
976 if (last_seq_cst != NULL) {
977 edgeset.push_back(last_seq_cst);
979 //update map for next query
980 obj_last_sc_map.put(curr->get_location(), curr);
983 /* Last SC fence in the current thread */
984 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
986 /* Iterate over all threads */
987 for (i = 0;i < thrd_lists->size();i++) {
988 /* Last SC fence in thread i, before last SC fence in current thread */
989 ModelAction *last_sc_fence_thread_before = NULL;
990 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
991 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
993 /* Iterate over actions in thread, starting from most recent */
994 action_list_t *list = &(*thrd_lists)[i];
995 sllnode<ModelAction*>* rit;
996 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
997 ModelAction *act = rit->getVal();
1000 * 1) If RMW and it actually read from something, then we
1001 * already have all relevant edges, so just skip to next
1004 * 2) If RMW and it didn't read from anything, we should
1005 * whatever edge we can get to speed up convergence.
1007 * 3) If normal write, we need to look at earlier actions, so
1008 * continue processing list.
1010 if (curr->is_rmw()) {
1011 if (curr->get_reads_from() != NULL)
1019 /* C++, Section 29.3 statement 7 */
1020 if (last_sc_fence_thread_before && act->is_write() &&
1021 *act < *last_sc_fence_thread_before) {
1022 edgeset.push_back(act);
1027 * Include at most one act per-thread that "happens
1030 if (act->happens_before(curr)) {
1032 * Note: if act is RMW, just add edge:
1034 * The following edge should be handled elsewhere:
1035 * readfrom(act) --mo--> act
1037 if (act->is_write())
1038 edgeset.push_back(act);
1039 else if (act->is_read()) {
1040 //if previous read accessed a null, just keep going
1041 edgeset.push_back(act->get_reads_from());
1047 mo_graph->addEdges(&edgeset, curr);
1052 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1053 * some constraints. This method checks one the following constraint (others
1054 * require compiler support):
1056 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1057 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1059 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1061 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1063 /* Iterate over all threads */
1064 for (i = 0;i < thrd_lists->size();i++) {
// Per thread: earliest write (or observed write) that is hb-after 'reader'
1065 const ModelAction *write_after_read = NULL;
1067 /* Iterate over actions in thread, starting from most recent */
1068 action_list_t *list = &(*thrd_lists)[i];
1069 sllnode<ModelAction *>* rit;
1070 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1071 ModelAction *act = rit->getVal();
1073 /* Don't disallow due to act == reader */
1074 if (!reader->happens_before(act) || reader == act)
1076 else if (act->is_write())
1077 write_after_read = act;
1078 else if (act->is_read() && act->get_reads_from() != NULL)
1079 write_after_read = act->get_reads_from();
// If such a write mo-precedes 'writer', reading 'writer' would be a
// forbidden read-from-the-future (disallow elided in the gap).
1082 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1089 * Computes the clock vector that happens before propagates from this write.
1091 * @param rf The action that might be part of a release sequence. Must be a
1093 * @return ClockVector of happens before relation.
1096 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// Phase 1: walk the rmw reads-from chain backwards, stacking up RMWs whose
// release-sequence clock vector (rfcv) has not been computed yet.
1097 SnapVector<ModelAction *> * processset = NULL;
1098 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1099 ASSERT(rf->is_write());
// Stop at a non-RMW write, an acq+rel RMW, or a write with a cached rfcv
1100 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1102 if (processset == NULL)
1103 processset = new SnapVector<ModelAction *>();
1104 processset->push_back(rf);
// Phase 2: pop the stack, computing/caching each write's clock vector
1107 int i = (processset == NULL) ? 0 : processset->size();
1109 ClockVector * vec = NULL;
1111 if (rf->get_rfcv() != NULL) {
1112 vec = rf->get_rfcv();
1113 } else if (rf->is_acquire() && rf->is_release()) {
1115 } else if (rf->is_release() && !rf->is_rmw()) {
1117 } else if (rf->is_release()) {
1118 //have rmw that is release and doesn't have a rfcv
1119 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1122 //operation that isn't release
// A relaxed write propagates hb only through its last release fence
1123 if (rf->get_last_fence_release()) {
1125 vec = rf->get_last_fence_release()->get_cv();
1127 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1133 rf = (*processset)[i];
1137 if (processset != NULL)
1143 * Performs various bookkeeping operations for the current ModelAction. For
1144 * instance, adds action to the per-object, per-thread action vector and to the
1145 * action trace list of all thread actions.
1147 * @param act is the ModelAction to add.
1149 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1151 int tid = id_to_int(act->get_tid());
// SC fences and unlocks also go in the per-object list; the returned node
// reference enables O(1) removal later.
1152 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1153 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1154 act->setActionRef(list->add_back(act));
1157 // Update action trace, a total order of all actions
1158 action_trace.addAction(act);
1161 // Update obj_thrd_map, a per location, per thread, order of actions
1162 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1163 if ((int)vec->size() <= tid) {
1164 uint oldsize = vec->size();
1165 vec->resize(priv->next_thread_id);
// resize() leaves new slots unconstructed; placement-new them
1166 for(uint i = oldsize;i < priv->next_thread_id;i++)
1167 new (&(*vec)[i]) action_list_t();
1169 fixup_action_list(vec);
// Pruned reads/writes are deliberately kept out of the per-thread list
1171 if (!canprune && (act->is_read() || act->is_write()))
1172 (*vec)[tid].addAction(act);
1174 // Update thrd_last_action, the last action taken by each thread
1175 if ((int)thrd_last_action.size() <= tid)
1176 thrd_last_action.resize(get_num_threads());
1177 thrd_last_action[tid] = act;
1179 // Update thrd_last_fence_release, the last release fence taken by each thread
1180 if (act->is_fence() && act->is_release()) {
1181 if ((int)thrd_last_fence_release.size() <= tid)
1182 thrd_last_fence_release.resize(get_num_threads());
1183 thrd_last_fence_release[tid] = act;
1186 if (act->is_wait()) {
// For waits, the action's value field carries the mutex address
1187 void *mutex_loc = (void *) act->get_value();
1188 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
/** @brief Append @a act to the end of @a list (trace order). */
void insertIntoActionList(action_list_t *list, ModelAction *act) {
	list->addAction(act);
}
/** @brief Append @a act to @a list after giving it a fresh clock vector
 *  (no parent CV — used for lazily inserted actions). */
void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
	act->create_cv(NULL);
	list->addAction(act);
}
1202 * Performs various bookkeeping operations for a normal write. The
1203 * complication is that we are typically inserting a normal write
1204 * lazily, so we need to insert it into the middle of lists.
1206 * @param act is the ModelAction to add.
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	/* Lazily inserted write: give it a clock vector and put it in the trace */
	insertIntoActionListAndSetCV(&action_trace, act);

	// Update obj_thrd_map, a per location, per thread, order of actions
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size()) {
		uint oldsize =vec->size();
		vec->resize(priv->next_thread_id);
		/* placement-new the per-thread lists exposed by the resize */
		for(uint i=oldsize;i<priv->next_thread_id;i++)
			new (&(*vec)[i]) action_list_t();
	} else
		fixup_action_list(vec);

	insertIntoActionList(&(*vec)[tid],act);

	ModelAction * lastact = thrd_last_action[tid];
	// Update thrd_last_action, the last action taken by each thread
	// NOTE(review): the equal-sequence-number comparison (rather than '<')
	// looks intentional for lazily numbered writes — confirm before changing.
	if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
		thrd_last_action[tid] = act;
}
/** @brief Record @a write in the per-location, per-thread write map
 *  (obj_wr_thrd_map), keeping the list node for later O(1) removal. */
void ModelExecution::add_write_to_lists(ModelAction *write) {
	SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
	int tid = id_to_int(write->get_tid());
	if (tid >= (int)vec->size()) {
		uint oldsize =vec->size();
		vec->resize(priv->next_thread_id);
		/* placement-new the lists exposed by the resize */
		for(uint i=oldsize;i<priv->next_thread_id;i++)
			new (&(*vec)[i]) simple_action_list_t();
	}
	write->setActionRef((*vec)[tid].add_back(write));
}
1246 * @brief Get the last action performed by a particular Thread
1247 * @param tid The thread ID of the Thread in question
1248 * @return The last action in the thread
1250 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1252 int threadid = id_to_int(tid);
1253 if (threadid < (int)thrd_last_action.size())
1254 return thrd_last_action[id_to_int(tid)];
1260 * @brief Get the last fence release performed by a particular Thread
1261 * @param tid The thread ID of the Thread in question
1262 * @return The last fence release in the thread, if one exists; NULL otherwise
1264 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1266 int threadid = id_to_int(tid);
1267 if (threadid < (int)thrd_last_fence_release.size())
1268 return thrd_last_fence_release[id_to_int(tid)];
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action itself.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last seq_cst write
1281 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1283 void *location = curr->get_location();
1284 return obj_last_sc_map.get(location);
1288 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1289 * performed in a particular thread, prior to a particular fence.
1290 * @param tid The ID of the thread to check
1291 * @param before_fence The fence from which to begin the search; if NULL, then
1292 * search for the most recent fence in the thread.
1293 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
	/* All fences should have location FENCE_LOCATION */
	simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
	if (list == NULL)
		return NULL;

	sllnode<ModelAction*>* rit = list->end();

	/* If a starting fence was given, rewind the iterator to just before it. */
	if (before_fence) {
		for (;rit != NULL;rit=rit->getPrev())
			if (rit->getVal() == before_fence)
				break;

		ASSERT(rit->getVal() == before_fence);
		rit = rit->getPrev();
	}

	/* Scan backwards for the most recent SC fence from the requested thread. */
	for (;rit != NULL;rit=rit->getPrev()) {
		ModelAction *act = rit->getVal();
		if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
			return act;
	}
	return NULL;
}
1323 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1324 * location). This function identifies the mutex according to the current
1325 * action, which is presumed to perform on the same mutex.
1326 * @param curr The current ModelAction; also denotes the object location to
1328 * @return The last unlock operation
ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
{
	void *location = curr->get_location();

	simple_action_list_t *list = obj_map.get(location);
	/* No action has ever touched this mutex */
	if (list == NULL)
		return NULL;

	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	sllnode<ModelAction*>* rit;
	for (rit = list->end();rit != NULL;rit=rit->getPrev())
		/* waits also release the mutex, so they count as unlocks here */
		if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
			return rit->getVal();
	return NULL;
}
1346 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1348 ModelAction *parent = get_last_action(tid);
1350 parent = get_thread(tid)->get_creation();
1355 * Returns the clock vector for a given thread.
1356 * @param tid The thread whose clock vector we want
1357 * @return Desired clock vector
1359 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1361 ModelAction *firstaction=get_parent_action(tid);
1362 return firstaction != NULL ? firstaction->get_cv() : NULL;
/** @brief Compare two values as if truncated to @a size bytes.
 *  @param val1, val2 The raw 64-bit values
 *  @param size The access width in bytes (1, 2, 4, or 8)
 *  @return True iff the low @a size bytes of the two values are equal */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch(size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	case 8:
		return val1 == val2;
	default:
		/* unsupported access width */
		ASSERT(0);
		return false;
	}
}
1382 * Build up an initial set of all past writes that this 'read' action may read
1383 * from, as well as any previously-observed future values that must still be valid.
1385 * @param curr is the current ModelAction that we are exploring; it must be a
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
	/* All writes to this location, bucketed per thread */
	SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();

	/* Iterate over all threads */
	if (thrd_lists != NULL)
		for (i = 0;i < thrd_lists->size();i++) {
			/* Iterate over actions in thread, starting from most recent */
			simple_action_list_t *list = &(*thrd_lists)[i];
			sllnode<ModelAction *> * rit;
			for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
				ModelAction *act = rit->getVal();

				/* A read cannot read from itself */
				if (act == curr)
					continue;

				/* Don't consider more than one seq_cst write if we are a seq_cst read. */
				bool allow_read = true;

				if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
					allow_read = false;

				/* Need to check whether we will have two RMW reading from the same value */
				if (curr->is_rmwr()) {
					/* It is okay if we have a failing CAS */
					if (!curr->is_rmwrcas() ||
							valequals(curr->get_value(), act->get_value(), curr->getSize())) {
						//Need to make sure we aren't the second RMW
						CycleNode * node = mo_graph->getNode_noCreate(act);
						if (node != NULL && node->getRMW() != NULL) {
							//we are the second RMW
							allow_read = false;
						}
					}
				}

				if (allow_read) {
					/* Only add feasible reads */
					rf_set->push_back(act);
				}

				/* Include at most one act per-thread that "happens before" curr */
				if (act->happens_before(curr))
					break;
			}
		}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("End printing read_from_past\n");
	}
	return rf_set;
}
/** @brief Print every action in @a list (in trace order) and a rolling
 *  hash of the whole list, bracketed by table rulers. */
static void print_list(action_list_t *list)
{
	sllnode<ModelAction*> *it;

	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");

	unsigned int hash = 0;

	for (it = list->begin();it != NULL;it=it->getNext()) {
		const ModelAction *act = it->getVal();
		/* seq number 0 marks actions not yet ordered in the trace */
		if (act->get_seq_number() > 0)
			act->print();
		/* fold every action into the execution hash */
		hash = hash^(hash<<3)^(it->getVal()->hash());
	}
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
}
1472 #if SUPPORT_MOD_ORDER_DUMP
1473 void ModelExecution::dumpGraph(char *filename)
1476 sprintf(buffer, "%s.dot", filename);
1477 FILE *file = fopen(buffer, "w");
1478 fprintf(file, "digraph %s {\n", filename);
1479 mo_graph->dumpNodes(file);
1480 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1482 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1483 ModelAction *act = it->getVal();
1484 if (act->is_read()) {
1485 mo_graph->dot_print_node(file, act);
1486 mo_graph->dot_print_edge(file,
1487 act->get_reads_from(),
1489 "label=\"rf\", color=red, weight=2");
1491 if (thread_array[act->get_tid()]) {
1492 mo_graph->dot_print_edge(file,
1493 thread_array[id_to_int(act->get_tid())],
1495 "label=\"sb\", color=blue, weight=400");
1498 thread_array[act->get_tid()] = act;
1500 fprintf(file, "}\n");
1501 model_free(thread_array);
1506 /** @brief Prints an execution trace summary. */
1507 void ModelExecution::print_summary()
1509 #if SUPPORT_MOD_ORDER_DUMP
1510 char buffername[100];
1511 sprintf(buffername, "exec%04u", get_execution_number());
1512 mo_graph->dumpGraphToFile(buffername);
1513 sprintf(buffername, "graph%04u", get_execution_number());
1514 dumpGraph(buffername);
1517 model_print("Execution trace %d:", get_execution_number());
1518 if (scheduler->all_threads_sleeping())
1519 model_print(" SLEEP-SET REDUNDANT");
1520 if (have_bug_reports())
1521 model_print(" DETECTED BUG(S)");
1525 print_list(&action_trace);
/** @brief Print only the tail (most recent ~25 actions) of the trace,
 *  plus the rolling hash of that window. */
void ModelExecution::print_tail()
{
	model_print("Execution trace %d:\n", get_execution_number());

	sllnode<ModelAction*> *it;

	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");

	unsigned int hash = 0;

	/* Collect the last `length` actions by walking backwards, then print
	 * them forwards so the output reads in trace order. */
	int length = 25;
	int counter = 0;
	SnapList<ModelAction *> list;
	for (it = action_trace.end();it != NULL;it = it->getPrev()) {
		if (counter > length)
			break;

		ModelAction * act = it->getVal();
		list.push_front(act);
		counter++;
	}

	for (it = list.begin();it != NULL;it=it->getNext()) {
		const ModelAction *act = it->getVal();
		/* seq number 0 marks actions not yet ordered in the trace */
		if (act->get_seq_number() > 0)
			act->print();
		hash = hash^(hash<<3)^(it->getVal()->hash());
	}
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
}
1565 * Add a Thread to the system for the first time. Should only be called once
1567 * @param t The Thread to add
void ModelExecution::add_thread(Thread *t)
{
	unsigned int i = id_to_int(t->get_id());
	/* Grow the map on demand; thread ids are assigned sequentially */
	if (i >= thread_map.size())
		thread_map.resize(i + 1);
	thread_map[i] = t;
	/* The special model-checker thread is never scheduled */
	if (!t->is_model_thread())
		scheduler->add_thread(t);
}
1580 * @brief Get a Thread reference by its ID
1581 * @param tid The Thread's ID
1582 * @return A Thread reference
1584 Thread * ModelExecution::get_thread(thread_id_t tid) const
1586 unsigned int i = id_to_int(tid);
1587 if (i < thread_map.size())
1588 return thread_map[i];
1593 * @brief Get a reference to the Thread in which a ModelAction was executed
1594 * @param act The ModelAction
1595 * @return A Thread reference
Thread * ModelExecution::get_thread(const ModelAction *act) const
{
	/* Resolve through the action's thread id */
	return get_thread(act->get_tid());
}
1603 * @brief Get a Thread reference by its pthread ID
1604 * @param index The pthread's ID
1605 * @return A Thread reference
Thread * ModelExecution::get_pthread(pthread_t pid) {
	// pid 1 is reserved for the main thread, pthread ids should start from 2
	if (pid == 1)
		return get_thread(pid);
	/* pthread_t is opaque; reinterpret its low 32 bits as the internal
	 * thread id assigned at pthread_create time.
	 * NOTE(review): assumes ids were packed this way at creation — verify
	 * against the pthread_create handling. */
	union {
		pthread_t p;
		uint32_t v;
	} x;
	x.p = pid;
	uint32_t thread_id = x.v;
	if (thread_id < pthread_counter + 1)
		return pthread_map[thread_id];
	return NULL;
}
1626 * @brief Check if a Thread is currently enabled
1627 * @param t The Thread to check
1628 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(Thread *t) const
{
	/* The scheduler owns the enabled set; just delegate */
	return scheduler->is_enabled(t);
}
1636 * @brief Check if a Thread is currently enabled
1637 * @param tid The ID of the Thread to check
1638 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(thread_id_t tid) const
{
	/* The scheduler owns the enabled set; just delegate */
	return scheduler->is_enabled(tid);
}
 * @brief Select the next thread to execute based on the current action
1648 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1649 * actions should be followed by the execution of their child thread. In either
1650 * case, the current action should determine the next thread schedule.
1652 * @param curr The current action
1653 * @return The next thread to run, if the current action will determine this
1654 * selection; otherwise NULL
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
{
	/* Do not split atomic RMW */
	if (curr->is_rmwr())
		return get_thread(curr);
	/* Follow CREATE with the created thread */
	/* which is not needed, because model.cc takes care of this */
	if (curr->get_type() == THREAD_CREATE)
		return curr->get_thread_operand();
	if (curr->get_type() == PTHREAD_CREATE) {
		return curr->get_thread_operand();
	}
	/* No constraint: let the scheduler pick freely */
	return NULL;
}
1672 * Takes the next step in the execution, if possible.
1673 * @param curr The current step to take
1674 * @return Returns the next Thread to run, if any; NULL if this execution
Thread * ModelExecution::take_step(ModelAction *curr)
{
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	ASSERT(check_action_enabled(curr));	/* May have side effects? */
	/* check_current_action may substitute a different (e.g. recycled)
	 * ModelAction object; use its return value from here on */
	curr = check_current_action(curr);

	/* Process this action in ModelHistory for records */
	model->get_history()->process_action( curr, curr->get_tid() );

	/* A thread that blocked or finished during this step must leave the
	 * scheduler's runnable set */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	return action_select_next_thread(curr);
}
1696 /** This method removes references to an Action before we delete it. */
/** This method removes references to an Action before we delete it. */
void ModelExecution::removeAction(ModelAction *act) {
	/* Drop from the global trace */
	action_trace.removeAction(act);

	/* Drop from the per-location, per-thread action map */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	(*vec)[act->get_tid()].removeAction(act);

	/* Erase whichever auxiliary index this action type was stored in,
	 * using the sllnode reference recorded at insertion time. */
	if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
			list->erase(listref);
		}
	} else if (act->is_wait()) {
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			/* waits are indexed under the mutex carried in the value field */
			void *mutex_loc = (void *) act->get_value();
			get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
		}
	} else if (act->is_free()) {
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
			(*vec)[act->get_tid()].erase(listref);
		}
	}

	//Clear it from last_sc_map
	if (obj_last_sc_map.get(act->get_location()) == act) {
		obj_last_sc_map.remove(act->get_location());
	}

	//Remove from Cyclegraph
	mo_graph->freeAction(act);
}
1735 /** Computes clock vector that all running threads have already synchronized to. */
/** Computes clock vector that all running threads have already synchronized to. */
ClockVector * ModelExecution::computeMinimalCV() {
	ClockVector *cvmin = NULL;
	//Thread 0 isn't a real thread, so skip it..
	for(unsigned int i = 1;i < thread_map.size();i++) {
		Thread * t = thread_map[i];
		/* Completed threads no longer constrain the minimum */
		if (t->get_state() == THREAD_COMPLETED)
			continue;
		thread_id_t tid = int_to_id(i);
		ClockVector * cv = get_cv(tid);
		/* First live thread seeds the minimum; later ones are min-merged in */
		if (cvmin == NULL)
			cvmin = new ClockVector(cv, NULL);
		else
			cvmin->minmerge(cv);
	}
	/* NULL when no live thread was found */
	return cvmin;
}
1755 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
/** Sometimes we need to remove an action that is the most recent in the thread.  This happens if it is mo before action in other threads.  In that case we need to create a replacement latest ModelAction */
void ModelExecution::fixupLastAct(ModelAction *act) {
	/* Replace the removed action with a NOP that inherits its clock vector
	 * and release-fence reference, so the thread still has a valid
	 * "last action" carrying the same synchronization state. */
	ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
	newact->set_seq_number(get_next_seq_num());
	newact->create_cv(act);
	newact->set_last_fence_release(act->get_last_fence_release());
	/* canprune=false: the NOP must stay in the per-thread lists */
	add_action_to_lists(newact, false);
}
1765 /** Compute which actions to free. */
/** Compute which actions to free. */
void ModelExecution::collectActions() {
	/* Don't bother collecting while the trace is still short */
	if (priv->used_sequence_numbers < params->traceminsize)
		return;

	//Compute minimal clock vector for all live threads
	ClockVector *cvmin = computeMinimalCV();
	SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
	/* Only actions at or below this sequence number may be freed */
	modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;

	//Next walk action trace... When we hit an action, see if it is
	//invisible (e.g., earlier than the first before the minimum
	//clock for the thread... if so erase it and all previous
	//actions in cyclegraph
	sllnode<ModelAction*> * it;
	for (it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		modelclock_t actseq = act->get_seq_number();

		//See if we are done
		if (actseq > maxtofree)
			break;

		thread_id_t act_tid = act->get_tid();
		modelclock_t tid_clock = cvmin->getClock(act_tid);

		//Free if it is invisible or we have set a flag to remove visible actions.
		if (actseq <= tid_clock || params->removevisible) {
			/* Find the store this action corresponds to */
			ModelAction * write;
			if (act->is_write()) {
				write = act;
			} else if (act->is_read()) {
				write = act->get_reads_from();
			} else
				continue;

			//Mark everything earlier in MO graph to be freed
			CycleNode * cn = mo_graph->getNode_noCreate(write);
			if (cn != NULL) {
				/* BFS backwards over incoming mo edges, marking stores free */
				queue->push_back(cn);
				while(!queue->empty()) {
					CycleNode * node = queue->back();
					queue->pop_back();
					for(unsigned int i=0;i<node->getNumInEdges();i++) {
						CycleNode * prevnode = node->getInEdge(i);
						ModelAction * prevact = prevnode->getAction();
						if (prevact->get_type() != READY_FREE) {
							prevact->set_free();
							queue->push_back(prevnode);
						}
					}
				}
			}
		}
	}

	//We may need to remove read actions in the window we don't delete to preserve correctness.
	for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
		ModelAction *act = it2->getVal();
		//Do iteration early in case we delete the act
		it2 = it2->getPrev();
		/* A live thread's most recent action needs a replacement if removed */
		bool islastact = false;
		ModelAction *lastact = get_last_action(act->get_tid());
		if (act == lastact) {
			Thread * th = get_thread(act);
			islastact = !th->is_complete();
		}

		if (act->is_read()) {
			if (act->get_reads_from()->is_free()) {
				if (act->is_rmw()) {
					//Weaken a RMW from a freed store to a write
					act->set_type(ATOMIC_WRITE);
				} else {
					/* Plain read from a freed store: drop it entirely */
					removeAction(act);
					if (islastact) {
						fixupLastAct(act);
					}
					delete act;
					continue;
				}
			}
		}
		//If we don't delete the action, we should remove references to release fences
		const ModelAction *rel_fence =act->get_last_fence_release();
		if (rel_fence != NULL) {
			modelclock_t relfenceseq = rel_fence->get_seq_number();
			thread_id_t relfence_tid = rel_fence->get_tid();
			modelclock_t tid_clock = cvmin->getClock(relfence_tid);
			//Remove references to irrelevant release fences
			if (relfenceseq <= tid_clock)
				act->set_last_fence_release(NULL);
		}
	}

	//Now we are in the window of old actions that we remove if possible
	for (;it != NULL;) {
		ModelAction *act = it->getVal();
		//Do iteration early since we may delete node...
		it = it->getNext();
		bool islastact = false;
		ModelAction *lastact = get_last_action(act->get_tid());
		if (act == lastact) {
			Thread * th = get_thread(act);
			islastact = !th->is_complete();
		}
		if (act->is_read()) {
			if (act->get_reads_from()->is_free()) {
				if (act->is_rmw()) {
					/* Weaken a RMW from a freed store to a write */
					act->set_type(ATOMIC_WRITE);
				} else {
					removeAction(act);
					if (islastact) {
						fixupLastAct(act);
					}
					delete act;
					continue;
				}
			}
		} else if (act->is_free()) {
			/* Store already marked freeable by the mo-graph walk above */
			removeAction(act);
			if (islastact) {
				fixupLastAct(act);
			}
			delete act;
			continue;
		} else if (act->is_write()) {
			//Do nothing with write that hasn't been marked to be freed
		} else if (islastact) {
			//Keep the last action for non-read/write actions
		} else if (act->is_fence()) {
			//Note that acquire fences can always be safely
			//removed, but could incur extra overheads in
			//traversals. Removing them before the cvmin seems
			//like a good compromise.

			//Release fences before the cvmin don't do anything
			//because everyone has already synchronized.

			//Sequentially fences before cvmin are redundant
			//because happens-before will enforce same
			//ordering.

			modelclock_t actseq = act->get_seq_number();
			thread_id_t act_tid = act->get_tid();
			modelclock_t tid_clock = cvmin->getClock(act_tid);
			if (actseq <= tid_clock) {
				removeAction(act);
				// Remove reference to act from thrd_last_fence_release
				int thread_id = id_to_int( act->get_tid() );
				if (thrd_last_fence_release[thread_id] == act) {
					thrd_last_fence_release[thread_id] = NULL;
				}
				delete act;
				continue;
			}
		} else {
			//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
			//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
			//need to keep most recent unlock/wait for each lock
			if(act->is_unlock() || act->is_wait()) {
				ModelAction * lastlock = get_last_unlock(act);
				if (lastlock != act) {
					removeAction(act);
					delete act;
					continue;
				}
			} else if (act->is_create()) {
				/* A create is only needed while its child may still run */
				if (act->get_thread_operand()->is_complete()) {
					removeAction(act);
					delete act;
					continue;
				}
			} else {
				removeAction(act);
				delete act;
				continue;
			}
		}

		//If we don't delete the action, we should remove references to release fences
		const ModelAction *rel_fence =act->get_last_fence_release();
		if (rel_fence != NULL) {
			modelclock_t relfenceseq = rel_fence->get_seq_number();
			thread_id_t relfence_tid = rel_fence->get_tid();
			modelclock_t tid_clock = cvmin->getClock(relfence_tid);
			//Remove references to irrelevant release fences
			if (relfenceseq <= tid_clock)
				act->set_last_fence_release(NULL);
		}
	}

	delete cvmin;
	delete queue;
}
1965 Fuzzer * ModelExecution::getFuzzer() {