11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
/* NOTE(review): this excerpt is elided — the tail of the constructor's
 * initializer list, the destructor's loop body (which presumably deletes each
 * bug_message in `bugs` — confirm against full file), trailing members and the
 * closing brace are not visible here. All code lines kept byte-identical. */
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
/* Destructor: walks the recorded bug messages (loop body elided here). */
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
/* Next thread id to hand out; also serves as the count of created threads
 * (see ModelExecution::get_num_threads below). */
40 unsigned int next_thread_id;
/* Monotonic counter backing get_next_seq_num()/get_curr_seq_num(). */
41 modelclock_t used_sequence_numbers;
/* Bug reports accumulated via ModelExecution::assert_bug(). */
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
49 /** @brief Constructor */
/* Builds one execution: wires the scheduler/fuzzer back-references, creates
 * the special model-checker thread, and sets up a TLS destructor key.
 * NOTE(review): large parts of the member-initializer list are elided in this
 * excerpt; code lines kept byte-identical. */
50 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
54 thread_map(2), /* We'll always need at least 2 threads */
59 condvar_waiters_map(),
66 thrd_last_fence_release(),
67 priv(new struct model_snapshot_members ()),
68 mo_graph(new CycleGraph()),
70 fuzzer(new NewFuzzer()),
76 /* Initialize a model-checker thread, for special ModelActions */
77 model_thread = new Thread(get_next_id());
78 add_thread(model_thread);
/* Let the fuzzer and scheduler call back into this execution/model. */
79 fuzzer->register_engine(m, this);
80 scheduler->register_engine(this);
/* Per-thread TLS cleanup hook (tlsdestructor runs at thread exit). */
82 pthread_key_create(&pthreadkey, tlsdestructor);
86 /** @brief Destructor */
/* Frees every Thread created for this execution (including the model thread,
 * which occupies one of the thread-map slots). NOTE(review): remaining cleanup
 * (e.g. of priv/mo_graph) is elided from this excerpt. */
87 ModelExecution::~ModelExecution()
89 for (unsigned int i = 0;i < get_num_threads();i++)
90 delete get_thread(int_to_id(i));
/** @return the model's global execution number (delegates to ModelChecker). */
96 int ModelExecution::get_execution_number() const
98 return model->get_execution_number();
/* Look up the per-location action list in `hash`, lazily creating an empty
 * list on first use. NOTE(review): the null-check and the hash->put() that
 * stores the fresh list are elided in this excerpt. */
101 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
103 action_list_t *tmp = hash->get(ptr);
105 tmp = new action_list_t();
/* Same lazy-creation pattern as get_safe_ptr_action, but for the per-location,
 * per-thread vector of action lists. NOTE(review): null-check / hash->put()
 * elided in this excerpt. */
111 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
113 SnapVector<action_list_t> *tmp = hash->get(ptr);
115 tmp = new SnapVector<action_list_t>();
121 /** @return a thread ID for a new Thread */
/* Post-increment: hands out ids 0,1,2,... so next_thread_id always equals the
 * number of threads created so far. */
122 thread_id_t ModelExecution::get_next_id()
124 return priv->next_thread_id++;
127 /** @return the number of user threads created during this execution */
/* Relies on get_next_id() handing out consecutive ids starting at 0. */
128 unsigned int ModelExecution::get_num_threads() const
130 return priv->next_thread_id;
133 /** @return a sequence number for a new ModelAction */
/* Pre-increment: the first sequence number handed out is 1, and
 * get_curr_seq_num() afterwards returns the same value. */
134 modelclock_t ModelExecution::get_next_seq_num()
136 return ++priv->used_sequence_numbers;
139 /** @return the most recently issued sequence number (no allocation) */
140 modelclock_t ModelExecution::get_curr_seq_num()
142 return priv->used_sequence_numbers;
145 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
/* Undoes one get_next_seq_num() so a postponed action can be re-issued later
 * with a fresh (identical) number. */
146 void ModelExecution::restore_last_seq_num()
148 priv->used_sequence_numbers--;
152 * @brief Should the current action wake up a given thread?
154 * @param curr The current action
155 * @param thread The thread that we might wake up
156 * @return True, if we should wake up the sleeping thread; false otherwise
/* NOTE(review): every `return` statement of this predicate is elided in this
 * excerpt — only the conditions are visible. Code lines kept byte-identical. */
158 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
160 const ModelAction *asleep = thread->get_pending();
161 /* Don't allow partial RMW to wake anyone up */
164 /* Synchronizing actions may have been backtracked */
165 if (asleep->could_synchronize_with(curr))
167 /* All acquire/release fences and fence-acquire/store-release */
168 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
170 /* Fence-release + store can awake load-acquire on the same location */
171 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
/* Only relevant if the current thread published a release fence after the
 * sleeping thread's last action. */
172 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
173 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
176 /* The sleep is literally sleeping */
177 if (asleep->is_sleep()) {
/* Defer the wake decision for real sleeps to the fuzzer. */
178 if (fuzzer->shouldWake(asleep))
/* Scan every thread in the scheduler's sleep set and wake those that
 * should_wake_up(curr, thr) approves; threads pending on an explicit sleep
 * action also get their wakeup flag set. */
185 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
187 for (unsigned int i = 0;i < get_num_threads();i++) {
188 Thread *thr = get_thread(int_to_id(i));
189 if (scheduler->is_sleep_set(thr)) {
190 if (should_wake_up(curr, thr)) {
191 /* Remove this thread from sleep set */
192 scheduler->remove_sleep(thr);
/* Mark threads blocked on a thrd_sleep so they know the wake was genuine. */
193 if (thr->get_pending()->is_sleep())
194 thr->set_wakeup_state(true);
/** @brief Record a bug report (msg is copied into a bug_message) for this
 *  execution; reports are queried via have_bug_reports()/get_bugs(). */
200 void ModelExecution::assert_bug(const char *msg)
202 priv->bugs.push_back(new bug_message(msg));
206 /** @return True, if any bugs have been reported for this execution */
207 bool ModelExecution::have_bug_reports() const
209 return priv->bugs.size() != 0;
/** @return the vector of bug reports recorded by assert_bug()
 *  NOTE(review): function body elided in this excerpt. */
212 SnapVector<bug_message *> * ModelExecution::get_bugs() const
218 * Check whether the current trace has triggered an assertion which should halt
221 * @return True, if the execution should be aborted; false otherwise
223 bool ModelExecution::has_asserted() const
225 return priv->asserted;
229 * Trigger a trace assertion which should cause this execution to be halted.
230 * This can be due to a detected bug or due to an infeasibility that should
/* Sticky flag; read back by has_asserted(). */
233 void ModelExecution::set_assert()
235 priv->asserted = true;
239 * Check if we are in a deadlock. Should only be called at the end of an
240 * execution, although it should not give false positives in the middle of an
241 * execution (there should be some ENABLED thread).
243 * @return True if program is in a deadlock; false otherwise
/* NOTE(review): lines elided in this excerpt — presumably the check that no
 * thread is enabled; as visible, the result is true iff some non-model thread
 * still has a pending action. */
245 bool ModelExecution::is_deadlocked() const
247 bool blocking_threads = false;
248 for (unsigned int i = 0;i < get_num_threads();i++) {
249 thread_id_t tid = int_to_id(i);
252 Thread *t = get_thread(tid);
/* A user thread stuck with a pending (unexecuted) action is blocked. */
253 if (!t->is_model_thread() && t->get_pending())
254 blocking_threads = true;
256 return blocking_threads;
260 * Check if this is a complete execution. That is, have all thread completed
261 * execution (rather than exiting because sleep sets have forced a redundant
264 * @return True if the execution is complete.
/* Complete iff no thread remains enabled. NOTE(review): the `return false` /
 * `return true` lines are elided in this excerpt. */
266 bool ModelExecution::is_complete_execution() const
268 for (unsigned int i = 0;i < get_num_threads();i++)
269 if (is_enabled(int_to_id(i)))
/* Materialize a plain (non-atomic) store at `location` as a NONATOMIC_WRITE
 * ModelAction so atomic reads may read from it: recovers the storing thread
 * and clock recorded for the location, flags the location as atomic, then
 * lazily inserts the new action into the trace/write lists and updates the
 * modification order. NOTE(review): the `return act` is elided here. */
274 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
275 uint64_t value = *((const uint64_t *) location);
276 modelclock_t storeclock;
277 thread_id_t storethread;
278 getStoreThreadAndClock(location, &storethread, &storeclock);
279 setAtomicStoreFlag(location);
280 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
/* Backdate the action to the clock at which the plain store happened. */
281 act->set_seq_number(storeclock);
282 add_normal_write_to_lists(act);
283 add_write_to_lists(act);
284 w_modification_order(act);
286 model->get_history()->process_action(act, act->get_tid());
292 * Processes a read model action.
293 * @param curr is the read model action to process.
294 * @param rf_set is the set of model actions we can possibly read from
295 * @return True if the read can be pruned from the thread map list.
/* NOTE(review): this excerpt elides several statements (loop increments,
 * the read_from()/synchronization call, delete of priorset, retry path when
 * the chosen write is rejected). Code lines kept byte-identical. */
297 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
299 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
/* A plain store to this location must be converted into a ModelAction so it
 * can participate in the reads-from set. */
300 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
301 if (hasnonatomicstore) {
302 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
303 rf_set->push_back(nonatomicstore);
306 // Remove writes that violate read modification order
309 while (i < rf_set->size()) {
310 ModelAction * rf = (*rf_set)[i];
/* check_only=true: probe mo without mutating priorset/canprune. */
311 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
/* Swap-with-back removal of the offending candidate. */
312 (*rf_set)[i] = rf_set->back();
/* Let the fuzzer pick which remaining write this read observes. */
319 int index = fuzzer->selectWrite(curr, rf_set);
321 ModelAction *rf = (*rf_set)[index];
324 bool canprune = false;
325 if (r_modification_order(curr, rf, priorset, &canprune)) {
/* Commit the mo edges implied by this reads-from choice. */
326 for(unsigned int i=0;i<priorset->size();i++) {
327 mo_graph->addEdge((*priorset)[i], rf);
330 get_thread(curr)->set_return_value(curr->get_return_value());
/* Only plain atomic reads are prunable from the per-thread obj map. */
332 return canprune && (curr->get_type() == ATOMIC_READ);
/* Rejected choice: drop it from the candidate set and (in elided code) retry. */
335 (*rf_set)[index] = rf_set->back();
341 * Processes a lock, trylock, or unlock model action. @param curr is
342 * the read model action to process.
344 * The try lock operation checks whether the lock is taken. If not,
345 * it falls to the normal lock operation case. If so, it returns
348 * The lock operation has already been checked that it is enabled, so
349 * it just grabs the lock and synchronizes with the previous unlock.
351 * The unlock operation has to re-enable all of the threads that are
352 * waiting on the lock.
354 * @return True if synchronization was updated; false otherwise
/* NOTE(review): many case labels, `break`s and scheduler calls are elided in
 * this excerpt; the switch structure below is incomplete as shown. */
356 bool ModelExecution::process_mutex(ModelAction *curr)
358 cdsc::mutex *mutex = curr->get_mutex();
359 struct cdsc::mutex_state *state = NULL;
362 state = mutex->get_state();
364 switch (curr->get_type()) {
365 case ATOMIC_TRYLOCK: {
/* trylock succeeds iff the mutex is currently free. */
366 bool success = !state->locked;
367 curr->set_try_lock(success);
369 get_thread(curr)->set_return_value(0);
372 get_thread(curr)->set_return_value(1);
374 //otherwise fall into the lock case
376 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
377 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
378 // assert_bug("Lock access before initialization");
380 // TODO: lock count for recursive mutexes
/* Owner is recorded as the acquiring Thread pointer. */
381 state->locked = get_thread(curr);
382 ModelAction *unlock = get_last_unlock(curr);
383 //synchronize with the previous unlock statement
384 if (unlock != NULL) {
385 synchronize(unlock, curr);
391 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
/* Wait path: release the mutex, wake lock-waiters, then park this thread on
 * the condition variable's waiter list. */
392 if (fuzzer->shouldWait(curr)) {
393 /* wake up the other threads */
394 for (unsigned int i = 0;i < get_num_threads();i++) {
395 Thread *t = get_thread(int_to_id(i));
396 Thread *curr_thrd = get_thread(curr);
397 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
401 /* unlock the lock - after checking who was waiting on it */
402 state->locked = NULL;
404 /* disable this thread */
405 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
406 scheduler->sleep(get_thread(curr));
411 case ATOMIC_TIMEDWAIT:
412 case ATOMIC_UNLOCK: {
413 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
414 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
415 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
416 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
417 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
418 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
419 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
420 //MUST EVENTUALLY RELEASE...
422 // TODO: lock count for recursive mutexes
423 /* wake up the other threads */
424 for (unsigned int i = 0;i < get_num_threads();i++) {
425 Thread *t = get_thread(int_to_id(i));
426 Thread *curr_thrd = get_thread(curr);
427 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
431 /* unlock the lock - after checking who was waiting on it */
432 state->locked = NULL;
435 case ATOMIC_NOTIFY_ALL: {
436 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
437 //activate all the waiting threads
438 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
439 scheduler->wake(get_thread(rit->getVal()));
444 case ATOMIC_NOTIFY_ONE: {
445 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
/* Fuzzer chooses which single waiter gets woken. */
446 if (waiters->size() != 0) {
447 Thread * thread = fuzzer->selectNotify(waiters);
448 scheduler->wake(thread);
460 * Process a write ModelAction
461 * @param curr The ModelAction to process
462 * @return True if the mo_graph was updated or promises were resolved
/* Adds the write's modification-order constraints and reports VALUE_NONE as
 * the thread's return value (writes return no value). */
464 void ModelExecution::process_write(ModelAction *curr)
466 w_modification_order(curr);
467 get_thread(curr)->set_return_value(VALUE_NONE);
471 * Process a fence ModelAction
472 * @param curr The ModelAction to process
473 * @return True if synchronization was updated
/* NOTE(review): several `continue`/`break`/`return` lines of this loop are
 * elided in this excerpt. Code lines kept byte-identical. */
475 bool ModelExecution::process_fence(ModelAction *curr)
478 * fence-relaxed: no-op
479 * fence-release: only log the occurrence (not in this function), for
480 * use in later synchronization
481 * fence-acquire (this function): search for hypothetical release
483 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
485 bool updated = false;
486 if (curr->is_acquire()) {
487 action_list_t *list = &action_trace;
488 sllnode<ModelAction *> * rit;
489 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the full trace backwards, restricted to curr's own thread. */
490 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
491 ModelAction *act = rit->getVal();
494 if (act->get_tid() != curr->get_tid())
496 /* Stop at the beginning of the thread */
497 if (act->is_thread_start())
499 /* Stop once we reach a prior fence-acquire */
500 if (act->is_fence() && act->is_acquire())
504 /* read-acquire will find its own release sequences */
505 if (act->is_acquire())
508 /* Establish hypothetical release sequences */
/* Merge the happens-before clock propagated by the write `act` read from. */
509 ClockVector *cv = get_hb_from_write(act->get_reads_from());
510 if (cv != NULL && curr->get_cv()->merge(cv))
518 * @brief Process the current action for thread-related activity
520 * Performs current-action processing for a THREAD_* ModelAction. Processing
521 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
522 * synchronization, etc. This function is a no-op for non-THREAD actions
523 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
525 * @param curr The current action
526 * @return True if synchronization was updated or a thread completed
/* NOTE(review): several case labels (e.g. the join cases), add_thread calls
 * and `break`s are elided in this excerpt. Code lines kept byte-identical. */
528 void ModelExecution::process_thread_action(ModelAction *curr)
530 switch (curr->get_type()) {
531 case THREAD_CREATE: {
532 thrd_t *thrd = (thrd_t *)curr->get_location();
533 struct thread_params *params = (struct thread_params *)curr->get_value();
534 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
535 curr->set_thread_operand(th);
536 th->set_creation(curr);
540 case PTHREAD_CREATE: {
/* pthread ids are dense integers handed out via pthread_counter and written
 * back through the user-supplied pthread_t location. */
541 (*(uint32_t *)curr->get_location()) = pthread_counter++;
543 struct pthread_params *params = (struct pthread_params *)curr->get_value();
544 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
545 curr->set_thread_operand(th);
547 th->set_creation(curr);
549 if ( pthread_map.size() < pthread_counter )
550 pthread_map.resize( pthread_counter );
551 pthread_map[ pthread_counter-1 ] = th;
/* Join with the completed thread: inherit its clock via its last action. */
556 Thread *blocking = curr->get_thread_operand();
557 ModelAction *act = get_last_action(blocking->get_id());
558 synchronize(act, curr);
562 Thread *blocking = curr->get_thread_operand();
563 ModelAction *act = get_last_action(blocking->get_id());
564 synchronize(act, curr);
565 break; // WL: to be add (modified)
568 case THREADONLY_FINISH:
569 case THREAD_FINISH: {
570 Thread *th = get_thread(curr);
/* Special-case: the initial thread finishing (elided handling follows). */
571 if (curr->get_type() == THREAD_FINISH &&
572 th == model->getInitThread()) {
578 /* Wake up any joining threads */
579 for (unsigned int i = 0;i < get_num_threads();i++) {
580 Thread *waiting = get_thread(int_to_id(i));
581 if (waiting->waiting_on() == th &&
582 waiting->get_pending()->is_thread_join())
583 scheduler->wake(waiting);
/* Sleep action: keep it pending and move the thread into the sleep set. */
592 Thread *th = get_thread(curr);
593 th->set_pending(curr);
594 scheduler->add_sleep(th);
603 * Initialize the current action by performing one or more of the following
604 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
605 * manipulating backtracking sets, allocating and
606 * initializing clock vectors, and computing the promises to fulfill.
608 * @param curr The current action, as passed from the user context; may be
609 * freed/invalidated after the execution of this function, with a different
610 * action "returned" its place (pass-by-reference)
611 * @return True if curr is a newly-explored action; false otherwise
/* NOTE(review): the RMW-merge branch's body (swapping *curr for the merged
 * action) is partially elided in this excerpt. */
613 bool ModelExecution::initialize_curr_action(ModelAction **curr)
615 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
616 ModelAction *newcurr = process_rmw(*curr);
622 ModelAction *newcurr = *curr;
624 newcurr->set_seq_number(get_next_seq_num());
625 /* Always compute new clock vector */
626 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
628 /* Assign most recent release fence */
629 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
631 return true; /* This was a new ModelAction */
636 * @brief Establish reads-from relation between two actions
638 * Perform basic operations involved with establishing a concrete rf relation,
639 * including setting the ModelAction data and checking for release sequences.
641 * @param act The action that is reading (must be a read)
642 * @param rf The action from which we are reading (must be a write)
644 * @return True if this read established synchronization
647 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
650 ASSERT(rf->is_write());
652 act->set_read_from(rf);
/* An acquire read synchronizes with the (hypothetical) release sequence of
 * the write it reads from, merging the propagated happens-before clock. */
653 if (act->is_acquire()) {
654 ClockVector *cv = get_hb_from_write(rf);
657 act->get_cv()->merge(cv);
662 * @brief Synchronizes two actions
664 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
665 * This function performs the synchronization as well as providing other hooks
666 * for other checks along with synchronization.
668 * @param first The left-hand side of the synchronizes-with relation
669 * @param second The right-hand side of the synchronizes-with relation
670 * @return True if the synchronization was successful (i.e., was consistent
671 * with the execution order); false otherwise
673 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* Sanity check: sw edges must point forward in the trace order. */
675 if (*second < *first) {
676 ASSERT(0); //This should not happen
679 return second->synchronize_with(first);
683 * @brief Check whether a model action is enabled.
685 * Checks whether an operation would be successful (i.e., is a lock already
686 * locked, or is the joined thread already complete).
688 * For yield-blocking, yields are never enabled.
690 * @param curr is the ModelAction to check whether it is enabled.
691 * @return a bool that indicates whether the action is enabled.
/* NOTE(review): the return statements / blocked-thread bookkeeping of each
 * branch are elided in this excerpt. Code lines kept byte-identical. */
693 bool ModelExecution::check_action_enabled(ModelAction *curr) {
694 if (curr->is_lock()) {
695 cdsc::mutex *lock = curr->get_mutex();
696 struct cdsc::mutex_state *state = lock->get_state();
698 Thread *lock_owner = (Thread *)state->locked;
699 Thread *curr_thread = get_thread(curr);
/* Recursive mutexes may be re-acquired by their current owner. */
700 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
706 } else if (curr->is_thread_join()) {
/* Join blocks until the target thread has completed. */
707 Thread *blocking = curr->get_thread_operand();
708 if (!blocking->is_complete()) {
711 } else if (curr->is_sleep()) {
/* Fuzzer may decide a sleep completes immediately. */
712 if (!fuzzer->shouldSleep(curr))
720 * This is the heart of the model checker routine. It performs model-checking
721 * actions corresponding to a given "current action." Among other processes, it
722 * calculates reads-from relationships, updates synchronization clock vectors,
723 * forms a memory_order constraints graph, and handles replay/backtrack
724 * execution when running permutations of previously-observed executions.
726 * @param curr The current action to process
727 * @return The ModelAction that is actually executed; may be different than
/* NOTE(review): several statements (e.g. the process_write/fence/mutex calls'
 * surrounding lines and the final return) are elided in this excerpt. */
730 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* An RMW/RMWC is the completion of an earlier RMWR; it must not be re-added
 * to the action lists. */
733 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
734 bool newly_explored = initialize_curr_action(&curr);
738 wake_up_sleeping_actions(curr);
740 SnapVector<ModelAction *> * rf_set = NULL;
741 /* Build may_read_from set for newly-created actions */
742 if (newly_explored && curr->is_read())
743 rf_set = build_may_read_from(curr);
745 bool canprune = false;
747 if (curr->is_read() && !second_part_of_rmw) {
748 canprune = process_read(curr, rf_set);
751 ASSERT(rf_set == NULL);
753 /* Add the action to lists */
754 if (!second_part_of_rmw)
755 add_action_to_lists(curr, canprune);
757 if (curr->is_write())
758 add_write_to_lists(curr);
760 process_thread_action(curr);
762 if (curr->is_write())
765 if (curr->is_fence())
768 if (curr->is_mutex_op())
774 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
/* Merges `act` into the thread's previous RMWR action and records the
 * rf --mo--> rmw edge. NOTE(review): the `return lastread` is elided here. */
775 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
776 ModelAction *lastread = get_last_action(act->get_tid());
777 lastread->process_rmw(act);
779 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
785 * @brief Updates the mo_graph with the constraints imposed from the current
788 * Basic idea is the following: Go through each other thread and find
789 * the last action that happened before our read. Two cases:
791 * -# The action is a write: that write must either occur before
792 * the write we read from or be the write we read from.
793 * -# The action is a read: the write that that action read from
794 * must occur before the write we read from or be the same write.
796 * @param curr The current action. Must be a read.
797 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
798 * @param check_only If true, then only check whether the current action satisfies
799 * read modification order or not, without modifying priorset and canprune.
801 * @return True if modification order edges were added; false otherwise
/* NOTE(review): numerous `break`/`continue`/`return false` lines and the
 * check_only guards around priorset->push_back() are elided in this excerpt;
 * code lines kept byte-identical. */
804 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
805 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
807 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
808 ASSERT(curr->is_read());
810 /* Last SC fence in the current thread */
811 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
813 int tid = curr->get_tid();
815 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
816 if ((int)thrd_lists->size() <= tid) {
817 uint oldsize = thrd_lists->size();
818 thrd_lists->resize(priv->next_thread_id);
/* Placement-new each fresh slot: resize alone does not run a useful ctor
 * for these snapshotted lists. */
819 for(uint i = oldsize;i < priv->next_thread_id;i++)
820 new (&(*thrd_lists)[i]) action_list_t();
823 ModelAction *prev_same_thread = NULL;
/* Iterate over all threads, starting with curr's own tid and wrapping. */
825 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
826 /* Last SC fence in thread tid */
827 ModelAction *last_sc_fence_thread_local = NULL;
829 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
831 /* Last SC fence in thread tid, before last SC fence in current thread */
832 ModelAction *last_sc_fence_thread_before = NULL;
833 if (last_sc_fence_local)
834 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
836 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
837 if (prev_same_thread != NULL &&
838 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
839 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
843 /* Iterate over actions in thread, starting from most recent */
844 action_list_t *list = &(*thrd_lists)[tid];
845 sllnode<ModelAction *> * rit;
846 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
847 ModelAction *act = rit->getVal();
852 /* Don't want to add reflexive edges on 'rf' */
853 if (act->equals(rf)) {
854 if (act->happens_before(curr))
860 if (act->is_write()) {
861 /* C++, Section 29.3 statement 5 */
862 if (curr->is_seqcst() && last_sc_fence_thread_local &&
863 *act < *last_sc_fence_thread_local) {
/* If rf is already mo-before act, adding act --mo--> rf is a cycle. */
864 if (mo_graph->checkReachable(rf, act))
867 priorset->push_back(act);
870 /* C++, Section 29.3 statement 4 */
871 else if (act->is_seqcst() && last_sc_fence_local &&
872 *act < *last_sc_fence_local) {
873 if (mo_graph->checkReachable(rf, act))
876 priorset->push_back(act);
879 /* C++, Section 29.3 statement 6 */
880 else if (last_sc_fence_thread_before &&
881 *act < *last_sc_fence_thread_before) {
882 if (mo_graph->checkReachable(rf, act))
885 priorset->push_back(act);
891 * Include at most one act per-thread that "happens
894 if (act->happens_before(curr)) {
896 if (last_sc_fence_local == NULL ||
897 (*last_sc_fence_local < *act)) {
898 prev_same_thread = act;
901 if (act->is_write()) {
902 if (mo_graph->checkReachable(rf, act))
905 priorset->push_back(act);
/* act is a read: order the write it observed before rf instead. */
907 ModelAction *prevrf = act->get_reads_from();
908 if (!prevrf->equals(rf)) {
909 if (mo_graph->checkReachable(rf, prevrf))
912 priorset->push_back(prevrf);
914 if (act->get_tid() == curr->get_tid()) {
915 //Can prune curr from obj list
929 * Updates the mo_graph with the constraints imposed from the current write.
931 * Basic idea is the following: Go through each other thread and find
932 * the latest action that happened before our write. Two cases:
934 * (1) The action is a write => that write must occur before
937 * (2) The action is a read => the write that that action read from
938 * must occur before the current write.
940 * This method also handles two other issues:
942 * (I) Sequential Consistency: Making sure that if the current write is
943 * seq_cst, that it occurs after the previous seq_cst write.
945 * (II) Sending the write back to non-synchronizing reads.
947 * @param curr The current action. Must be a write.
948 * @param send_fv A vector for stashing reads to which we may pass our future
949 * value. If NULL, then don't record any future values.
950 * @return True if modification order edges were added; false otherwise
/* NOTE(review): loop-control lines (`break`/`continue`) and the declaration
 * of `i` are elided in this excerpt. Code lines kept byte-identical. */
952 void ModelExecution::w_modification_order(ModelAction *curr)
954 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
956 ASSERT(curr->is_write());
/* Collected predecessors; committed in one batch at the end. */
958 SnapList<ModelAction *> edgeset;
960 if (curr->is_seqcst()) {
961 /* We have to at least see the last sequentially consistent write,
962 so we are initialized. */
963 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
964 if (last_seq_cst != NULL) {
965 edgeset.push_back(last_seq_cst);
967 //update map for next query
968 obj_last_sc_map.put(curr->get_location(), curr);
971 /* Last SC fence in the current thread */
972 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
974 /* Iterate over all threads */
975 for (i = 0;i < thrd_lists->size();i++) {
976 /* Last SC fence in thread i, before last SC fence in current thread */
977 ModelAction *last_sc_fence_thread_before = NULL;
978 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
979 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
981 /* Iterate over actions in thread, starting from most recent */
982 action_list_t *list = &(*thrd_lists)[i];
983 sllnode<ModelAction*>* rit;
984 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
985 ModelAction *act = rit->getVal();
988 * 1) If RMW and it actually read from something, then we
989 * already have all relevant edges, so just skip to next
992 * 2) If RMW and it didn't read from anything, we should
993 * whatever edge we can get to speed up convergence.
995 * 3) If normal write, we need to look at earlier actions, so
996 * continue processing list.
998 if (curr->is_rmw()) {
999 if (curr->get_reads_from() != NULL)
1007 /* C++, Section 29.3 statement 7 */
1008 if (last_sc_fence_thread_before && act->is_write() &&
1009 *act < *last_sc_fence_thread_before) {
1010 edgeset.push_back(act);
1015 * Include at most one act per-thread that "happens
1018 if (act->happens_before(curr)) {
1020 * Note: if act is RMW, just add edge:
1022 * The following edge should be handled elsewhere:
1023 * readfrom(act) --mo--> act
1025 if (act->is_write())
1026 edgeset.push_back(act);
1027 else if (act->is_read()) {
1028 //if previous read accessed a null, just keep going
1029 edgeset.push_back(act->get_reads_from());
/* Batch-insert all collected mo predecessors of curr. */
1035 mo_graph->addEdges(&edgeset, curr);
1040 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1041 * some constraints. This method checks one the following constraint (others
1042 * require compiler support):
1044 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1045 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
/* NOTE(review): the declaration of `i`, loop-control lines and the final
 * `return` are elided in this excerpt. Code lines kept byte-identical. */
1047 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1049 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1051 /* Iterate over all threads */
1052 for (i = 0;i < thrd_lists->size();i++) {
/* Oldest action in this thread that reader happens-before, resolved to the
 * write it denotes (itself if a write, else the write it read from). */
1053 const ModelAction *write_after_read = NULL;
1055 /* Iterate over actions in thread, starting from most recent */
1056 action_list_t *list = &(*thrd_lists)[i];
1057 sllnode<ModelAction *>* rit;
1058 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1059 ModelAction *act = rit->getVal();
1061 /* Don't disallow due to act == reader */
1062 if (!reader->happens_before(act) || reader == act)
1064 else if (act->is_write())
1065 write_after_read = act;
1066 else if (act->is_read() && act->get_reads_from() != NULL)
1067 write_after_read = act->get_reads_from();
/* Violation if that write is mo-before the candidate writer. */
1070 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1077 * Computes the clock vector that happens before propagates from this write.
1079 * @param rf The action that might be part of a release sequence. Must be a
1081 * @return ClockVector of happens before relation.
/* Two phases: (1) walk the rf chain backwards collecting RMWs whose release
 * clock (rfcv) is not yet cached; (2) replay them oldest-first, building and
 * caching the propagated clock vector. NOTE(review): loop heads, set_rfcv
 * calls, some branch bodies and the final return are elided in this excerpt. */
1084 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1085 SnapVector<ModelAction *> * processset = NULL;
1086 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1087 ASSERT(rf->is_write());
/* Stop collecting at a non-RMW, an acq+rel RMW, or a cached rfcv. */
1088 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1090 if (processset == NULL)
1091 processset = new SnapVector<ModelAction *>();
1092 processset->push_back(rf);
1095 int i = (processset == NULL) ? 0 : processset->size();
1097 ClockVector * vec = NULL;
1099 if (rf->get_rfcv() != NULL) {
/* Cached: reuse the previously computed propagated clock. */
1100 vec = rf->get_rfcv();
1101 } else if (rf->is_acquire() && rf->is_release()) {
1103 } else if (rf->is_release() && !rf->is_rmw()) {
1105 } else if (rf->is_release()) {
1106 //have rmw that is release and doesn't have a rfcv
1107 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1110 //operation that isn't release
/* Non-release write: propagation comes from its last release fence, if any. */
1111 if (rf->get_last_fence_release()) {
1113 vec = rf->get_last_fence_release()->get_cv();
1115 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1121 rf = (*processset)[i];
1125 if (processset != NULL)
1131 * Performs various bookkeeping operations for the current ModelAction. For
1132 * instance, adds action to the per-object, per-thread action vector and to the
1133 * action trace list of all thread actions.
1135 * @param act is the ModelAction to add.
1137 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1139 int tid = id_to_int(act->get_tid());
/* SC fences and unlocks are also indexed in the per-location obj_map. */
1140 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1141 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1142 act->setActionRef(list->add_back(act));
1145 // Update action trace, a total order of all actions
1146 act->setTraceRef(action_trace.add_back(act));
1149 // Update obj_thrd_map, a per location, per thread, order of actions
1150 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
/* Grow the per-thread vector lazily; placement-new initializes new slots. */
1151 if ((int)vec->size() <= tid) {
1152 uint oldsize = vec->size();
1153 vec->resize(priv->next_thread_id);
1154 for(uint i = oldsize;i < priv->next_thread_id;i++)
1155 new (&(*vec)[i]) action_list_t();
/* Prunable reads (see process_read) are deliberately left out of the map. */
1157 if (!canprune && (act->is_read() || act->is_write()))
1158 act->setThrdMapRef((*vec)[tid].add_back(act));
1160 // Update thrd_last_action, the last action taken by each thread
1161 if ((int)thrd_last_action.size() <= tid)
1162 thrd_last_action.resize(get_num_threads());
1163 thrd_last_action[tid] = act;
1165 // Update thrd_last_fence_release, the last release fence taken by each thread
1166 if (act->is_fence() && act->is_release()) {
1167 if ((int)thrd_last_fence_release.size() <= tid)
1168 thrd_last_fence_release.resize(get_num_threads());
1169 thrd_last_fence_release[tid] = act;
1172 if (act->is_wait()) {
/* A wait also indexes under the mutex location stored in its value field. */
1173 void *mutex_loc = (void *) act->get_value();
1174 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
/* Insert `act` into `list` keeping it sorted by sequence number: fast path
 * appends when the list is empty or already ends at/below act's number;
 * otherwise scan backwards for the insertion point, falling back to the
 * front. NOTE(review): some closing braces / the `else` line are elided. */
1178 sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
1179 sllnode<ModelAction*> * rit = list->end();
1180 modelclock_t next_seq = act->get_seq_number();
1181 if (rit == NULL || (rit->getVal()->get_seq_number() <= next_seq))
1182 return list->add_back(act);
1184 for(;rit != NULL;rit=rit->getPrev()) {
1185 if (rit->getVal()->get_seq_number() <= next_seq) {
1186 return list->insertAfter(rit, act);
1189 return list->add_front(act);
1193 sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1194 sllnode<ModelAction*> * rit = list->end();
1195 modelclock_t next_seq = act->get_seq_number();
1197 act->create_cv(NULL);
1198 return list->add_back(act);
1199 } else if (rit->getVal()->get_seq_number() <= next_seq) {
1200 act->create_cv(rit->getVal());
1201 return list->add_back(act);
1203 for(;rit != NULL;rit=rit->getPrev()) {
1204 if (rit->getVal()->get_seq_number() <= next_seq) {
1205 act->create_cv(rit->getVal());
1206 return list->insertAfter(rit, act);
1209 return list->add_front(act);
1214 * Performs various bookkeeping operations for a normal write. The
1215 * complication is that we are typically inserting a normal write
1216 * lazily, so we need to insert it into the middle of lists.
1218 * @param act is the ModelAction to add.
// Bookkeeping for a lazily-inserted normal write: unlike
// add_action_to_lists(), the action may land in the *middle* of the ordered
// lists, so the sorted-insert helpers are used and the inserted action's
// clock vector is initialized from its list predecessor.
// @param act The write action to add.
1221 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1223 int tid = id_to_int(act->get_tid());
// Insert into the total trace in seq-number order and set act's CV.
1224 act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
1226 // Update obj_thrd_map, a per location, per thread, order of actions
1227 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1228 if (tid >= (int)vec->size()) {
1229 uint oldsize =vec->size();
// Grow to cover all allocated thread ids, placement-constructing new slots.
1230 vec->resize(priv->next_thread_id);
1231 for(uint i=oldsize;i<priv->next_thread_id;i++)
1232 new (&(*vec)[i]) action_list_t();
1234 act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
1236 ModelAction * lastact = thrd_last_action[tid];
1237 // Update thrd_last_action, the last action taken by each thread
// Only take over the "last action" slot when there was none, or when the
// existing one carries the same sequence number as this lazy insert.
1238 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1239 thrd_last_action[tid] = act;
1243 void ModelExecution::add_write_to_lists(ModelAction *write) {
1244 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1245 int tid = id_to_int(write->get_tid());
1246 if (tid >= (int)vec->size()) {
1247 uint oldsize =vec->size();
1248 vec->resize(priv->next_thread_id);
1249 for(uint i=oldsize;i<priv->next_thread_id;i++)
1250 new (&(*vec)[i]) action_list_t();
1252 write->setActionRef((*vec)[tid].add_back(write));
1256 * @brief Get the last action performed by a particular Thread
1257 * @param tid The thread ID of the Thread in question
1258 * @return The last action in the thread
1260 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1262 int threadid = id_to_int(tid);
1263 if (threadid < (int)thrd_last_action.size())
1264 return thrd_last_action[id_to_int(tid)];
1270 * @brief Get the last fence release performed by a particular Thread
1271 * @param tid The thread ID of the Thread in question
1272 * @return The last fence release in the thread, if one exists; NULL otherwise
1274 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1276 int threadid = id_to_int(tid);
1277 if (threadid < (int)thrd_last_fence_release.size())
1278 return thrd_last_fence_release[id_to_int(tid)];
1284 * Gets the last memory_order_seq_cst write (in the total global sequence)
1285 * performed on a particular object (i.e., memory location), not including the
1287 * @param curr The current ModelAction; also denotes the object location to
1289 * @return The last seq_cst write
1291 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1293 void *location = curr->get_location();
1294 return obj_last_sc_map.get(location);
1298 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1299 * performed in a particular thread, prior to a particular fence.
1300 * @param tid The ID of the thread to check
1301 * @param before_fence The fence from which to begin the search; if NULL, then
1302 * search for the most recent fence in the thread.
1303 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
// Finds the most recent SC fence in thread 'tid' that precedes
// 'before_fence' in the fence list (or the most recent overall when
// before_fence is NULL). Fences all share the sentinel FENCE_LOCATION key
// in obj_map.
1305 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1307 /* All fences should have location FENCE_LOCATION */
1308 action_list_t *list = obj_map.get(FENCE_LOCATION);
// NOTE(review): a NULL-list early return is elided from this view — confirm
// against the full source before assuming list is non-NULL below.
1313 sllnode<ModelAction*>* rit = list->end();
// When a before_fence is given, first rewind the cursor to it (the
// enclosing guard for this rewind is elided in this view).
1316 for (;rit != NULL;rit=rit->getPrev())
1317 if (rit->getVal() == before_fence)
1320 ASSERT(rit->getVal() == before_fence);
// Continue scanning backwards for an SC fence belonging to 'tid'.
1324 for (;rit != NULL;rit=rit->getPrev()) {
1325 ModelAction *act = rit->getVal();
1326 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1333 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1334 * location). This function identifies the mutex according to the current
1335 * action, which is presumed to perform on the same mutex.
1336 * @param curr The current ModelAction; also denotes the object location to
1338 * @return The last unlock operation
// Finds the most recent unlock (or wait) recorded for the mutex at curr's
// location by scanning that location's action list backwards.
1340 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1342 void *location = curr->get_location();
// NOTE(review): a NULL-list guard between these lines is elided from this
// view — confirm before assuming 'list' is non-NULL.
1344 action_list_t *list = obj_map.get(location);
1348 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1349 sllnode<ModelAction*>* rit;
// Backwards scan => first match is the latest unlock/wait on this mutex.
1350 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1351 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1352 return rit->getVal();
1356 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1358 ModelAction *parent = get_last_action(tid);
1360 parent = get_thread(tid)->get_creation();
1365 * Returns the clock vector for a given thread.
1366 * @param tid The thread whose clock vector we want
1367 * @return Desired clock vector
1369 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1371 ModelAction *firstaction=get_parent_action(tid);
1372 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare two values for equality over only their low-order bytes.
 *
 * Atomic objects narrower than 64 bits are carried around in uint64_t
 * containers, so only the low 'size' bytes are meaningful for equality
 * (e.g. for CAS expected-value checks).
 *
 * Defect fixed: as written, the three comparisons were consecutive
 * unconditional returns — the 1-byte compare always won and the wider
 * compares were unreachable. Restore explicit dispatch on the access size.
 *
 * @param val1 First value (significant bytes in the low-order end)
 * @param val2 Second value
 * @param size Access size in bytes: 1, 2, 4, or 8
 * @return true iff the low 'size' bytes of val1 and val2 are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte accesses (and, defensively, any unexpected size)
		 * compare the full 64-bit values. */
		return val1 == val2;
	}
}
1392 * Build up an initial set of all past writes that this 'read' action may read
1393 * from, as well as any previously-observed future values that must still be valid.
1395 * @param curr is the current ModelAction that we are exploring; it must be a
// Builds the initial may-read-from set for 'curr': every earlier store to the
// same location that is not excluded by SC ordering, RMW-atomicity, or the
// one-per-thread happens-before cutoff.
// @param curr The read being explored (asserted to be a read below).
// @return Newly-allocated vector of candidate stores; caller owns it.
1398 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1400 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1402 ASSERT(curr->is_read());
1404 ModelAction *last_sc_write = NULL;
// For an SC read, remember the most recent SC store to this location.
1406 if (curr->is_seqcst())
1407 last_sc_write = get_last_seq_cst_write(curr);
1409 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1411 /* Iterate over all threads */
1412 if (thrd_lists != NULL)
1413 for (i = 0;i < thrd_lists->size();i++) {
1414 /* Iterate over actions in thread, starting from most recent */
1415 action_list_t *list = &(*thrd_lists)[i];
1416 sllnode<ModelAction *> * rit;
1417 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1418 ModelAction *act = rit->getVal();
1423 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1424 bool allow_read = true;
// An SC read may not take an SC store (or anything that happens-before the
// last SC store) other than the last SC store itself.
1426 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1429 /* Need to check whether we will have two RMW reading from the same value */
1430 if (curr->is_rmwr()) {
1431 /* It is okay if we have a failing CAS */
1432 if (!curr->is_rmwrcas() ||
1433 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1434 //Need to make sure we aren't the second RMW
1435 CycleNode * node = mo_graph->getNode_noCreate(act);
1436 if (node != NULL && node->getRMW() != NULL) {
1437 //we are the second RMW
1444 /* Only add feasible reads */
1445 rf_set->push_back(act);
1448 /* Include at most one act per-thread that "happens before" curr */
// Once a store that happens-before curr is found, earlier stores in this
// thread are hidden; stop scanning this thread's list.
1449 if (act->happens_before(curr))
1454 if (DBG_ENABLED()) {
1455 model_print("Reached read action:\n");
1457 model_print("End printing read_from_past\n");
// Pretty-prints an action list as a table, then emits an order-sensitive
// hash over all actions with a positive sequence number (useful for
// comparing executions).
1462 static void print_list(action_list_t *list)
1464 sllnode<ModelAction*> *it;
1466 model_print("------------------------------------------------------------------------------------\n");
1467 model_print("# t Action type MO Location Value Rf CV\n");
1468 model_print("------------------------------------------------------------------------------------\n");
1470 unsigned int hash = 0;
1472 for (it = list->begin();it != NULL;it=it->getNext()) {
1473 const ModelAction *act = it->getVal();
// Only actions with a real (positive) sequence number are printed/hashed;
// the per-action print call is elided from this view.
1474 if (act->get_seq_number() > 0)
1476 hash = hash^(hash<<3)^(it->getVal()->hash());
1478 model_print("HASH %u\n", hash);
1479 model_print("------------------------------------------------------------------------------------\n");
1482 #if SUPPORT_MOD_ORDER_DUMP
// Writes the current execution to "<filename>.dot": modification-order
// nodes from the cycle graph, reads-from edges (red), and per-thread
// sequenced-before edges (blue). Only compiled under SUPPORT_MOD_ORDER_DUMP.
1483 void ModelExecution::dumpGraph(char *filename)
// NOTE(review): the declaration of 'buffer' (a local char array) is elided
// from this view — confirm its size covers filename + ".dot".
1486 sprintf(buffer, "%s.dot", filename);
1487 FILE *file = fopen(buffer, "w");
1488 fprintf(file, "digraph %s {\n", filename);
1489 mo_graph->dumpNodes(file);
// thread_array[tid] tracks each thread's most recent printed action so the
// next action in that thread can be linked with an "sb" edge.
1490 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1492 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1493 ModelAction *act = it->getVal();
1494 if (act->is_read()) {
1495 mo_graph->dot_print_node(file, act);
1496 mo_graph->dot_print_edge(file,
1497 act->get_reads_from(),
1499 "label=\"rf\", color=red, weight=2");
1501 if (thread_array[act->get_tid()]) {
1502 mo_graph->dot_print_edge(file,
1503 thread_array[id_to_int(act->get_tid())],
1505 "label=\"sb\", color=blue, weight=400");
1508 thread_array[act->get_tid()] = act;
1510 fprintf(file, "}\n");
1511 model_free(thread_array);
1516 /** @brief Prints an execution trace summary. */
// Prints a one-line header (execution number plus SLEEP-SET/BUG markers)
// followed by the full action-trace table. When SUPPORT_MOD_ORDER_DUMP is
// enabled, also dumps the modification-order graph to numbered files.
1517 void ModelExecution::print_summary()
1519 #if SUPPORT_MOD_ORDER_DUMP
1520 char buffername[100];
1521 sprintf(buffername, "exec%04u", get_execution_number());
1522 mo_graph->dumpGraphToFile(buffername);
1523 sprintf(buffername, "graph%04u", get_execution_number());
1524 dumpGraph(buffername);
// (the matching #endif is elided from this view)
1527 model_print("Execution trace %d:", get_execution_number());
1528 if (scheduler->all_threads_sleeping())
1529 model_print(" SLEEP-SET REDUNDANT");
1530 if (have_bug_reports())
1531 model_print(" DETECTED BUG(S)");
1535 print_list(&action_trace);
// Prints only the tail of the execution trace: the most recent actions
// (bounded by a length limit) in forward order, followed by the same
// order-sensitive hash print_list() uses.
1540 void ModelExecution::print_tail()
1542 model_print("Execution trace %d:\n", get_execution_number());
1544 sllnode<ModelAction*> *it;
1546 model_print("------------------------------------------------------------------------------------\n");
1547 model_print("# t Action type MO Location Value Rf CV\n");
1548 model_print("------------------------------------------------------------------------------------\n");
1550 unsigned int hash = 0;
// Collect the last actions by walking backwards, pushing to the front so
// the final list is in forward order. NOTE(review): the declarations of
// 'counter' and 'length' (and counter's increment) are elided from this
// view — confirm against the full source.
1554 SnapList<ModelAction *> list;
1555 for (it = action_trace.end(); it != NULL; it = it->getPrev()) {
1556 if (counter > length)
1559 ModelAction * act = it->getVal();
1560 list.push_front(act);
1564 for (it = list.begin();it != NULL;it=it->getNext()) {
1565 const ModelAction *act = it->getVal();
1566 if (act->get_seq_number() > 0)
1568 hash = hash^(hash<<3)^(it->getVal()->hash());
1570 model_print("HASH %u\n", hash);
1571 model_print("------------------------------------------------------------------------------------\n");
1575 * Add a Thread to the system for the first time. Should only be called once
1577 * @param t The Thread to add
1579 void ModelExecution::add_thread(Thread *t)
1581 unsigned int i = id_to_int(t->get_id());
1582 if (i >= thread_map.size())
1583 thread_map.resize(i + 1);
1585 if (!t->is_model_thread())
1586 scheduler->add_thread(t);
1590 * @brief Get a Thread reference by its ID
1591 * @param tid The Thread's ID
1592 * @return A Thread reference
1594 Thread * ModelExecution::get_thread(thread_id_t tid) const
1596 unsigned int i = id_to_int(tid);
1597 if (i < thread_map.size())
1598 return thread_map[i];
1603 * @brief Get a reference to the Thread in which a ModelAction was executed
1604 * @param act The ModelAction
1605 * @return A Thread reference
1607 Thread * ModelExecution::get_thread(const ModelAction *act) const
1609 return get_thread(act->get_tid());
1613 * @brief Get a Thread reference by its pthread ID
1614 * @param index The pthread's ID
1615 * @return A Thread reference
// Resolves a pthread_t to the model's Thread object via pthread_map.
1617 Thread * ModelExecution::get_pthread(pthread_t pid) {
1618 // pid 1 is reserved for the main thread, pthread ids should start from 2
// NOTE(review): elided between these lines — the pid==1 fast path's guard
// and a local 'x' that reinterprets the pthread_t as a 32-bit id; confirm
// against the full source.
1620 return get_thread(pid);
1627 uint32_t thread_id = x.v;
// Ids beyond the registered pthread_counter are unknown.
1629 if (thread_id < pthread_counter + 1)
1630 return pthread_map[thread_id];
1636 * @brief Check if a Thread is currently enabled
1637 * @param t The Thread to check
1638 * @return True if the Thread is currently enabled
1640 bool ModelExecution::is_enabled(Thread *t) const
1642 return scheduler->is_enabled(t);
1646 * @brief Check if a Thread is currently enabled
1647 * @param tid The ID of the Thread to check
1648 * @return True if the Thread is currently enabled
1650 bool ModelExecution::is_enabled(thread_id_t tid) const
1652 return scheduler->is_enabled(tid);
1656 * @brief Select the next thread to execute based on the curren action
1658 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1659 * actions should be followed by the execution of their child thread. In either
1660 * case, the current action should determine the next thread schedule.
1662 * @param curr The current action
1663 * @return The next thread to run, if the current action will determine this
1664 * selection; otherwise NULL
1666 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1668 /* Do not split atomic RMW */
1669 if (curr->is_rmwr())
1670 return get_thread(curr);
1671 /* Follow CREATE with the created thread */
1672 /* which is not needed, because model.cc takes care of this */
1673 if (curr->get_type() == THREAD_CREATE)
1674 return curr->get_thread_operand();
1675 if (curr->get_type() == PTHREAD_CREATE) {
1676 return curr->get_thread_operand();
1682 * Takes the next step in the execution, if possible.
1683 * @param curr The current step to take
1684 * @return Returns the next Thread to run, if any; NULL if this execution
// Takes one step of the execution: validates and processes 'curr', records
// it in ModelHistory, retires blocked/finished threads from the scheduler,
// and reports any forced next-thread choice.
// @param curr The action to execute (its thread must be READY).
// @return The next Thread to run if curr dictates one; NULL otherwise.
1687 Thread * ModelExecution::take_step(ModelAction *curr)
1689 Thread *curr_thrd = get_thread(curr);
1690 ASSERT(curr_thrd->get_state() == THREAD_READY);
1692 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action may replace curr (e.g. with a canonical action).
1693 curr = check_current_action(curr);
1696 /* Process this action in ModelHistory for records */
1698 model->get_history()->process_action( curr, curr->get_tid() );
// Threads that blocked or completed as a result of this step leave the
// scheduler's run queue.
1700 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1701 scheduler->remove_thread(curr_thrd);
1703 return action_select_next_thread(curr);
1706 /** This method removes references to an Action before we delete it. */
// Unlinks every reference the execution holds to 'act' — trace list,
// per-thread map, per-location lists, last-SC-store cache, and the cycle
// graph — so the action can be deleted safely.
1708 void ModelExecution::removeAction(ModelAction *act) {
// Drop from the global action trace.
1710 sllnode<ModelAction *> * listref = act->getTraceRef();
1711 if (listref != NULL) {
1712 action_trace.erase(listref);
// Drop from the per-location, per-thread action map.
1716 sllnode<ModelAction *> * listref = act->getThrdMapRef();
1717 if (listref != NULL) {
1718 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1719 (*vec)[act->get_tid()].erase(listref);
// Drop the location-keyed reference; which list holds it depends on the
// action kind (mirrors add_action_to_lists / add_write_to_lists).
1722 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1723 sllnode<ModelAction *> * listref = act->getActionRef();
1724 if (listref != NULL) {
1725 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1726 list->erase(listref);
1728 } else if (act->is_wait()) {
// A wait was indexed under the mutex address carried in its value field.
1729 sllnode<ModelAction *> * listref = act->getActionRef();
1730 if (listref != NULL) {
1731 void *mutex_loc = (void *) act->get_value();
1732 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1734 } else if (act->is_free()) {
1735 sllnode<ModelAction *> * listref = act->getActionRef();
1736 if (listref != NULL) {
1737 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1738 (*vec)[act->get_tid()].erase(listref);
1740 //Clear it from last_sc_map
1741 if (obj_last_sc_map.get(act->get_location()) == act) {
1742 obj_last_sc_map.remove(act->get_location());
1745 //Remove from Cyclegraph
1746 mo_graph->freeAction(act);
1750 /** Computes clock vector that all running threads have already synchronized to. */
// Computes the component-wise minimum of the clock vectors of all live
// (non-completed) threads: everything at or below this clock has been
// observed by every running thread.
// @return A newly-allocated minimal ClockVector (caller owns it), or NULL
//         when no live thread contributes one.
1752 ClockVector * ModelExecution::computeMinimalCV() {
1753 ClockVector *cvmin = NULL;
1754 //Thread 0 isn't a real thread, so skip it..
1755 for(unsigned int i = 1;i < thread_map.size();i++) {
1756 Thread * t = thread_map[i];
// Completed threads no longer constrain visibility (their skip/continue
// is elided from this view).
1757 if (t->get_state() == THREAD_COMPLETED)
1759 thread_id_t tid = int_to_id(i);
1760 ClockVector * cv = get_cv(tid);
// First contributor seeds cvmin; later ones are min-merged in.
1762 cvmin = new ClockVector(cv, NULL);
1764 cvmin->minmerge(cv);
1770 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
1772 void ModelExecution::fixupLastAct(ModelAction *act) {
1773 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1774 newact->set_seq_number(get_next_seq_num());
1775 newact->create_cv(act);
1776 newact->set_last_fence_release(act->get_last_fence_release());
1777 add_action_to_lists(newact, false);
1780 /** Compute which actions to free. */
// Garbage-collects old actions once the trace exceeds params->traceminsize:
// (1) marks stores invisible to all live threads (per the minimal clock
// vector) and everything modification-order-before them as freeable,
// (2) sweeps the recent window to weaken RMWs whose source store was freed
// and to drop stale release-fence references, then
// (3) sweeps the old window, actually removing actions where safe.
1782 void ModelExecution::collectActions() {
// Trace still below the minimum size: nothing to collect (the early return
// is elided from this view).
1783 if (priv->used_sequence_numbers < params->traceminsize)
1786 //Compute minimal clock vector for all live threads
1787 ClockVector *cvmin = computeMinimalCV();
1788 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with seq numbers at or below this bound are candidates.
1789 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1791 //Next walk action trace... When we hit an action, see if it is
1792 //invisible (e.g., earlier than the first before the minimum
1793 //clock for the thread... if so erase it and all previous
1794 //actions in cyclegraph
1795 sllnode<ModelAction*> * it;
1796 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1797 ModelAction *act = it->getVal();
1798 modelclock_t actseq = act->get_seq_number();
1800 //See if we are done
1801 if (actseq > maxtofree)
1804 thread_id_t act_tid = act->get_tid();
1805 modelclock_t tid_clock = cvmin->getClock(act_tid);
1807 //Free if it is invisible or we have set a flag to remove visible actions.
1808 if (actseq <= tid_clock || params->removevisible) {
// Resolve the store this action corresponds to (reads use their
// reads-from source; some branches here are elided from this view).
1809 ModelAction * write;
1810 if (act->is_write()) {
1812 } else if (act->is_read()) {
1813 write = act->get_reads_from();
1817 //Mark everything earlier in MO graph to be freed
// BFS backwards over incoming mo edges, marking each store READY_FREE.
1818 CycleNode * cn = mo_graph->getNode_noCreate(write);
1820 queue->push_back(cn);
1821 while(!queue->empty()) {
1822 CycleNode * node = queue->back();
1824 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1825 CycleNode * prevnode = node->getInEdge(i);
1826 ModelAction * prevact = prevnode->getAction();
1827 if (prevact->get_type() != READY_FREE) {
1828 prevact->set_free();
1829 queue->push_back(prevnode);
1837 //We may need to remove read actions in the window we don't delete to preserve correctness.
// Sweep the *recent* window (tail back to 'it').
1839 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1840 ModelAction *act = it2->getVal();
1841 //Do iteration early in case we delete the act
1843 bool islastact = false;
1844 ModelAction *lastact = get_last_action(act->get_tid());
1845 if (act == lastact) {
// Only an incomplete thread needs its last action preserved/fixed up.
1846 Thread * th = get_thread(act);
1847 islastact = !th->is_complete();
1850 if (act->is_read()) {
1851 if (act->get_reads_from()->is_free()) {
1852 if (act->is_rmw()) {
1853 //Weaken a RMW from a freed store to a write
1854 act->set_type(ATOMIC_WRITE);
1866 //If we don't delete the action, we should remove references to release fences
1868 const ModelAction *rel_fence =act->get_last_fence_release();
1869 if (rel_fence != NULL) {
1870 modelclock_t relfenceseq = rel_fence->get_seq_number();
1871 thread_id_t relfence_tid = rel_fence->get_tid();
1872 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1873 //Remove references to irrelevant release fences
1874 if (relfenceseq <= tid_clock)
1875 act->set_last_fence_release(NULL);
1878 //Now we are in the window of old actions that we remove if possible
1879 for (;it != NULL;) {
1880 ModelAction *act = it->getVal();
1881 //Do iteration early since we may delete node...
1883 bool islastact = false;
1884 ModelAction *lastact = get_last_action(act->get_tid());
1885 if (act == lastact) {
1886 Thread * th = get_thread(act);
1887 islastact = !th->is_complete();
1890 if (act->is_read()) {
1891 if (act->get_reads_from()->is_free()) {
1892 if (act->is_rmw()) {
1893 act->set_type(ATOMIC_WRITE);
1903 } else if (act->is_free()) {
// (removal/deletion of freed stores is elided from this view)
1910 } else if (act->is_write()) {
1911 //Do nothing with write that hasn't been marked to be freed
1912 } else if (islastact) {
1913 //Keep the last action for non-read/write actions
1914 } else if (act->is_fence()) {
1915 //Note that acquire fences can always be safely
1916 //removed, but could incur extra overheads in
1917 //traversals. Removing them before the cvmin seems
1918 //like a good compromise.
1920 //Release fences before the cvmin don't do anything
1921 //because everyone has already synchronized.
1923 //Sequentially fences before cvmin are redundant
1924 //because happens-before will enforce same
1927 modelclock_t actseq = act->get_seq_number();
1928 thread_id_t act_tid = act->get_tid();
1929 modelclock_t tid_clock = cvmin->getClock(act_tid);
1930 if (actseq <= tid_clock) {
1932 // Remove reference to act from thrd_last_fence_release
1933 int thread_id = id_to_int( act->get_tid() );
1934 if (thrd_last_fence_release[thread_id] == act) {
1935 thrd_last_fence_release[thread_id] = NULL;
1941 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1942 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1943 //need to keep most recent unlock/wait for each lock
1944 if(act->is_unlock() || act->is_wait()) {
// Older unlock/wait actions are superseded by the latest one per mutex.
1945 ModelAction * lastlock = get_last_unlock(act);
1946 if (lastlock != act) {
1951 } else if (act->is_create()) {
// A create whose child already finished no longer needs to be kept.
1952 if (act->get_thread_operand()->is_complete()) {
1964 //If we don't delete the action, we should remove references to release fences
1965 const ModelAction *rel_fence =act->get_last_fence_release();
1966 if (rel_fence != NULL) {
1967 modelclock_t relfenceseq = rel_fence->get_seq_number();
1968 thread_id_t relfence_tid = rel_fence->get_tid();
1969 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1970 //Remove references to irrelevant release fences
1971 if (relfenceseq <= tid_clock)
1972 act->set_last_fence_release(NULL);
1980 Fuzzer * ModelExecution::getFuzzer() {