11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 */
struct model_snapshot_members {
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),

	~model_snapshot_members() {
		/* Free every bug_message recorded during this execution */
		for (unsigned int i = 0;i < bugs.size();i++)

	/* Id that will be handed out to the next thread created */
	unsigned int next_thread_id;
	/* Highest sequence number handed out to a ModelAction so far */
	modelclock_t used_sequence_numbers;
	/* Bug reports collected during this execution */
	SnapVector<bug_message *> bugs;
	/** @brief Incorrectly-ordered synchronization was made */
/** @brief Constructor */
ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
	thread_map(2),	/* We'll always need at least 2 threads */
	condvar_waiters_map(),
	thrd_last_fence_release(),
	priv(new struct model_snapshot_members ()),
	mo_graph(new CycleGraph()),
	fuzzer(new NewFuzzer()),
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	add_thread(model_thread);
	/* Hook the fuzzer and scheduler back up to this execution */
	fuzzer->register_engine(m->get_history(), this);
	scheduler->register_engine(this);
	/* TLS key whose destructor cleans up per-thread state */
	pthread_key_create(&pthreadkey, tlsdestructor);
/** @brief Destructor */
ModelExecution::~ModelExecution()
	/* Free every Thread created during this execution */
	for (unsigned int i = 0;i < get_num_threads();i++)
		delete get_thread(int_to_id(i));
89 int ModelExecution::get_execution_number() const
91 return model->get_execution_number();
/* Look up the per-location action list for @a ptr, lazily allocating an
 * empty list the first time the location is seen. */
static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
	action_list_t *tmp = hash->get(ptr);
	/* First access for this location: create its list */
	tmp = new action_list_t();
/* Look up the per-location, per-thread action-list vector for @a ptr,
 * lazily allocating an empty vector the first time the location is seen. */
static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
	SnapVector<action_list_t> *tmp = hash->get(ptr);
	/* First access for this location: create its vector */
	tmp = new SnapVector<action_list_t>();
114 /** @return a thread ID for a new Thread */
115 thread_id_t ModelExecution::get_next_id()
117 return priv->next_thread_id++;
120 /** @return the number of user threads created during this execution */
121 unsigned int ModelExecution::get_num_threads() const
123 return priv->next_thread_id;
126 /** @return a sequence number for a new ModelAction */
127 modelclock_t ModelExecution::get_next_seq_num()
129 return ++priv->used_sequence_numbers;
132 /** @return a sequence number for a new ModelAction */
133 modelclock_t ModelExecution::get_curr_seq_num()
135 return priv->used_sequence_numbers;
138 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
139 void ModelExecution::restore_last_seq_num()
141 priv->used_sequence_numbers--;
/**
 * @brief Should the current action wake up a given thread?
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		/* Only wake if the writer's fence-release is after the sleeper's last action */
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
	/* The sleep is literally sleeping */
	if (asleep->is_sleep()) {
		/* Defer the wake-up decision to the fuzzer */
		if (fuzzer->shouldWake(asleep))
/* Wake any thread in the scheduler's sleep set whose pending action the
 * current action @a curr might enable (see should_wake_up()). */
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
	for (unsigned int i = 0;i < get_num_threads();i++) {
		Thread *thr = get_thread(int_to_id(i));
		if (scheduler->is_sleep_set(thr)) {
			if (should_wake_up(curr, thr)) {
				/* Remove this thread from sleep set */
				scheduler->remove_sleep(thr);
				/* A literal sleep that gets woken records its wakeup state */
				if (thr->get_pending()->is_sleep())
					thr->set_wakeup_state(true);
/**
 * @brief Record a bug report for this execution
 * @param msg Description of the bug
 */
void ModelExecution::assert_bug(const char *msg)
	priv->bugs.push_back(new bug_message(msg));
199 /** @return True, if any bugs have been reported for this execution */
200 bool ModelExecution::have_bug_reports() const
202 return priv->bugs.size() != 0;
/** @return the list of bug reports recorded for this execution */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
211 * Check whether the current trace has triggered an assertion which should halt
214 * @return True, if the execution should be aborted; false otherwise
216 bool ModelExecution::has_asserted() const
218 return priv->asserted;
222 * Trigger a trace assertion which should cause this execution to be halted.
223 * This can be due to a detected bug or due to an infeasibility that should
226 void ModelExecution::set_assert()
228 priv->asserted = true;
/**
 * Check if we are in a deadlock. Should only be called at the end of an
 * execution, although it should not give false positives in the middle of an
 * execution (there should be some ENABLED thread).
 *
 * @return True if program is in a deadlock; false otherwise
 */
bool ModelExecution::is_deadlocked() const
	bool blocking_threads = false;
	/* Scan all user threads for one blocked on a pending action */
	for (unsigned int i = 0;i < get_num_threads();i++) {
		thread_id_t tid = int_to_id(i);
		Thread *t = get_thread(tid);
		/* A user thread with a pending action is waiting on something */
		if (!t->is_model_thread() && t->get_pending())
			blocking_threads = true;
	return blocking_threads;
/**
 * Check if this is a complete execution. That is, have all threads completed
 * execution (rather than exiting because sleep sets have forced a redundant
 * execution).
 *
 * @return True if the execution is complete.
 */
bool ModelExecution::is_complete_execution() const
	/* Any still-enabled thread means the execution is not complete */
	for (unsigned int i = 0;i < get_num_threads();i++)
		if (is_enabled(int_to_id(i)))
/* Promote a pending non-atomic store at @a location into a NONATOMIC_WRITE
 * ModelAction, so that atomic loads may read from it. The new action is
 * inserted lazily into the action lists with the store's original clock. */
ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
	uint64_t value = *((const uint64_t *) location);
	modelclock_t storeclock;
	thread_id_t storethread;
	/* Recover which thread performed the store, and at what clock */
	getStoreThreadAndClock(location, &storethread, &storeclock);
	setAtomicStoreFlag(location);
	ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
	act->set_seq_number(storeclock);
	/* Splice into trace/write lists and record mo constraints */
	add_normal_write_to_lists(act);
	add_write_to_lists(act);
	w_modification_order(act);
	model->get_history()->process_action(act, act->get_tid());
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @param rf_set is the set of model actions we can possibly read from
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
	SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
	bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
	if (hasnonatomicstore) {
		/* Promote the plain store so it becomes a reads-from candidate */
		ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
		rf_set->push_back(nonatomicstore);

	// Remove writes that violate read modification order
	while (i < rf_set->size()) {
		ModelAction * rf = (*rf_set)[i];
		/* check_only probe: test mo feasibility without touching priorset */
		if (!r_modification_order(curr, rf, NULL, NULL, true)) {
			/* Swap-with-back removal keeps the filter pass linear */
			(*rf_set)[i] = rf_set->back();

	/* Let the fuzzer choose which remaining write this read observes */
	int index = fuzzer->selectWrite(curr, rf_set);
	ModelAction *rf = (*rf_set)[index];
	bool canprune = false;
	if (r_modification_order(curr, rf, priorset, &canprune)) {
		/* Add mo edges from every prior write to the chosen rf */
		for(unsigned int i=0;i<priorset->size();i++) {
			mo_graph->addEdge((*priorset)[i], rf);
	get_thread(curr)->set_return_value(curr->get_return_value());
	if (canprune && curr->get_type() == ATOMIC_READ) {
		int tid = id_to_int(curr->get_tid());
		/* Drop curr from its per-location, per-thread action list */
		(*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
		curr->setThrdMapRef(NULL);
	/* TODO: Following code not needed anymore */
	(*rf_set)[index] = rf_set->back();
/**
 * Processes a lock, trylock, or unlock model action. @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken. If not,
 * it falls to the normal lock operation case. If so, it returns
 * failure.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
	cdsc::mutex *mutex = curr->get_mutex();
	struct cdsc::mutex_state *state = NULL;
	state = mutex->get_state();
	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* A trylock succeeds iff the lock is currently free */
		bool success = !state->locked;
		curr->set_try_lock(success);
		get_thread(curr)->set_return_value(0);
		get_thread(curr)->set_return_value(1);
	//otherwise fall into the lock case
		//TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
		//if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
		//	assert_bug("Lock access before initialization");
		/* Record the acquiring thread as the lock holder */
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
		//TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
		if (fuzzer->shouldWait(curr)) {
			/* wake up the other threads */
			for (unsigned int i = 0;i < get_num_threads();i++) {
				Thread *t = get_thread(int_to_id(i));
				Thread *curr_thrd = get_thread(curr);
				if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
			/* unlock the lock - after checking who was waiting on it */
			state->locked = NULL;
			/* disable this thread */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			scheduler->sleep(get_thread(curr));
	case ATOMIC_TIMEDWAIT:
	case ATOMIC_UNLOCK: {
		//TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
		//FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
		//AS NORMAL WAITS...THINK ABOUT PROBABILITIES
		//THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
		//PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
		//WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
		//FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
		//MUST EVENTUALLY RELEASE...

		/* wake up the other threads */
		for (unsigned int i = 0;i < get_num_threads();i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
			scheduler->wake(get_thread(rit->getVal()));
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		if (waiters->size() != 0) {
			/* Fuzzer picks which single waiter receives the notify */
			Thread * thread = fuzzer->selectNotify(waiters);
			scheduler->wake(thread);
455 * Process a write ModelAction
456 * @param curr The ModelAction to process
457 * @return True if the mo_graph was updated or promises were resolved
459 void ModelExecution::process_write(ModelAction *curr)
461 w_modification_order(curr);
462 get_thread(curr)->set_return_value(VALUE_NONE);
/**
 * Process a fence ModelAction
 * @param curr The ModelAction to process
 * @return True if synchronization was updated
 */
bool ModelExecution::process_fence(ModelAction *curr)
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurrence (not in this function), for
	 * use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 * sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = &action_trace;
		sllnode<ModelAction *> * rit;
		/* Find X : is_read(X) && X --sb-> curr */
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Only consider the fence's own thread */
			if (act->get_tid() != curr->get_tid())
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
			/* Establish hypothetical release sequences */
			ClockVector *cv = get_hb_from_write(act->get_reads_from());
			if (cv != NULL && curr->get_cv()->merge(cv))
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Processing
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc. This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 */
void ModelExecution::process_thread_action(ModelAction *curr)
	switch (curr->get_type()) {
	case THREAD_CREATE: {
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		th->set_creation(curr);
	case PTHREAD_CREATE: {
		/* Hand out a fresh pthread id via the shared counter */
		(*(uint32_t *)curr->get_location()) = pthread_counter++;
		struct pthread_params *params = (struct pthread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		th->set_creation(curr);
		/* Grow the pthread map on demand; new thread occupies slot counter-1 */
		if ( pthread_map.size() < pthread_counter )
			pthread_map.resize( pthread_counter );
		pthread_map[ pthread_counter-1 ] = th;
		Thread *blocking = curr->get_thread_operand();
		/* Join synchronizes with the last action of the joined thread */
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		break;	// WL: to be add (modified)
	case THREADONLY_FINISH:
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		if (curr->get_type() == THREAD_FINISH &&
				th == model->getInitThread()) {
		/* Wake up any joining threads */
		for (unsigned int i = 0;i < get_num_threads();i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
		Thread *th = get_thread(curr);
		th->set_pending(curr);
		scheduler->add_sleep(th);
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
 * manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second half of an RMW: fold into the earlier RMWR action */
		ModelAction *newcurr = process_rmw(*curr);
	ModelAction *newcurr = *curr;
	newcurr->set_seq_number(get_next_seq_num());
	/* Always compute new clock vector */
	newcurr->create_cv(get_parent_action(newcurr->get_tid()));
	/* Assign most recent release fence */
	newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
	return true;	/* This was a new ModelAction */
/**
 * @brief Establish reads-from relation between two actions
 *
 * Perform basic operations involved with establishing a concrete rf relation,
 * including setting the ModelAction data and checking for release sequences.
 *
 * @param act The action that is reading (must be a read)
 * @param rf The action from which we are reading (must be a write)
 */
void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
	ASSERT(rf->is_write());
	act->set_read_from(rf);
	if (act->is_acquire()) {
		/* Acquire loads pick up the hb clock propagated by the write */
		ClockVector *cv = get_hb_from_write(rf);
		act->get_cv()->merge(cv);
/**
 * @brief Synchronizes two actions
 *
 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
 * This function performs the synchronization as well as providing other hooks
 * for other checks along with synchronization.
 *
 * @param first The left-hand side of the synchronizes-with relation
 * @param second The right-hand side of the synchronizes-with relation
 * @return True if the synchronization was successful (i.e., was consistent
 * with the execution order); false otherwise
 */
bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
	if (*second < *first) {
		ASSERT(0);	//This should not happen
	return second->synchronize_with(first);
/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether an operation would be successful (i.e., is a lock already
 * locked, or is the joined thread already complete).
 *
 * For yield-blocking, yields are never enabled.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelExecution::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		cdsc::mutex *lock = curr->get_mutex();
		struct cdsc::mutex_state *state = lock->get_state();
	} else if (curr->is_thread_join()) {
		Thread *blocking = curr->get_thread_operand();
		/* Joining a still-running thread blocks the joiner */
		if (!blocking->is_complete()) {
	} else if (curr->is_sleep()) {
		/* Fuzzer may decide to skip the sleep entirely */
		if (!fuzzer->shouldSleep(curr))
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
	/* RMWC/RMW are the completion half of an earlier RMWR */
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	bool newly_explored = initialize_curr_action(&curr);
	wake_up_sleeping_actions(curr);
	SnapVector<ModelAction *> * rf_set = NULL;
	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		rf_set = build_may_read_from(curr);
	if (curr->is_read() && !second_part_of_rmw) {
		process_read(curr, rf_set);
	ASSERT(rf_set == NULL);
	/* Add the action to lists */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);
	if (curr->is_write())
		add_write_to_lists(curr);
	process_thread_action(curr);
	if (curr->is_write())
	if (curr->is_fence())
	if (curr->is_mutex_op())
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	/* RMW atomicity: edge from the write read-from to the RMW itself */
	mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
/**
 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read. Two cases:
 *
 * -# The action is a write: that write must either occur before
 * the write we read from or be the write we read from.
 * -# The action is a read: the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @param check_only If true, then only check whether the current action satisfies
 * read modification order or not, without modifying priorset and canprune.
 *
 * @return True if modification order edges were added; false otherwise
 */
bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
																					SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	int tid = curr->get_tid();
	ModelAction *prev_same_thread = NULL;
	/* Iterate over all threads, starting with curr's own thread */
	for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
		/* Last SC fence in thread tid */
		ModelAction *last_sc_fence_thread_local = NULL;
		last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);

		/* Last SC fence in thread tid, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);

		//Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
		if (prev_same_thread != NULL &&
				(prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
				(last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[tid];
		sllnode<ModelAction *> * rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();

			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				if (act->happens_before(curr))

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					/* Cycle check: this mo edge must not create a cycle */
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
								 *act < *last_sc_fence_local) {
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
								 *act < *last_sc_fence_thread_before) {
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (last_sc_fence_local == NULL ||
						(*last_sc_fence_local < *act)) {
					prev_same_thread = act;
				if (act->is_write()) {
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
					/* Reader: the write it observed must be mo-before rf */
					ModelAction *prevrf = act->get_reads_from();
					if (!prevrf->equals(rf)) {
						if (mo_graph->checkReachable(rf, prevrf))
						priorset->push_back(prevrf);
					if (act->get_tid() == curr->get_tid()) {
						//Can prune curr from obj list
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 */
void ModelExecution::w_modification_order(ModelAction *curr)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	ASSERT(curr->is_write());

	SnapList<ModelAction *> edgeset;

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
		   so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			edgeset.push_back(last_seq_cst);
		//update map for next query
		obj_last_sc_map.put(curr->get_location(), curr);

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction*>* rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 * thread.
			 *
			 * 2) If RMW and it didn't read from anything, we should
			 * whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from() != NULL)

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				edgeset.push_back(act);

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					edgeset.push_back(act);
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					edgeset.push_back(act->get_reads_from());
	/* Commit all collected mo predecessors in one batch */
	mo_graph->addEdges(&edgeset, curr);
/**
 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one the following constraint (others
 * require compiler support):
 *
 * If X --hb-> Y --mo-> Z, then X should not read from Z.
 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
 */
bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());

	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Most recent write (direct or via rf) hb-after the reader */
		const ModelAction *write_after_read = NULL;

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction *>* rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();

			/* Don't disallow due to act == reader */
			if (!reader->happens_before(act) || reader == act)
			else if (act->is_write())
				write_after_read = act;
			else if (act->is_read() && act->get_reads_from() != NULL)
				write_after_read = act->get_reads_from();

		/* A later write that is mo-before the writer forbids this rf */
		if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
/**
 * Computes the clock vector that happens before propagates from this write.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @return ClockVector of happens before relation.
 */
ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
	SnapVector<ModelAction *> * processset = NULL;
	/* Walk the rf chain backwards, stacking RMWs that still need their
	 * release-sequence clock vector (rfcv) computed */
	for ( ;rf != NULL;rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());
		if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
		if (processset == NULL)
			processset = new SnapVector<ModelAction *>();
		processset->push_back(rf);

	int i = (processset == NULL) ? 0 : processset->size();

	ClockVector * vec = NULL;
	if (rf->get_rfcv() != NULL) {
		/* Use the cached release-sequence clock vector */
		vec = rf->get_rfcv();
	} else if (rf->is_acquire() && rf->is_release()) {
	} else if (rf->is_release() && !rf->is_rmw()) {
	} else if (rf->is_release()) {
		//have rmw that is release and doesn't have a rfcv
		(vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
	//operation that isn't release
	if (rf->get_last_fence_release()) {
		vec = rf->get_last_fence_release()->get_cv();
		(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
	/* Pop the next stacked RMW and continue propagating */
	rf = (*processset)[i];
	if (processset != NULL)
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
void ModelExecution::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	/* SC fences and unlocks also go on the per-location obj_map list */
	if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
		action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
		act->setActionRef(list->add_back(act));

	// Update action trace, a total order of all actions
	act->setTraceRef(action_trace.add_back(act));

	// Update obj_thrd_map, a per location, per thread, order of actions
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if ((int)vec->size() <= tid) {
		/* Grow the per-thread vector; placement-new the fresh slots */
		uint oldsize = vec->size();
		vec->resize(priv->next_thread_id);
		for(uint i = oldsize;i < priv->next_thread_id;i++)
			new (&(*vec)[i]) action_list_t();
	act->setThrdMapRef((*vec)[tid].add_back(act));

	// Update thrd_last_action, the last action taken by each thread
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;

	// Update thrd_last_fence_release, the last release fence taken by each thread
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;

	if (act->is_wait()) {
		/* Waits also index under the mutex location stored in the action's value */
		void *mutex_loc = (void *) act->get_value();
		act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if ((int)vec->size() <= tid) {
			uint oldsize = vec->size();
			vec->resize(priv->next_thread_id);
			for(uint i = oldsize;i < priv->next_thread_id;i++)
				new (&(*vec)[i]) action_list_t();
		act->setThrdMapRef((*vec)[tid].add_back(act));
/* Insert @a act into a list ordered by sequence number; the fast path
 * appends at the back when the tail already matches act's seq number. */
sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
	sllnode<ModelAction*> * rit = list->end();
	modelclock_t next_seq = act->get_seq_number();
	if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
		return list->add_back(act);
	/* Otherwise scan backwards for the node with the matching seq number */
	for(;rit != NULL;rit=rit->getPrev()) {
		if (rit->getVal()->get_seq_number() == next_seq) {
			return list->insertAfter(rit, act);
/**
 * @brief Insert @a act into @a list (ordered by sequence number) and build
 * act's clock vector from the action it is placed next to.
 *
 * Mirrors insertIntoActionList(), but additionally calls create_cv():
 * with NULL when the list is empty (no predecessor to inherit from),
 * otherwise with the neighboring action that shares act's sequence number.
 *
 * @param list The ordered action list to insert into
 * @param act The ModelAction to insert; it gains a clock vector here
 * @return The list node now holding @a act
 */
sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
	sllnode<ModelAction*> * rit = list->end();
	modelclock_t next_seq = act->get_seq_number();
	/* Empty list: no predecessor, so clock vector starts from NULL */
	act->create_cv(NULL);
	} else if (rit->getVal()->get_seq_number() == next_seq) {
	/* Tail shares act's sequence number: inherit its CV and append */
	act->create_cv(rit->getVal());
	return list->add_back(act);
	/* Otherwise scan backwards for the node with the matching sequence number */
	for(;rit != NULL;rit=rit->getPrev()) {
	if (rit->getVal()->get_seq_number() == next_seq) {
	act->create_cv(rit->getVal());
	return list->insertAfter(rit, act);
1200 * Performs various bookkeeping operations for a normal write. The
1201 * complication is that we are typically inserting a normal write
1202 * lazily, so we need to insert it into the middle of lists.
1204 * @param act is the ModelAction to add.
1207 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1209 int tid = id_to_int(act->get_tid());
1210 act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
1212 // Update obj_thrd_map, a per location, per thread, order of actions
1213 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1214 if (tid >= (int)vec->size()) {
1215 uint oldsize =vec->size();
1216 vec->resize(priv->next_thread_id);
1217 for(uint i=oldsize;i<priv->next_thread_id;i++)
1218 new (&(*vec)[i]) action_list_t();
1220 act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
1222 // Update thrd_last_action, the last action taken by each thrad
1223 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1224 thrd_last_action[tid] = act;
1228 void ModelExecution::add_write_to_lists(ModelAction *write) {
1229 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1230 int tid = id_to_int(write->get_tid());
1231 if (tid >= (int)vec->size()) {
1232 uint oldsize =vec->size();
1233 vec->resize(priv->next_thread_id);
1234 for(uint i=oldsize;i<priv->next_thread_id;i++)
1235 new (&(*vec)[i]) action_list_t();
1237 write->setActionRef((*vec)[tid].add_back(write));
1241 * @brief Get the last action performed by a particular Thread
1242 * @param tid The thread ID of the Thread in question
1243 * @return The last action in the thread
1245 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1247 int threadid = id_to_int(tid);
1248 if (threadid < (int)thrd_last_action.size())
1249 return thrd_last_action[id_to_int(tid)];
1255 * @brief Get the last fence release performed by a particular Thread
1256 * @param tid The thread ID of the Thread in question
1257 * @return The last fence release in the thread, if one exists; NULL otherwise
1259 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1261 int threadid = id_to_int(tid);
1262 if (threadid < (int)thrd_last_fence_release.size())
1263 return thrd_last_fence_release[id_to_int(tid)];
1269 * Gets the last memory_order_seq_cst write (in the total global sequence)
1270 * performed on a particular object (i.e., memory location), not including the
1272 * @param curr The current ModelAction; also denotes the object location to
1274 * @return The last seq_cst write
1276 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1278 void *location = curr->get_location();
1279 return obj_last_sc_map.get(location);
/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
 */
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);
	sllnode<ModelAction*>* rit = list->end();
	/* First rewind the cursor to before_fence (this loop is only
	 * meaningful when before_fence is non-NULL — confirm the guard above
	 * this truncated view) */
	for (;rit != NULL;rit=rit->getPrev())
		if (rit->getVal() == before_fence)
	/* before_fence must appear in the fence list */
	ASSERT(rit->getVal() == before_fence);
	/* Scan backwards for the most recent seq_cst fence executed by tid */
	for (;rit != NULL;rit=rit->getPrev()) {
		ModelAction *act = rit->getVal();
		if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1318 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1319 * location). This function identifies the mutex according to the current
1320 * action, which is presumed to perform on the same mutex.
1321 * @param curr The current ModelAction; also denotes the object location to
1323 * @return The last unlock operation
1325 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1327 void *location = curr->get_location();
1329 action_list_t *list = obj_map.get(location);
1333 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1334 sllnode<ModelAction*>* rit;
1335 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1336 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1337 return rit->getVal();
1341 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1343 ModelAction *parent = get_last_action(tid);
1345 parent = get_thread(tid)->get_creation();
1350 * Returns the clock vector for a given thread.
1351 * @param tid The thread whose clock vector we want
1352 * @return Desired clock vector
1354 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1356 ModelAction *firstaction=get_parent_action(tid);
1357 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare the low @a size bytes of two values for equality.
 *
 * Used to decide whether two accesses of a given byte width observe the
 * same value (e.g., for CAS success/failure checks).
 *
 * @param val1 First value
 * @param val2 Second value
 * @param size Access width in bytes (1, 2, or 4 truncate; anything else,
 * including 8, compares the full 64-bit value)
 * @return True iff the compared bytes of val1 and val2 are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte (and any other width) accesses compare the full value */
		return val1 == val2;
	}
}
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * read operation
 * @return A freshly allocated vector of candidate writes (the rf_set)
 */
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
	/* Per-thread lists of prior writes to curr's location */
	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	/* For seq_cst reads, the last seq_cst write constrains what is visible */
	ModelAction *last_sc_write = NULL;
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction *> * rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			/* Need to check whether we will have two RMW reading from the same value */
			if (curr->is_rmwr()) {
				/* It is okay if we have a failing CAS */
				if (!curr->is_rmwrcas() ||
						valequals(curr->get_value(), act->get_value(), curr->getSize())) {
					//Need to make sure we aren't the second RMW
					CycleNode * node = mo_graph->getNode_noCreate(act);
					if (node != NULL && node->getRMW() != NULL) {
						//we are the second RMW
			/* Only add feasible reads */
			rf_set->push_back(act);
			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
	/* Debug dump of the candidate set */
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
/**
 * @brief Print a tabular dump of an action list plus a hash of its contents.
 *
 * The hash folds every action into a rolling checksum so identical traces
 * print identical HASH lines.
 *
 * @param list The list of ModelActions to print
 */
static void print_list(action_list_t *list)
{
	sllnode<ModelAction*> *it;
	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");
	unsigned int hash = 0;
	for (it = list->begin();it != NULL;it=it->getNext()) {
		const ModelAction *act = it->getVal();
		if (act->get_seq_number() > 0)
		hash = hash^(hash<<3)^(it->getVal()->hash());
	}
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph, reads-from edges, and
 * sequenced-before edges to "<filename>.dot" (Graphviz format).
 * @param filename Base name for the output file
 */
void ModelExecution::dumpGraph(char *filename)
{
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Last action seen per thread, used to draw sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
	for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* reads-from edge (red) */
			mo_graph->dot_print_edge(file,
			act->get_reads_from(),
			"label=\"rf\", color=red, weight=2");
		if (thread_array[act->get_tid()]) {
			/* sequenced-before edge (blue) from the thread's previous action */
			mo_graph->dot_print_edge(file,
			thread_array[id_to_int(act->get_tid())],
			"label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary()
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Optionally dump mo-graph artifacts for this execution */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
	/* Header line, annotated with redundancy/bug status */
	model_print("Execution trace %d:", get_execution_number());
	if (scheduler->all_threads_sleeping())
		model_print(" SLEEP-SET REDUNDANT");
	if (have_bug_reports())
		model_print(" DETECTED BUG(S)");
	print_list(&action_trace);
1525 * Add a Thread to the system for the first time. Should only be called once
1527 * @param t The Thread to add
1529 void ModelExecution::add_thread(Thread *t)
1531 unsigned int i = id_to_int(t->get_id());
1532 if (i >= thread_map.size())
1533 thread_map.resize(i + 1);
1535 if (!t->is_model_thread())
1536 scheduler->add_thread(t);
1540 * @brief Get a Thread reference by its ID
1541 * @param tid The Thread's ID
1542 * @return A Thread reference
1544 Thread * ModelExecution::get_thread(thread_id_t tid) const
1546 unsigned int i = id_to_int(tid);
1547 if (i < thread_map.size())
1548 return thread_map[i];
1553 * @brief Get a reference to the Thread in which a ModelAction was executed
1554 * @param act The ModelAction
1555 * @return A Thread reference
1557 Thread * ModelExecution::get_thread(const ModelAction *act) const
1559 return get_thread(act->get_tid());
/**
 * @brief Get a Thread reference by its pthread ID
 * @param pid The pthread's ID
 * @return A Thread reference
 */
Thread * ModelExecution::get_pthread(pthread_t pid) {
	/* NOTE(review): the low 32 bits of pthread_t appear to be reinterpreted
	 * as the model's own sequential thread id via a union 'x' set up just
	 * above this truncated view — confirm against the full source. */
	uint32_t thread_id = x.v;
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1579 * @brief Check if a Thread is currently enabled
1580 * @param t The Thread to check
1581 * @return True if the Thread is currently enabled
1583 bool ModelExecution::is_enabled(Thread *t) const
1585 return scheduler->is_enabled(t);
1589 * @brief Check if a Thread is currently enabled
1590 * @param tid The ID of the Thread to check
1591 * @return True if the Thread is currently enabled
1593 bool ModelExecution::is_enabled(thread_id_t tid) const
1595 return scheduler->is_enabled(tid);
1599 * @brief Select the next thread to execute based on the curren action
1601 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1602 * actions should be followed by the execution of their child thread. In either
1603 * case, the current action should determine the next thread schedule.
1605 * @param curr The current action
1606 * @return The next thread to run, if the current action will determine this
1607 * selection; otherwise NULL
1609 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1611 /* Do not split atomic RMW */
1612 if (curr->is_rmwr() && !paused_by_fuzzer(curr))
1613 return get_thread(curr);
1614 /* Follow CREATE with the created thread */
1615 /* which is not needed, because model.cc takes care of this */
1616 if (curr->get_type() == THREAD_CREATE)
1617 return curr->get_thread_operand();
1618 if (curr->get_type() == PTHREAD_CREATE) {
1619 return curr->get_thread_operand();
1624 /** @param act A read atomic action */
1625 bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
1627 ASSERT(act->is_read());
1629 // Actions paused by fuzzer have their sequence number reset to 0
1630 return act->get_seq_number() == 0;
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return Returns the next Thread to run, if any; NULL if this execution
 * does not force a particular next thread
 */
Thread * ModelExecution::take_step(ModelAction *curr)
{
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);
	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* check_current_action may replace curr (e.g., with the stored RMW part) */
	curr = check_current_action(curr);
	/* Process this action in ModelHistory for records */
	model->get_history()->process_action( curr, curr->get_tid() );
	/* Blocked or finished threads leave the scheduler's runnable set */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);
	return action_select_next_thread(curr);
/**
 * @brief Remove @a act from every bookkeeping structure it was entered in:
 * the global trace, the per-location/per-thread map, the object map (or the
 * write map / wait-mutex map, depending on action type), and the cycle graph.
 * @param act The ModelAction being retired
 */
void ModelExecution::removeAction(ModelAction *act) {
	/* Drop from the global action trace */
	sllnode<ModelAction *> * listref = act->getTraceRef();
	if (listref != NULL) {
		action_trace.erase(listref);
	/* Drop from obj_thrd_map (per-location, per-thread ordering) */
	sllnode<ModelAction *> * listref = act->getThrdMapRef();
	if (listref != NULL) {
		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
		(*vec)[act->get_tid()].erase(listref);
	/* The ActionRef lives in a different map depending on action type */
	if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
			list->erase(listref);
	} else if (act->is_wait()) {
		/* Waits are keyed by the mutex location stored in the value field */
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			void *mutex_loc = (void *) act->get_value();
			get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
	} else if (act->is_write()) {
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
			(*vec)[act->get_tid()].erase(listref);
	//Remove from Cyclegraph
	mo_graph->freeAction(act);
1694 ClockVector * ModelExecution::computeMinimalCV() {
1695 ClockVector *cvmin = NULL;
1696 for(unsigned int i = 0;i < thread_map.size();i++) {
1697 Thread * t = thread_map[i];
1698 if (t->get_state() == THREAD_COMPLETED)
1700 thread_id_t tid = int_to_id(i);
1701 ClockVector * cv = get_cv(tid);
1703 cvmin = new ClockVector(cv, NULL);
1705 cvmin->minmerge(cv);
//Tunables for collection (see params):
//How often to check for memory
//How much of the trace to always keep
//Whether to sacrifice completeness...i.e., remove visible writes
/**
 * @brief Garbage-collect old actions from the trace and the cycle graph.
 *
 * Computes the minimal clock vector over all live threads; actions at or
 * below that minimum (or, with params->removevisible, even visible ones)
 * are marked free, their modification-order predecessors are transitively
 * marked, and the trace is then walked backwards to drop references
 * (reads-from, release fences, stale unlocks/waits) to freed actions.
 */
void ModelExecution::collectActions() {
	//Compute minimal clock vector for all live threads
	ClockVector *cvmin = computeMinimalCV();
	SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
	/* Always keep the most recent traceminsize sequence numbers */
	modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
	//Next walk action trace... When we hit an action, see if it is
	//invisible (e.g., earlier than the first before the minimum
	//clock for the thread... if so erase it and all previous
	//actions in cyclegraph
	sllnode<ModelAction*> * it;
	for (it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		modelclock_t actseq = act->get_seq_number();
		//See if we are done
		if (actseq > maxtofree)
		thread_id_t act_tid = act->get_tid();
		modelclock_t tid_clock = cvmin->getClock(act_tid);
		/* Invisible to every live thread (or visibility sacrificed) */
		if (actseq <= tid_clock || params->removevisible) {
			ModelAction * write;
			if (act->is_write()) {
			} else if (act->is_read()) {
				write = act->get_reads_from();
			//Mark everything earlier in MO graph to be freed
			CycleNode * cn = mo_graph->getNode_noCreate(write);
			queue->push_back(cn);
			/* BFS over incoming mo edges, marking predecessors free */
			while(!queue->empty()) {
				CycleNode * node = queue->back();
				for(unsigned int i=0;i<node->getNumInEdges();i++) {
					CycleNode * prevnode = node->getInEdge(i);
					ModelAction * prevact = prevnode->getAction();
					if (prevact->get_type() != READY_FREE) {
						prevact->set_free();
						queue->push_back(prevnode);
	/* Second pass: walk backwards, dropping references to freed actions */
	for (;it != NULL;it=it->getPrev()) {
		ModelAction *act = it->getVal();
		if (act->is_free()) {
		} else if (act->is_read()) {
			if (act->get_reads_from()->is_free()) {
			const ModelAction *rel_fence =act->get_last_fence_release();
			if (rel_fence != NULL) {
				modelclock_t relfenceseq = rel_fence->get_seq_number();
				thread_id_t relfence_tid = rel_fence->get_tid();
				modelclock_t tid_clock = cvmin->getClock(relfence_tid);
				//Remove references to irrelevant release fences
				if (relfenceseq <= tid_clock)
					act->set_last_fence_release(NULL);
		} else if (act->is_fence()) {
			//Note that acquire fences can always be safely
			//removed, but could incur extra overheads in
			//traversals. Removing them before the cvmin seems
			//like a good compromise.
			//Release fences before the cvmin don't do anything
			//because everyone has already synchronized.
			//Sequentially fences before cvmin are redundant
			//because happens-before will enforce same
			modelclock_t actseq = act->get_seq_number();
			thread_id_t act_tid = act->get_tid();
			modelclock_t tid_clock = cvmin->getClock(act_tid);
			if (actseq <= tid_clock) {
		//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
		//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
		//need to keep most recent unlock/wait for each lock
		if(act->is_unlock() || act->is_wait()) {
			ModelAction * lastlock = get_last_unlock(act);
			if (lastlock != act) {
1825 Fuzzer * ModelExecution::getFuzzer() {