11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
// Small per-execution state bundle that must be snapshotted/restored together:
// the thread-id allocator, the sequence-number allocator, and bug reports.
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
// Destructor owns the bug_message objects stored in 'bugs'.
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
// next_thread_id doubles as the count of threads created so far
// (see ModelExecution::get_num_threads).
40 unsigned int next_thread_id;
41 modelclock_t used_sequence_numbers;
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
49 /** @brief Constructor */
// Wires this execution into the checker: creates the special model thread
// (always thread id 0), registers the fuzzer and scheduler back-references,
// and sets up the TLS destructor key.
50 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
54 thread_map(2), /* We'll always need at least 2 threads */
59 condvar_waiters_map(),
63 thrd_last_fence_release(),
64 priv(new struct model_snapshot_members ()),
65 mo_graph(new CycleGraph()),
66 fuzzer(new NewFuzzer()),
69 /* Initialize a model-checker thread, for special ModelActions */
70 model_thread = new Thread(get_next_id());
71 add_thread(model_thread);
// Fuzzer needs the execution history; scheduler needs this execution.
72 fuzzer->register_engine(m->get_history(), this);
73 scheduler->register_engine(this);
75 pthread_key_create(&pthreadkey, tlsdestructor);
79 /** @brief Destructor */
// Frees every Thread created during this execution (including the model thread).
80 ModelExecution::~ModelExecution()
82 for (unsigned int i = 0;i < get_num_threads();i++)
83 delete get_thread(int_to_id(i));
/** @return the current execution number, delegated to the owning ModelChecker */
89 int ModelExecution::get_execution_number() const
91 return model->get_execution_number();
/**
 * Lazily fetch the per-location action list from 'hash', creating and
 * inserting an empty list on first access so callers never see NULL.
 */
94 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
96 action_list_t *tmp = hash->get(ptr);
98 tmp = new action_list_t();
/**
 * Same lazy-create pattern as get_safe_ptr_action, but for the
 * per-location, per-thread vector of action lists.
 */
104 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
106 SnapVector<action_list_t> *tmp = hash->get(ptr);
108 tmp = new SnapVector<action_list_t>();
114 /** @return a thread ID for a new Thread */
// Post-increment: hands out the current id and advances the allocator.
115 thread_id_t ModelExecution::get_next_id()
117 return priv->next_thread_id++;
120 /** @return the number of user threads created during this execution */
// Valid because thread ids are allocated densely starting at 0.
121 unsigned int ModelExecution::get_num_threads() const
123 return priv->next_thread_id;
126 /** @return a sequence number for a new ModelAction */
// Pre-increment: sequence numbers start at 1 (0 is the initial counter value).
127 modelclock_t ModelExecution::get_next_seq_num()
129 return ++priv->used_sequence_numbers;
132 /** @return a sequence number for a new ModelAction */
// Read-only peek at the most recently issued sequence number.
133 modelclock_t ModelExecution::get_curr_seq_num()
135 return priv->used_sequence_numbers;
138 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
// Undoes exactly one get_next_seq_num() allocation.
139 void ModelExecution::restore_last_seq_num()
141 priv->used_sequence_numbers--;
145 * @brief Should the current action wake up a given thread?
147 * @param curr The current action
148 * @param thread The thread that we might wake up
149 * @return True, if we should wake up the sleeping thread; false otherwise
151 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
// 'asleep' is the action the sleeping thread is blocked on.
153 const ModelAction *asleep = thread->get_pending();
154 /* Don't allow partial RMW to wake anyone up */
157 /* Synchronizing actions may have been backtracked */
158 if (asleep->could_synchronize_with(curr))
160 /* All acquire/release fences and fence-acquire/store-release */
161 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
163 /* Fence-release + store can awake load-acquire on the same location */
164 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
165 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Only relevant if the fence-release follows the sleeper's last action.
166 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
169 /* The sleep is literally sleeping */
170 if (asleep->is_sleep()) {
// Delegate the wake decision for real sleeps to the fuzzer.
171 if (fuzzer->shouldWake(asleep))
/**
 * Scan every thread in the scheduler's sleep set and wake those for which
 * should_wake_up(curr, thr) holds; sleep-type pendings are marked woken.
 */
178 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
180 for (unsigned int i = 0;i < get_num_threads();i++) {
181 Thread *thr = get_thread(int_to_id(i));
182 if (scheduler->is_sleep_set(thr)) {
183 if (should_wake_up(curr, thr)) {
184 /* Remove this thread from sleep set */
185 scheduler->remove_sleep(thr);
// Record that a literal sleep() was woken early.
186 if (thr->get_pending()->is_sleep())
187 thr->set_wakeup_state(true);
/** @brief Record a bug report for this execution. @param msg bug description */
193 void ModelExecution::assert_bug(const char *msg)
195 priv->bugs.push_back(new bug_message(msg));
199 /** @return True, if any bugs have been reported for this execution */
200 bool ModelExecution::have_bug_reports() const
202 return priv->bugs.size() != 0;
/** @return the list of bug reports collected during this execution */
205 SnapVector<bug_message *> * ModelExecution::get_bugs() const
211 * Check whether the current trace has triggered an assertion which should halt
214 * @return True, if the execution should be aborted; false otherwise
216 bool ModelExecution::has_asserted() const
218 return priv->asserted;
222 * Trigger a trace assertion which should cause this execution to be halted.
223 * This can be due to a detected bug or due to an infeasibility that should
// Sticky flag; checked by has_asserted() to stop the execution.
226 void ModelExecution::set_assert()
228 priv->asserted = true;
232 * Check if we are in a deadlock. Should only be called at the end of an
233 * execution, although it should not give false positives in the middle of an
234 * execution (there should be some ENABLED thread).
236 * @return True if program is in a deadlock; false otherwise
238 bool ModelExecution::is_deadlocked() const
240 bool blocking_threads = false;
241 for (unsigned int i = 0;i < get_num_threads();i++) {
242 thread_id_t tid = int_to_id(i);
245 Thread *t = get_thread(tid);
// A user thread with a pending action here is blocked (not enabled).
246 if (!t->is_model_thread() && t->get_pending())
247 blocking_threads = true;
249 return blocking_threads;
253 * Check if this is a complete execution. That is, have all thread completed
254 * execution (rather than exiting because sleep sets have forced a redundant
257 * @return True if the execution is complete.
259 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the execution is not finished.
261 for (unsigned int i = 0;i < get_num_threads();i++)
262 if (is_enabled(int_to_id(i)))
/**
 * Materialize a plain (non-atomic) store at 'location' as a lazily-created
 * NONATOMIC_WRITE ModelAction so the read machinery can read from it.
 * The store's thread and clock come from the shadow memory
 * (getStoreThreadAndClock); the action is inserted into the write lists
 * retroactively and fed into w_modification_order and the history.
 */
267 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
268 uint64_t value = *((const uint64_t *) location);
269 modelclock_t storeclock;
270 thread_id_t storethread;
271 getStoreThreadAndClock(location, &storethread, &storeclock);
// Mark the location atomic so we don't convert the same store twice.
272 setAtomicStoreFlag(location);
273 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
// Reuse the original store's clock rather than allocating a new one.
274 act->set_seq_number(storeclock);
275 add_normal_write_to_lists(act);
276 add_write_to_lists(act);
277 w_modification_order(act);
278 model->get_history()->process_action(act, act->get_tid());
283 * Processes a read model action.
284 * @param curr is the read model action to process.
285 * @param rf_set is the set of model actions we can possibly read from
286 * @return True if processing this read updates the mo_graph.
288 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
290 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
// A pending non-atomic store at this location becomes a candidate write.
291 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
292 if (hasnonatomicstore) {
293 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
294 rf_set->push_back(nonatomicstore);
297 // Remove writes that violate read modification order
// swap-with-back removal: keeps the vector compact without preserving order.
300 while (i < rf_set->size()) {
301 ModelAction * rf = (*rf_set)[i];
302 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
303 (*rf_set)[i] = rf_set->back();
// Fuzzer picks which of the surviving writes this read observes.
310 int index = fuzzer->selectWrite(curr, rf_set);
312 ModelAction *rf = (*rf_set)[index];
315 bool canprune = false;
// Second pass (non-check-only) collects mo-prior writes into priorset.
316 if (r_modification_order(curr, rf, priorset, &canprune)) {
317 for(unsigned int i=0;i<priorset->size();i++) {
318 mo_graph->addEdge((*priorset)[i], rf);
321 get_thread(curr)->set_return_value(curr->get_return_value());
// Prunable plain reads are dropped from the per-thread object list.
323 if (canprune && curr->get_type() == ATOMIC_READ) {
324 int tid = id_to_int(curr->get_tid());
325 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
326 curr->setThrdMapRef(NULL);
331 (*rf_set)[index] = rf_set->back();
337 * Processes a lock, trylock, or unlock model action. @param curr is
338 * the read model action to process.
340 * The try lock operation checks whether the lock is taken. If not,
341 * it falls to the normal lock operation case. If so, it returns
344 * The lock operation has already been checked that it is enabled, so
345 * it just grabs the lock and synchronizes with the previous unlock.
347 * The unlock operation has to re-enable all of the threads that are
348 * waiting on the lock.
350 * @return True if synchronization was updated; false otherwise
352 bool ModelExecution::process_mutex(ModelAction *curr)
354 cdsc::mutex *mutex = curr->get_mutex();
355 struct cdsc::mutex_state *state = NULL;
358 state = mutex->get_state();
360 switch (curr->get_type()) {
361 case ATOMIC_TRYLOCK: {
// trylock succeeds iff nobody currently holds the lock.
362 bool success = !state->locked;
363 curr->set_try_lock(success);
365 get_thread(curr)->set_return_value(0);
368 get_thread(curr)->set_return_value(1);
370 //otherwise fall into the lock case
372 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
373 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
374 // assert_bug("Lock access before initialization");
// Record the acquiring thread as the lock owner.
375 state->locked = get_thread(curr);
376 ModelAction *unlock = get_last_unlock(curr);
377 //synchronize with the previous unlock statement
378 if (unlock != NULL) {
379 synchronize(unlock, curr);
385 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
386 if (fuzzer->shouldWait(curr)) {
387 /* wake up the other threads */
388 for (unsigned int i = 0;i < get_num_threads();i++) {
389 Thread *t = get_thread(int_to_id(i));
390 Thread *curr_thrd = get_thread(curr);
391 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
395 /* unlock the lock - after checking who was waiting on it */
396 state->locked = NULL;
398 /* disable this thread */
// Waiting thread parks itself on the condvar's waiter list and sleeps.
399 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
400 scheduler->sleep(get_thread(curr));
405 case ATOMIC_TIMEDWAIT:
406 case ATOMIC_UNLOCK: {
407 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
408 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
409 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
410 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
411 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
412 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
413 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
414 //MUST EVENMTUALLY RELEASE...
416 /* wake up the other threads */
417 for (unsigned int i = 0;i < get_num_threads();i++) {
418 Thread *t = get_thread(int_to_id(i));
419 Thread *curr_thrd = get_thread(curr);
420 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
424 /* unlock the lock - after checking who was waiting on it */
425 state->locked = NULL;
428 case ATOMIC_NOTIFY_ALL: {
429 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
430 //activate all the waiting threads
431 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
432 scheduler->wake(get_thread(rit->getVal()));
437 case ATOMIC_NOTIFY_ONE: {
438 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
439 if (waiters->size() != 0) {
// The fuzzer chooses which single waiter gets notified.
440 Thread * thread = fuzzer->selectNotify(waiters);
441 scheduler->wake(thread);
453 * Process a write ModelAction
454 * @param curr The ModelAction to process
455 * @return True if the mo_graph was updated or promises were resolved
457 void ModelExecution::process_write(ModelAction *curr)
// Writes only add modification-order edges; return value is fixed.
459 w_modification_order(curr);
460 get_thread(curr)->set_return_value(VALUE_NONE);
464 * Process a fence ModelAction
465 * @param curr The ModelAction to process
466 * @return True if synchronization was updated
468 bool ModelExecution::process_fence(ModelAction *curr)
471 * fence-relaxed: no-op
472 * fence-release: only log the occurence (not in this function), for
473 * use in later synchronization
474 * fence-acquire (this function): search for hypothetical release
476 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
478 bool updated = false;
479 if (curr->is_acquire()) {
480 action_list_t *list = &action_trace;
481 sllnode<ModelAction *> * rit;
482 /* Find X : is_read(X) && X --sb-> curr */
// Walk the whole-trace list backwards from the most recent action.
483 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
484 ModelAction *act = rit->getVal();
// Only sequenced-before actions in the same thread matter.
487 if (act->get_tid() != curr->get_tid())
489 /* Stop at the beginning of the thread */
490 if (act->is_thread_start())
492 /* Stop once we reach a prior fence-acquire */
493 if (act->is_fence() && act->is_acquire())
497 /* read-acquire will find its own release sequences */
498 if (act->is_acquire())
501 /* Establish hypothetical release sequences */
502 ClockVector *cv = get_hb_from_write(act->get_reads_from());
// merge() reports whether curr's clock vector actually grew.
503 if (cv != NULL && curr->get_cv()->merge(cv))
511 * @brief Process the current action for thread-related activity
513 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
514 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
515 * synchronization, etc. This function is a no-op for non-THREAD actions
516 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
518 * @param curr The current action
519 * @return True if synchronization was updated or a thread completed
521 void ModelExecution::process_thread_action(ModelAction *curr)
523 switch (curr->get_type()) {
524 case THREAD_CREATE: {
// Parent passes the C11 thrd_t plus entry function/argument via the action.
525 thrd_t *thrd = (thrd_t *)curr->get_location();
526 struct thread_params *params = (struct thread_params *)curr->get_value();
527 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
528 curr->set_thread_operand(th);
530 th->set_creation(curr);
533 case PTHREAD_CREATE: {
// pthread ids are handed out from a dense counter, stored through the
// caller-supplied pthread_t location.
534 (*(uint32_t *)curr->get_location()) = pthread_counter++;
536 struct pthread_params *params = (struct pthread_params *)curr->get_value();
537 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
538 curr->set_thread_operand(th);
540 th->set_creation(curr);
// Keep the counter->Thread lookup table big enough.
542 if ( pthread_map.size() < pthread_counter )
543 pthread_map.resize( pthread_counter );
544 pthread_map[ pthread_counter-1 ] = th;
// join: synchronize with the last action of the joined (completed) thread.
549 Thread *blocking = curr->get_thread_operand();
550 ModelAction *act = get_last_action(blocking->get_id());
551 synchronize(act, curr);
555 Thread *blocking = curr->get_thread_operand();
556 ModelAction *act = get_last_action(blocking->get_id());
557 synchronize(act, curr);
558 break; // WL: to be add (modified)
561 case THREADONLY_FINISH:
562 case THREAD_FINISH: {
563 Thread *th = get_thread(curr);
// Special-case: the initial thread finishing ends the whole program.
564 if (curr->get_type() == THREAD_FINISH &&
565 th == model->getInitThread()) {
571 /* Wake up any joining threads */
572 for (unsigned int i = 0;i < get_num_threads();i++) {
573 Thread *waiting = get_thread(int_to_id(i));
574 if (waiting->waiting_on() == th &&
575 waiting->get_pending()->is_thread_join())
576 scheduler->wake(waiting);
// sleep: park the action as pending and put the thread in the sleep set.
585 Thread *th = get_thread(curr);
586 th->set_pending(curr);
587 scheduler->add_sleep(th);
596 * Initialize the current action by performing one or more of the following
597 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
598 * manipulating backtracking sets, allocating and
599 * initializing clock vectors, and computing the promises to fulfill.
601 * @param curr The current action, as passed from the user context; may be
602 * freed/invalidated after the execution of this function, with a different
603 * action "returned" its place (pass-by-reference)
604 * @return True if curr is a newly-explored action; false otherwise
606 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// The second half of an RMW merges into the earlier RMWR action.
608 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
609 ModelAction *newcurr = process_rmw(*curr);
615 ModelAction *newcurr = *curr;
617 newcurr->set_seq_number(get_next_seq_num());
618 /* Always compute new clock vector */
619 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
621 /* Assign most recent release fence */
622 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
624 return true; /* This was a new ModelAction */
629 * @brief Establish reads-from relation between two actions
631 * Perform basic operations involved with establishing a concrete rf relation,
632 * including setting the ModelAction data and checking for release sequences.
634 * @param act The action that is reading (must be a read)
635 * @param rf The action from which we are reading (must be a write)
637 * @return True if this read established synchronization
640 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
643 ASSERT(rf->is_write());
645 act->set_read_from(rf);
// Acquire reads additionally synchronize: merge the release sequence's
// happens-before clock vector into the reader.
646 if (act->is_acquire()) {
647 ClockVector *cv = get_hb_from_write(rf);
650 act->get_cv()->merge(cv);
655 * @brief Synchronizes two actions
657 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
658 * This function performs the synchronization as well as providing other hooks
659 * for other checks along with synchronization.
661 * @param first The left-hand side of the synchronizes-with relation
662 * @param second The right-hand side of the synchronizes-with relation
663 * @return True if the synchronization was successful (i.e., was consistent
664 * with the execution order); false otherwise
666 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// Sanity check: sw edges must agree with the trace order.
668 if (*second < *first) {
669 ASSERT(0); //This should not happend
672 return second->synchronize_with(first);
676 * @brief Check whether a model action is enabled.
678 * Checks whether an operation would be successful (i.e., is a lock already
679 * locked, or is the joined thread already complete).
681 * For yield-blocking, yields are never enabled.
683 * @param curr is the ModelAction to check whether it is enabled.
684 * @return a bool that indicates whether the action is enabled.
686 bool ModelExecution::check_action_enabled(ModelAction *curr) {
687 if (curr->is_lock()) {
688 cdsc::mutex *lock = curr->get_mutex();
689 struct cdsc::mutex_state *state = lock->get_state();
// join blocks until the target thread has completed.
692 } else if (curr->is_thread_join()) {
693 Thread *blocking = curr->get_thread_operand();
694 if (!blocking->is_complete()) {
// sleep is enabled only if the fuzzer decides the thread should sleep.
697 } else if (curr->is_sleep()) {
698 if (!fuzzer->shouldSleep(curr))
706 * This is the heart of the model checker routine. It performs model-checking
707 * actions corresponding to a given "current action." Among other processes, it
708 * calculates reads-from relationships, updates synchronization clock vectors,
709 * forms a memory_order constraints graph, and handles replay/backtrack
710 * execution when running permutations of previously-observed executions.
712 * @param curr The current action to process
713 * @return The ModelAction that is actually executed; may be different than
716 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// RMWC/RMW means this is the completion half of a read-modify-write,
// whose read half was already processed.
719 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
720 bool newly_explored = initialize_curr_action(&curr);
724 wake_up_sleeping_actions(curr);
726 SnapVector<ModelAction *> * rf_set = NULL;
727 /* Build may_read_from set for newly-created actions */
728 if (newly_explored && curr->is_read())
729 rf_set = build_may_read_from(curr);
731 if (curr->is_read() && !second_part_of_rmw) {
732 process_read(curr, rf_set);
// Non-reads must not have accumulated a reads-from set.
735 ASSERT(rf_set == NULL);
737 /* Add the action to lists */
738 if (!second_part_of_rmw)
739 add_action_to_lists(curr);
741 if (curr->is_write())
742 add_write_to_lists(curr);
744 process_thread_action(curr);
746 if (curr->is_write())
749 if (curr->is_fence())
752 if (curr->is_mutex_op())
758 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
759 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
// Merge this completion into the thread's preceding RMWR action.
760 ModelAction *lastread = get_last_action(act->get_tid());
761 lastread->process_rmw(act);
// RMW atomicity: the RMW is mo-ordered immediately after the write it read.
763 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
769 * @brief Updates the mo_graph with the constraints imposed from the current
772 * Basic idea is the following: Go through each other thread and find
773 * the last action that happened before our read. Two cases:
775 * -# The action is a write: that write must either occur before
776 * the write we read from or be the write we read from.
777 * -# The action is a read: the write that that action read from
778 * must occur before the write we read from or be the same write.
780 * @param curr The current action. Must be a read.
781 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
782 * @param check_only If true, then only check whether the current action satisfies
783 * read modification order or not, without modifiying priorset and canprune.
785 * @return True if modification order edges were added; false otherwise
788 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
789 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
791 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
793 ASSERT(curr->is_read());
795 /* Last SC fence in the current thread */
796 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
798 int tid = curr->get_tid();
799 ModelAction *prev_same_thread = NULL;
800 /* Iterate over all threads */
// Start at curr's own thread and wrap around through every thread id.
801 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
802 /* Last SC fence in thread tid */
803 ModelAction *last_sc_fence_thread_local = NULL;
805 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
807 /* Last SC fence in thread tid, before last SC fence in current thread */
808 ModelAction *last_sc_fence_thread_before = NULL;
809 if (last_sc_fence_local)
810 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
812 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
813 if (prev_same_thread != NULL &&
814 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
815 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
819 /* Iterate over actions in thread, starting from most recent */
820 action_list_t *list = &(*thrd_lists)[tid];
821 sllnode<ModelAction *> * rit;
822 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
823 ModelAction *act = rit->getVal();
828 /* Don't want to add reflexive edges on 'rf' */
829 if (act->equals(rf)) {
830 if (act->happens_before(curr))
836 if (act->is_write()) {
837 /* C++, Section 29.3 statement 5 */
838 if (curr->is_seqcst() && last_sc_fence_thread_local &&
839 *act < *last_sc_fence_thread_local) {
// If rf can already reach act in mo, adding act --mo--> rf would be a cycle.
840 if (mo_graph->checkReachable(rf, act))
843 priorset->push_back(act);
846 /* C++, Section 29.3 statement 4 */
847 else if (act->is_seqcst() && last_sc_fence_local &&
848 *act < *last_sc_fence_local) {
849 if (mo_graph->checkReachable(rf, act))
852 priorset->push_back(act);
855 /* C++, Section 29.3 statement 6 */
856 else if (last_sc_fence_thread_before &&
857 *act < *last_sc_fence_thread_before) {
858 if (mo_graph->checkReachable(rf, act))
861 priorset->push_back(act);
867 * Include at most one act per-thread that "happens
870 if (act->happens_before(curr)) {
872 if (last_sc_fence_local == NULL ||
873 (*last_sc_fence_local < *act)) {
874 prev_same_thread = act;
877 if (act->is_write()) {
878 if (mo_graph->checkReachable(rf, act))
881 priorset->push_back(act);
// act is a read: order rf after the write act read from (if different).
883 ModelAction *prevrf = act->get_reads_from();
884 if (!prevrf->equals(rf)) {
885 if (mo_graph->checkReachable(rf, prevrf))
888 priorset->push_back(prevrf);
890 if (act->get_tid() == curr->get_tid()) {
891 //Can prune curr from obj list
905 * Updates the mo_graph with the constraints imposed from the current write.
907 * Basic idea is the following: Go through each other thread and find
908 * the lastest action that happened before our write. Two cases:
910 * (1) The action is a write => that write must occur before
913 * (2) The action is a read => the write that that action read from
914 * must occur before the current write.
916 * This method also handles two other issues:
918 * (I) Sequential Consistency: Making sure that if the current write is
919 * seq_cst, that it occurs after the previous seq_cst write.
921 * (II) Sending the write back to non-synchronizing reads.
923 * @param curr The current action. Must be a write.
924 * @param send_fv A vector for stashing reads to which we may pass our future
925 * value. If NULL, then don't record any future values.
926 * @return True if modification order edges were added; false otherwise
928 void ModelExecution::w_modification_order(ModelAction *curr)
930 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
932 ASSERT(curr->is_write());
// All mo-predecessors are collected here and edged into curr at the end.
934 SnapList<ModelAction *> edgeset;
936 if (curr->is_seqcst()) {
937 /* We have to at least see the last sequentially consistent write,
938 so we are initialized. */
939 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
940 if (last_seq_cst != NULL) {
941 edgeset.push_back(last_seq_cst);
943 //update map for next query
944 obj_last_sc_map.put(curr->get_location(), curr);
947 /* Last SC fence in the current thread */
948 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
950 /* Iterate over all threads */
951 for (i = 0;i < thrd_lists->size();i++) {
952 /* Last SC fence in thread i, before last SC fence in current thread */
953 ModelAction *last_sc_fence_thread_before = NULL;
954 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
955 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
957 /* Iterate over actions in thread, starting from most recent */
958 action_list_t *list = &(*thrd_lists)[i];
959 sllnode<ModelAction*>* rit;
960 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
961 ModelAction *act = rit->getVal();
964 * 1) If RMW and it actually read from something, then we
965 * already have all relevant edges, so just skip to next
968 * 2) If RMW and it didn't read from anything, we should
969 * whatever edge we can get to speed up convergence.
971 * 3) If normal write, we need to look at earlier actions, so
972 * continue processing list.
974 if (curr->is_rmw()) {
975 if (curr->get_reads_from() != NULL)
983 /* C++, Section 29.3 statement 7 */
984 if (last_sc_fence_thread_before && act->is_write() &&
985 *act < *last_sc_fence_thread_before) {
986 edgeset.push_back(act);
991 * Include at most one act per-thread that "happens
994 if (act->happens_before(curr)) {
996 * Note: if act is RMW, just add edge:
998 * The following edge should be handled elsewhere:
999 * readfrom(act) --mo--> act
1001 if (act->is_write())
1002 edgeset.push_back(act);
1003 else if (act->is_read()) {
1004 //if previous read accessed a null, just keep going
1005 edgeset.push_back(act->get_reads_from());
// One bulk insertion: every collected predecessor --mo--> curr.
1011 mo_graph->addEdges(&edgeset, curr);
1016 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1017 * some constraints. This method checks one the following constraint (others
1018 * require compiler support):
1020 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1021 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1023 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1025 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1027 /* Iterate over all threads */
1028 for (i = 0;i < thrd_lists->size();i++) {
// Earliest write (per thread) that the reader happens-before.
1029 const ModelAction *write_after_read = NULL;
1031 /* Iterate over actions in thread, starting from most recent */
1032 action_list_t *list = &(*thrd_lists)[i];
1033 sllnode<ModelAction *>* rit;
1034 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1035 ModelAction *act = rit->getVal();
1037 /* Don't disallow due to act == reader */
1038 if (!reader->happens_before(act) || reader == act)
1040 else if (act->is_write())
1041 write_after_read = act;
1042 else if (act->is_read() && act->get_reads_from() != NULL)
1043 write_after_read = act->get_reads_from();
// Disallowed if the candidate writer is mo-after a write the reader hb-precedes.
1046 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1053 * Computes the clock vector that happens before propagates from this write.
1055 * @param rf The action that might be part of a release sequence. Must be a
1057 * @return ClockVector of happens before relation.
1060 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// First pass: walk the rf (reads-from) chain backwards, collecting RMWs
// whose release-sequence clock vector (rfcv) has not been computed yet.
1061 SnapVector<ModelAction *> * processset = NULL;
1062 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1063 ASSERT(rf->is_write());
// Stop at a non-RMW write, an acq+rel RMW, or a write with a cached rfcv.
1064 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1066 if (processset == NULL)
1067 processset = new SnapVector<ModelAction *>();
1068 processset->push_back(rf);
// Second pass: process collected writes from oldest to newest, building/caching
// each one's rfcv from its predecessor's.
1071 int i = (processset == NULL) ? 0 : processset->size();
1073 ClockVector * vec = NULL;
1075 if (rf->get_rfcv() != NULL) {
1076 vec = rf->get_rfcv();
1077 } else if (rf->is_acquire() && rf->is_release()) {
1079 } else if (rf->is_release() && !rf->is_rmw()) {
1081 } else if (rf->is_release()) {
1082 //have rmw that is release and doesn't have a rfcv
1083 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1086 //operation that isn't release
1087 if (rf->get_last_fence_release()) {
1089 vec = rf->get_last_fence_release()->get_cv();
1091 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1097 rf = (*processset)[i];
1101 if (processset != NULL)
1107 * Performs various bookkeeping operations for the current ModelAction. For
1108 * instance, adds action to the per-object, per-thread action vector and to the
1109 * action trace list of all thread actions.
1111 * @param act is the ModelAction to add.
1113 void ModelExecution::add_action_to_lists(ModelAction *act)
1115 int tid = id_to_int(act->get_tid());
// Only SC fences and unlocks go on the per-object (obj_map) list here.
1116 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1117 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1118 act->setActionRef(list->add_back(act));
1121 // Update action trace, a total order of all actions
1122 act->setTraceRef(action_trace.add_back(act));
1125 // Update obj_thrd_map, a per location, per thread, order of actions
1126 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector to cover every allocated thread id,
// placement-constructing the new slots.
1127 if ((int)vec->size() <= tid) {
1128 uint oldsize = vec->size();
1129 vec->resize(priv->next_thread_id);
1130 for(uint i = oldsize;i < priv->next_thread_id;i++)
1131 new (&(*vec)[i]) action_list_t();
1133 act->setThrdMapRef((*vec)[tid].add_back(act));
1135 // Update thrd_last_action, the last action taken by each thread
1136 if ((int)thrd_last_action.size() <= tid)
1137 thrd_last_action.resize(get_num_threads());
1138 thrd_last_action[tid] = act;
1140 // Update thrd_last_fence_release, the last release fence taken by each thread
1141 if (act->is_fence() && act->is_release()) {
1142 if ((int)thrd_last_fence_release.size() <= tid)
1143 thrd_last_fence_release.resize(get_num_threads());
1144 thrd_last_fence_release[tid] = act;
// wait actions are additionally indexed under the mutex they wait on
// (the mutex address travels in the action's value field).
1147 if (act->is_wait()) {
1148 void *mutex_loc = (void *) act->get_value();
1149 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
1151 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1152 if ((int)vec->size() <= tid) {
1153 uint oldsize = vec->size();
1154 vec->resize(priv->next_thread_id);
1155 for(uint i = oldsize;i < priv->next_thread_id;i++)
1156 new (&(*vec)[i]) action_list_t();
1158 act->setThrdMapRef((*vec)[tid].add_back(act));
/**
 * Insert 'act' into 'list' in sequence-number order, scanning backwards
 * from the tail. Fast path: appending at the end when the tail already
 * has act's sequence number (or the list is empty).
 */
1162 sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
1163 sllnode<ModelAction*> * rit = list->end();
1164 modelclock_t next_seq = act->get_seq_number();
1165 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1166 return list->add_back(act);
// Otherwise find the last node with the same sequence number and splice after it.
1168 for(;rit != NULL;rit=rit->getPrev()) {
1169 if (rit->getVal()->get_seq_number() == next_seq) {
1170 return list->insertAfter(rit, act);
// Like insertIntoActionList, but additionally initializes act's clock vector
// from the action it ends up sequenced after (or from nothing when the list
// is empty).
1177 sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1178 sllnode<ModelAction*> * rit = list->end();
1179 modelclock_t next_seq = act->get_seq_number();
// Empty list: no predecessor, so build the clock vector from scratch.
1181 act->create_cv(NULL);
// Tail already carries this sequence number: inherit its clock vector, append.
1183 } else if (rit->getVal()->get_seq_number() == next_seq) {
1184 act->create_cv(rit->getVal());
1185 return list->add_back(act);
// Otherwise scan backwards for the node with the matching sequence number and
// splice in after it, inheriting that node's clock vector.
// NOTE(review): the no-match fall-through is outside this excerpt — verify.
1187 for(;rit != NULL;rit=rit->getPrev()) {
1188 if (rit->getVal()->get_seq_number() == next_seq) {
1189 act->create_cv(rit->getVal());
1190 return list->insertAfter(rit, act);
1198 * Performs various bookkeeping operations for a normal write. The
1199 * complication is that we are typically inserting a normal write
1200 * lazily, so we need to insert it into the middle of lists.
1202 * @param act is the ModelAction to add.
1205 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1207 int tid = id_to_int(act->get_tid());
1208 act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
1210 // Update obj_thrd_map, a per location, per thread, order of actions
1211 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1212 if (tid >= (int)vec->size()) {
1213 uint oldsize =vec->size();
1214 vec->resize(priv->next_thread_id);
1215 for(uint i=oldsize;i<priv->next_thread_id;i++)
1216 new (&(*vec)[i]) action_list_t();
1218 act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
1220 ModelAction * lastact = thrd_last_action[tid];
1221 // Update thrd_last_action, the last action taken by each thrad
1222 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1223 thrd_last_action[tid] = act;
1227 void ModelExecution::add_write_to_lists(ModelAction *write) {
1228 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1229 int tid = id_to_int(write->get_tid());
1230 if (tid >= (int)vec->size()) {
1231 uint oldsize =vec->size();
1232 vec->resize(priv->next_thread_id);
1233 for(uint i=oldsize;i<priv->next_thread_id;i++)
1234 new (&(*vec)[i]) action_list_t();
1236 write->setActionRef((*vec)[tid].add_back(write));
1240 * @brief Get the last action performed by a particular Thread
1241 * @param tid The thread ID of the Thread in question
1242 * @return The last action in the thread
1244 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1246 int threadid = id_to_int(tid);
1247 if (threadid < (int)thrd_last_action.size())
1248 return thrd_last_action[id_to_int(tid)];
1254 * @brief Get the last fence release performed by a particular Thread
1255 * @param tid The thread ID of the Thread in question
1256 * @return The last fence release in the thread, if one exists; NULL otherwise
1258 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1260 int threadid = id_to_int(tid);
1261 if (threadid < (int)thrd_last_fence_release.size())
1262 return thrd_last_fence_release[id_to_int(tid)];
1268 * Gets the last memory_order_seq_cst write (in the total global sequence)
1269 * performed on a particular object (i.e., memory location), not including the
1271 * @param curr The current ModelAction; also denotes the object location to
1273 * @return The last seq_cst write
1275 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1277 void *location = curr->get_location();
1278 return obj_last_sc_map.get(location);
1282 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1283 * performed in a particular thread, prior to a particular fence.
1284 * @param tid The ID of the thread to check
1285 * @param before_fence The fence from which to begin the search; if NULL, then
1286 * search for the most recent fence in the thread.
1287 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1289 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1291 /* All fences should have location FENCE_LOCATION */
1292 action_list_t *list = obj_map.get(FENCE_LOCATION);
1297 sllnode<ModelAction*>* rit = list->end();
1300 for (;rit != NULL;rit=rit->getPrev())
1301 if (rit->getVal() == before_fence)
1304 ASSERT(rit->getVal() == before_fence);
1308 for (;rit != NULL;rit=rit->getPrev()) {
1309 ModelAction *act = rit->getVal();
1310 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1317 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1318 * location). This function identifies the mutex according to the current
1319 * action, which is presumed to perform on the same mutex.
1320 * @param curr The current ModelAction; also denotes the object location to
1322 * @return The last unlock operation
1324 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1326 void *location = curr->get_location();
1328 action_list_t *list = obj_map.get(location);
1332 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1333 sllnode<ModelAction*>* rit;
1334 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1335 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1336 return rit->getVal();
1340 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1342 ModelAction *parent = get_last_action(tid);
1344 parent = get_thread(tid)->get_creation();
1349 * Returns the clock vector for a given thread.
1350 * @param tid The thread whose clock vector we want
1351 * @return Desired clock vector
1353 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1355 ModelAction *firstaction=get_parent_action(tid);
1356 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare two raw 64-bit values for equality over an access width.
 *
 * Only the low @a size bytes participate in the comparison, matching the
 * width of the atomic access that produced the values.
 *
 * @param val1 First raw value
 * @param val2 Second raw value
 * @param size Access width in bytes (1, 2, 4 or 8)
 * @return true iff the low @a size bytes of the two values are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte accesses (and any unexpected width) compare all 64 bits */
		return val1 == val2;
	}
}
1376 * Build up an initial set of all past writes that this 'read' action may read
1377 * from, as well as any previously-observed future values that must still be valid.
1379 * @param curr is the current ModelAction that we are exploring; it must be a
// * read-type action. @return a freshly allocated vector of candidate writes;
// * the caller owns (and must free) the returned SnapVector.
1382 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
// Per-thread lists of writes to this location (may be NULL if no writes yet).
1384 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1386 ASSERT(curr->is_read());
1388 ModelAction *last_sc_write = NULL;
// For a seq_cst read, remember the last seq_cst write so we can filter out
// writes that are hidden behind it in the SC order.
1390 if (curr->is_seqcst())
1391 last_sc_write = get_last_seq_cst_write(curr);
1393 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1395 /* Iterate over all threads */
1396 if (thrd_lists != NULL)
1397 for (i = 0;i < thrd_lists->size();i++) {
1398 /* Iterate over actions in thread, starting from most recent */
1399 action_list_t *list = &(*thrd_lists)[i];
1400 sllnode<ModelAction *> * rit;
1401 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1402 ModelAction *act = rit->getVal();
1407 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1408 bool allow_read = true;
// Disqualify seq_cst-ordered writes other than the latest seq_cst write.
// NOTE(review): the statement executed under this condition is elided in
// this excerpt — presumably it clears allow_read; confirm in full source.
1410 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1413 /* Need to check whether we will have two RMW reading from the same value */
1414 if (curr->is_rmwr()) {
1415 /* It is okay if we have a failing CAS */
1416 if (!curr->is_rmwrcas() ||
1417 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1418 //Need to make sure we aren't the second RMW
1419 CycleNode * node = mo_graph->getNode_noCreate(act);
1420 if (node != NULL && node->getRMW() != NULL) {
1421 //we are the second RMW
1428 /* Only add feasible reads */
1429 rf_set->push_back(act);
1432 /* Include at most one act per-thread that "happens before" curr */
1433 if (act->happens_before(curr))
// Debug dump of the candidate set for this read.
1438 if (DBG_ENABLED()) {
1439 model_print("Reached read action:\n");
1441 model_print("End printing read_from_past\n");
1446 static void print_list(action_list_t *list)
1448 sllnode<ModelAction*> *it;
1450 model_print("------------------------------------------------------------------------------------\n");
1451 model_print("# t Action type MO Location Value Rf CV\n");
1452 model_print("------------------------------------------------------------------------------------\n");
1454 unsigned int hash = 0;
1456 for (it = list->begin();it != NULL;it=it->getNext()) {
1457 const ModelAction *act = it->getVal();
1458 if (act->get_seq_number() > 0)
1460 hash = hash^(hash<<3)^(it->getVal()->hash());
1462 model_print("HASH %u\n", hash);
1463 model_print("------------------------------------------------------------------------------------\n");
1466 #if SUPPORT_MOD_ORDER_DUMP
1467 void ModelExecution::dumpGraph(char *filename)
1470 sprintf(buffer, "%s.dot", filename);
1471 FILE *file = fopen(buffer, "w");
1472 fprintf(file, "digraph %s {\n", filename);
1473 mo_graph->dumpNodes(file);
1474 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1476 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1477 ModelAction *act = it->getVal();
1478 if (act->is_read()) {
1479 mo_graph->dot_print_node(file, act);
1480 mo_graph->dot_print_edge(file,
1481 act->get_reads_from(),
1483 "label=\"rf\", color=red, weight=2");
1485 if (thread_array[act->get_tid()]) {
1486 mo_graph->dot_print_edge(file,
1487 thread_array[id_to_int(act->get_tid())],
1489 "label=\"sb\", color=blue, weight=400");
1492 thread_array[act->get_tid()] = act;
1494 fprintf(file, "}\n");
1495 model_free(thread_array);
1500 /** @brief Prints an execution trace summary. */
1501 void ModelExecution::print_summary()
1503 #if SUPPORT_MOD_ORDER_DUMP
1504 char buffername[100];
1505 sprintf(buffername, "exec%04u", get_execution_number());
1506 mo_graph->dumpGraphToFile(buffername);
1507 sprintf(buffername, "graph%04u", get_execution_number());
1508 dumpGraph(buffername);
1511 model_print("Execution trace %d:", get_execution_number());
1512 if (scheduler->all_threads_sleeping())
1513 model_print(" SLEEP-SET REDUNDANT");
1514 if (have_bug_reports())
1515 model_print(" DETECTED BUG(S)");
1519 print_list(&action_trace);
1525 * Add a Thread to the system for the first time. Should only be called once
1527 * @param t The Thread to add
1529 void ModelExecution::add_thread(Thread *t)
1531 unsigned int i = id_to_int(t->get_id());
1532 if (i >= thread_map.size())
1533 thread_map.resize(i + 1);
1535 if (!t->is_model_thread())
1536 scheduler->add_thread(t);
1540 * @brief Get a Thread reference by its ID
1541 * @param tid The Thread's ID
1542 * @return A Thread reference
1544 Thread * ModelExecution::get_thread(thread_id_t tid) const
1546 unsigned int i = id_to_int(tid);
1547 if (i < thread_map.size())
1548 return thread_map[i];
1553 * @brief Get a reference to the Thread in which a ModelAction was executed
1554 * @param act The ModelAction
1555 * @return A Thread reference
1557 Thread * ModelExecution::get_thread(const ModelAction *act) const
1559 return get_thread(act->get_tid());
1563 * @brief Get a Thread reference by its pthread ID
1564 * @param index The pthread's ID
1565 * @return A Thread reference
1567 Thread * ModelExecution::get_pthread(pthread_t pid) {
1573 uint32_t thread_id = x.v;
1574 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1579 * @brief Check if a Thread is currently enabled
1580 * @param t The Thread to check
1581 * @return True if the Thread is currently enabled
1583 bool ModelExecution::is_enabled(Thread *t) const
1585 return scheduler->is_enabled(t);
1589 * @brief Check if a Thread is currently enabled
1590 * @param tid The ID of the Thread to check
1591 * @return True if the Thread is currently enabled
1593 bool ModelExecution::is_enabled(thread_id_t tid) const
1595 return scheduler->is_enabled(tid);
1599 * @brief Select the next thread to execute based on the curren action
1601 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1602 * actions should be followed by the execution of their child thread. In either
1603 * case, the current action should determine the next thread schedule.
1605 * @param curr The current action
1606 * @return The next thread to run, if the current action will determine this
1607 * selection; otherwise NULL
1609 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1611 /* Do not split atomic RMW */
1612 if (curr->is_rmwr() && !paused_by_fuzzer(curr))
1613 return get_thread(curr);
1614 /* Follow CREATE with the created thread */
1615 /* which is not needed, because model.cc takes care of this */
1616 if (curr->get_type() == THREAD_CREATE)
1617 return curr->get_thread_operand();
1618 if (curr->get_type() == PTHREAD_CREATE) {
1619 return curr->get_thread_operand();
1624 /** @param act A read atomic action */
1625 bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
1627 ASSERT(act->is_read());
1629 // Actions paused by fuzzer have their sequence number reset to 0
1630 return act->get_seq_number() == 0;
1634 * Takes the next step in the execution, if possible.
1635 * @param curr The current step to take
1636 * @return Returns the next Thread to run, if any; NULL if this execution
1639 Thread * ModelExecution::take_step(ModelAction *curr)
1641 Thread *curr_thrd = get_thread(curr);
1642 ASSERT(curr_thrd->get_state() == THREAD_READY);
1644 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1645 curr = check_current_action(curr);
1648 /* Process this action in ModelHistory for records */
1649 model->get_history()->process_action( curr, curr->get_tid() );
1651 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1652 scheduler->remove_thread(curr_thrd);
1654 return action_select_next_thread(curr);
1657 void ModelExecution::removeAction(ModelAction *act) {
1659 sllnode<ModelAction *> * listref = act->getTraceRef();
1660 if (listref != NULL) {
1661 action_trace.erase(listref);
1665 sllnode<ModelAction *> * listref = act->getThrdMapRef();
1666 if (listref != NULL) {
1667 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1668 (*vec)[act->get_tid()].erase(listref);
1671 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1672 sllnode<ModelAction *> * listref = act->getActionRef();
1673 if (listref != NULL) {
1674 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1675 list->erase(listref);
1677 } else if (act->is_wait()) {
1678 sllnode<ModelAction *> * listref = act->getActionRef();
1679 if (listref != NULL) {
1680 void *mutex_loc = (void *) act->get_value();
1681 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1683 } else if (act->is_write()) {
1684 sllnode<ModelAction *> * listref = act->getActionRef();
1685 if (listref != NULL) {
1686 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1687 (*vec)[act->get_tid()].erase(listref);
1689 //Remove from Cyclegraph
1690 mo_graph->freeAction(act);
1694 ClockVector * ModelExecution::computeMinimalCV() {
1695 ClockVector *cvmin = NULL;
1696 for(unsigned int i = 0;i < thread_map.size();i++) {
1697 Thread * t = thread_map[i];
1698 if (t->get_state() == THREAD_COMPLETED)
1700 thread_id_t tid = int_to_id(i);
1701 ClockVector * cv = get_cv(tid);
1703 cvmin = new ClockVector(cv, NULL);
1705 cvmin->minmerge(cv);
1711 //How often to check for memory
1712 //How much of the trace to always keep
1713 //Whether to sacrifice completeness...i.e., remove visible writes
// Garbage-collect actions that can no longer influence the exploration:
// actions older than the minimal clock vector (invisible to every live
// thread) are marked free via the modification-order graph, then a reverse
// sweep removes them (and redundant fences/unlocks) from all lists.
1715 void ModelExecution::collectActions() {
1716 //Compute minimal clock vector for all live threads
1717 ClockVector *cvmin = computeMinimalCV();
1718 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Keep at least the last 'traceminsize' sequence numbers of the trace.
1719 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1721 //Next walk action trace... When we hit an action, see if it is
1722 //invisible (e.g., earlier than the first before the minimum
1723 //clock for the thread... if so erase it and all previous
1724 //actions in cyclegraph
1725 sllnode<ModelAction*> * it;
1726 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1727 ModelAction *act = it->getVal();
1728 modelclock_t actseq = act->get_seq_number();
1730 //See if we are done
1731 if (actseq > maxtofree)
1734 thread_id_t act_tid = act->get_tid();
1735 modelclock_t tid_clock = cvmin->getClock(act_tid);
// Candidate for freeing if invisible, or if configured to drop visible writes.
1736 if (actseq <= tid_clock || params->removevisible) {
1737 ModelAction * write;
// For a read, the relevant MO-graph node is the write it read from.
1738 if (act->is_write()) {
1740 } else if (act->is_read()) {
1741 write = act->get_reads_from();
1745 //Mark everything earlier in MO graph to be freed
1746 CycleNode * cn = mo_graph->getNode_noCreate(write);
1747 queue->push_back(cn);
// Worklist traversal of incoming MO edges: everything ordered before a
// freeable write is itself freeable.
1748 while(!queue->empty()) {
1749 CycleNode * node = queue->back();
1751 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1752 CycleNode * prevnode = node->getInEdge(i);
1753 ModelAction * prevact = prevnode->getAction();
1754 if (prevact->get_type() != READY_FREE) {
1755 prevact->set_free();
1756 queue->push_back(prevnode);
// Reverse sweep from where the forward walk stopped: physically remove
// freed actions and prune now-redundant metadata.
1762 for (;it != NULL;it=it->getPrev()) {
1763 ModelAction *act = it->getVal();
1764 if (act->is_free()) {
1767 } else if (act->is_read()) {
// A read whose source write was freed cannot be replayed; drop it too.
1768 if (act->get_reads_from()->is_free()) {
1772 const ModelAction *rel_fence =act->get_last_fence_release();
1773 if (rel_fence != NULL) {
1774 modelclock_t relfenceseq = rel_fence->get_seq_number();
1775 thread_id_t relfence_tid = rel_fence->get_tid();
1776 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1777 //Remove references to irrelevant release fences
1778 if (relfenceseq <= tid_clock)
1779 act->set_last_fence_release(NULL);
1782 } else if (act->is_fence()) {
1783 //Note that acquire fences can always be safely
1784 //removed, but could incur extra overheads in
1785 //traversals. Removing them before the cvmin seems
1786 //like a good compromise.
1788 //Release fences before the cvmin don't do anything
1789 //because everyone has already synchronized.
1791 //Sequentially fences before cvmin are redundant
1792 //because happens-before will enforce same
1795 modelclock_t actseq = act->get_seq_number();
1796 thread_id_t act_tid = act->get_tid();
1797 modelclock_t tid_clock = cvmin->getClock(act_tid);
1798 if (actseq <= tid_clock) {
1803 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
1804 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1805 //need to keep most recent unlock/wait for each lock
// Only the newest unlock/wait per mutex must survive; older ones go.
1806 if(act->is_unlock() || act->is_wait()) {
1807 ModelAction * lastlock = get_last_unlock(act);
1808 if (lastlock != act) {
1825 Fuzzer * ModelExecution::getFuzzer() {