11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
/* Thread id assigned to the first thread created (see ModelExecution::get_next_id()) */
20 #define INITIAL_THREAD_ID 0
/* File-local counters of atomic operations observed, bumped in
 * record_atomic_stats() and reported by print_atomic_accesses().
 * NOTE(review): plain (non-snapshotted) statics — presumably cumulative
 * across executions; confirm against snapshotting setup. */
23 static unsigned int atomic_load_count = 0;
24 static unsigned int atomic_store_count = 0;
25 static unsigned int atomic_rmw_count = 0;
27 static unsigned int atomic_fence_count = 0;
28 static unsigned int atomic_lock_count = 0;
29 static unsigned int atomic_trylock_count = 0;
30 static unsigned int atomic_unlock_count = 0;
31 static unsigned int atomic_notify_count = 0;
32 static unsigned int atomic_wait_count = 0;
33 static unsigned int atomic_timedwait_count = 0;
37 * Structure for holding small ModelChecker members that should be snapshotted
39 struct model_snapshot_members {
40 model_snapshot_members() :
41 /* First thread created will have id INITIAL_THREAD_ID */
42 next_thread_id(INITIAL_THREAD_ID),
43 used_sequence_numbers(0),
/* Destructor: releases the recorded bug messages.
 * NOTE(review): the delete statement for bugs[i] is not visible in this
 * extract — confirm each bug_message is freed in the loop body. */
48 ~model_snapshot_members() {
49 for (unsigned int i = 0;i < bugs.size();i++)
// Next thread id to hand out; doubles as the count of threads created.
54 unsigned int next_thread_id;
// Last sequence number assigned to a ModelAction in this execution.
55 modelclock_t used_sequence_numbers;
// Bug reports accumulated during this execution (see assert_bug()).
56 SnapVector<bug_message *> bugs;
57 /** @brief Incorrectly-ordered synchronization was made */
63 /** @brief Constructor
 * @param m The owning ModelChecker; registered with the fuzzer below.
 * @param scheduler The scheduler driving this execution; registered with us. */
64 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
68 thread_map(2), /* We'll always need at least 2 threads */
73 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 priv(new struct model_snapshot_members ()),
82 mo_graph(new CycleGraph()),
84 fuzzer(new NewFuzzer()),
90 /* Initialize a model-checker thread, for special ModelActions */
91 model_thread = new Thread(get_next_id());
92 add_thread(model_thread);
93 fuzzer->register_engine(m, this);
94 scheduler->register_engine(this);
// TLS key whose destructor runs per-thread cleanup on thread exit.
96 pthread_key_create(&pthreadkey, tlsdestructor);
100 /** @brief Destructor — frees every Thread created during this execution,
 * including the special model-checker thread at index 0. */
101 ModelExecution::~ModelExecution()
103 for (unsigned int i = 0;i < get_num_threads();i++)
104 delete get_thread(int_to_id(i));
/** @return the current execution number, delegated to the owning ModelChecker */
110 int ModelExecution::get_execution_number() const
112 return model->get_execution_number();
/** Lazily fetch (or create) the per-location vector of per-thread action lists.
 * NOTE(review): the hash->put(...) storing the new vector is omitted in this
 * extract — confirm the freshly allocated vector is inserted into the table. */
115 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
117 SnapVector<action_list_t> *tmp = hash->get(ptr);
119 tmp = new SnapVector<action_list_t>();
/** Lazily fetch (or create) the per-location simple action list for ptr.
 * NOTE(review): the hash insertion of the new list is not visible here. */
125 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
127 simple_action_list_t *tmp = hash->get(ptr);
129 tmp = new simple_action_list_t();
/** Lazily fetch (or create) the per-location vector of per-thread simple
 * action lists (overload for simple_action_list_t).
 * NOTE(review): the hash insertion of the new vector is not visible here. */
135 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
137 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
139 tmp = new SnapVector<simple_action_list_t>();
146 * When vectors of action lists are reallocated due to resize, the root address of
147 * action lists may change. Hence we need to fix the parent pointer of the children
 * (each list element keeps a back-pointer into the vector's storage).
150 static void fixup_action_list(SnapVector<action_list_t> * vec)
152 for (uint i = 0;i < vec->size();i++) {
153 action_list_t * list = &(*vec)[i];
// NOTE(review): the per-list fixup call on `list` is omitted in this extract.
/** Bump the file-local statistics counter matching act's type.
 * NOTE(review): several cases (load, rmw, lock, wait, default) are omitted
 * from this extract; presumably they increment the remaining counters. */
160 static inline void record_atomic_stats(ModelAction * act)
162 switch (act->get_type()) {
164 atomic_store_count++;
173 atomic_fence_count++;
179 atomic_trylock_count++;
182 atomic_unlock_count++;
184 case ATOMIC_NOTIFY_ONE:
185 case ATOMIC_NOTIFY_ALL:
186 atomic_notify_count++;
191 case ATOMIC_TIMEDWAIT:
192 atomic_timedwait_count++;
/** Print all accumulated atomic-operation statistics counters. */
198 void print_atomic_accesses()
200 model_print("atomic store count: %u\n", atomic_store_count);
201 model_print("atomic load count: %u\n", atomic_load_count);
202 model_print("atomic rmw count: %u\n", atomic_rmw_count);
204 model_print("atomic fence count: %u\n", atomic_fence_count);
205 model_print("atomic lock count: %u\n", atomic_lock_count);
206 model_print("atomic trylock count: %u\n", atomic_trylock_count);
207 model_print("atomic unlock count: %u\n", atomic_unlock_count);
208 model_print("atomic notify count: %u\n", atomic_notify_count);
209 model_print("atomic wait count: %u\n", atomic_wait_count);
210 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
213 /** @return a thread ID for a new Thread (post-increment: ids are dense from INITIAL_THREAD_ID) */
214 thread_id_t ModelExecution::get_next_id()
216 return priv->next_thread_id++;
219 /** @return the number of user threads created during this execution
 * (next_thread_id doubles as the count, since ids are handed out densely) */
220 unsigned int ModelExecution::get_num_threads() const
222 return priv->next_thread_id;
225 /** @return a sequence number for a new ModelAction (pre-increment: allocates a fresh number) */
226 modelclock_t ModelExecution::get_next_seq_num()
228 return ++priv->used_sequence_numbers;
231 /** @return the most recently used sequence number, without allocating a new one */
232 modelclock_t ModelExecution::get_curr_seq_num()
234 return priv->used_sequence_numbers;
237 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
238 void ModelExecution::restore_last_seq_num()
240 priv->used_sequence_numbers--;
244 * @brief Should the current action wake up a given thread?
246 * @param curr The current action
247 * @param thread The thread that we might wake up
248 * @return True, if we should wake up the sleeping thread; false otherwise
 * NOTE(review): declared with a curr parameter in the doc but the visible
 * signature takes only thread — doc and signature disagree; confirm.
250 bool ModelExecution::should_wake_up(const Thread *thread) const
252 const ModelAction *asleep = thread->get_pending();
254 /* The sleep is literally sleeping */
255 switch (asleep->get_type()) {
// Thread-sleep case: defer to fuzzer policy.
257 if (fuzzer->shouldWake(asleep))
261 case ATOMIC_TIMEDWAIT:
// Condvar (timed)wait case: fuzzer decides spurious/real wakeup.
262 if (fuzzer->waitShouldWakeUp(asleep))
/** Scan every thread in the scheduler's sleep set and wake those the fuzzer
 * says should wake; waking a condvar waiter also removes its entry from the
 * condvar_waiters_map list for its location. */
272 void ModelExecution::wake_up_sleeping_actions()
274 for (unsigned int i = 0;i < get_num_threads();i++) {
275 thread_id_t tid = int_to_id(i);
276 if (scheduler->is_sleep_set(tid)) {
277 Thread *thr = get_thread(tid);
278 if (should_wake_up(thr)) {
279 /* Remove this thread from sleep set */
280 scheduler->remove_sleep(thr);
281 ModelAction * pending = thr->get_pending();
282 if (pending->is_sleep()) {
283 thr->set_wakeup_state(true);
284 } else if (pending->is_wait()) {
285 thr->set_wakeup_state(true);
// Find and drop this thread's waiter entry for the condvar location.
286 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location());
287 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
288 if (rit->getVal()->get_tid() == tid) {
// NOTE(review): the erase of the matching node is omitted in this extract.
/** Record a bug report for this execution. @param msg human-readable description */
299 void ModelExecution::assert_bug(const char *msg)
301 priv->bugs.push_back(new bug_message(msg));
305 /** @return True, if any bugs have been reported for this execution */
306 bool ModelExecution::have_bug_reports() const
308 return priv->bugs.size() != 0;
/** @return the list of bug messages recorded so far for this execution */
311 SnapVector<bug_message *> * ModelExecution::get_bugs() const
317 * Check whether the current trace has triggered an assertion which should halt
320 * @return True, if the execution should be aborted; false otherwise
322 bool ModelExecution::has_asserted() const
324 return priv->asserted;
328 * Trigger a trace assertion which should cause this execution to be halted.
329 * This can be due to a detected bug or due to an infeasibility that should
 * terminate the exploration of this trace.
332 void ModelExecution::set_assert()
334 priv->asserted = true;
338 * Check if we are in a deadlock. Should only be called at the end of an
339 * execution, although it should not give false positives in the middle of an
340 * execution (there should be some ENABLED thread).
342 * @return True if program is in a deadlock; false otherwise
 * NOTE(review): lines checking for enabled threads appear to be omitted in
 * this extract — as shown, only the "some thread is blocked" half is visible.
344 bool ModelExecution::is_deadlocked() const
346 bool blocking_threads = false;
347 for (unsigned int i = 0;i < get_num_threads();i++) {
348 thread_id_t tid = int_to_id(i);
351 Thread *t = get_thread(tid);
// A non-model thread with a pending action is blocked on something.
352 if (!t->is_model_thread() && t->get_pending())
353 blocking_threads = true;
355 return blocking_threads;
359 * Check if this is a complete execution. That is, have all thread completed
360 * execution (rather than exiting because sleep sets have forced a redundant
 * execution).
363 * @return True if the execution is complete.
365 bool ModelExecution::is_complete_execution() const
// Complete iff no thread remains enabled.
367 for (unsigned int i = 0;i < get_num_threads();i++)
368 if (is_enabled(int_to_id(i)))
/** Materialize a prior plain (non-atomic) store to `location` as a
 * NONATOMIC_WRITE ModelAction so reads can read-from it: recovers the
 * storing thread and clock from shadow memory, flags the location as
 * atomic from now on, back-dates the action's sequence number, and wires
 * it into the write lists and modification order.
 * @param location the address whose last non-atomic store is converted
 * @return the newly created NONATOMIC_WRITE action */
373 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
374 uint64_t value = *((const uint64_t *) location);
375 modelclock_t storeclock;
376 thread_id_t storethread;
377 getStoreThreadAndClock(location, &storethread, &storeclock);
378 setAtomicStoreFlag(location);
379 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
380 act->set_seq_number(storeclock);
381 add_normal_write_to_lists(act);
382 add_write_to_lists(act);
383 w_modification_order(act);
385 model->get_history()->process_action(act, act->get_tid());
391 * Processes a read model action.
392 * @param curr is the read model action to process.
393 * @param rf_set is the set of model actions we can possibly read from
394 * @return True if the read can be pruned from the thread map list.
396 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
398 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
// If the location saw a plain store before becoming atomic, surface it
// as a candidate write.
399 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
400 if (hasnonatomicstore) {
401 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
402 rf_set->push_back(nonatomicstore);
405 // Remove writes that violate read modification order
// (check_only pass: swap-with-back removal keeps the scan O(n))
408 while (i < rf_set->size()) {
409 ModelAction * rf = (*rf_set)[i];
410 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
411 (*rf_set)[i] = rf_set->back();
// Let the fuzzer pick the write to read from among the survivors.
418 int index = fuzzer->selectWrite(curr, rf_set);
420 ModelAction *rf = (*rf_set)[index];
423 bool canprune = false;
424 if (r_modification_order(curr, rf, priorset, &canprune)) {
// Commit the mo edges implied by this rf choice.
425 for(unsigned int i=0;i<priorset->size();i++) {
426 mo_graph->addEdge((*priorset)[i], rf);
429 get_thread(curr)->set_return_value(rf->get_write_value());
431 //Update acquire fence clock vector
432 ClockVector * hbcv = get_hb_from_write(rf);
434 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
435 return canprune && (curr->get_type() == ATOMIC_READ);
// Chosen write failed full r_modification_order: discard it and retry.
438 (*rf_set)[index] = rf_set->back();
444 * Processes a lock, trylock, or unlock model action. @param curr is
445 * the mutex model action to process.
447 * The try lock operation checks whether the lock is taken. If not,
448 * it falls to the normal lock operation case. If so, it returns
 * a failure value to the caller.
451 * The lock operation has already been checked that it is enabled, so
452 * it just grabs the lock and synchronizes with the previous unlock.
454 * The unlock operation has to re-enable all of the threads that are
455 * waiting on the lock.
457 * @return True if synchronization was updated; false otherwise
459 bool ModelExecution::process_mutex(ModelAction *curr)
461 cdsc::mutex *mutex = curr->get_mutex();
462 struct cdsc::mutex_state *state = NULL;
465 state = mutex->get_state();
467 switch (curr->get_type()) {
468 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff no thread currently holds the lock.
469 bool success = !state->locked;
470 curr->set_try_lock(success);
472 get_thread(curr)->set_return_value(0);
475 get_thread(curr)->set_return_value(1);
477 //otherwise fall into the lock case
479 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
480 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
481 // assert_bug("Lock access before initialization");
483 // TODO: lock count for recursive mutexes
// Take ownership; the owner is recorded as the Thread pointer.
484 state->locked = get_thread(curr);
485 ModelAction *unlock = get_last_unlock(curr);
486 //synchronize with the previous unlock statement
487 if (unlock != NULL) {
488 synchronize(unlock, curr);
494 Thread *curr_thrd = get_thread(curr);
495 /* wake up the other threads */
496 for (unsigned int i = 0;i < get_num_threads();i++) {
497 Thread *t = get_thread(int_to_id(i));
498 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
502 /* unlock the lock - after checking who was waiting on it */
503 state->locked = NULL;
505 /* disable this thread */
// Register this wait on the condvar's waiter list, then either put the
// thread in the sleep set (fuzzer says wait may spuriously fail) or
// block it outright.
506 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
507 waiters->push_back(curr);
508 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
510 if (fuzzer->waitShouldFail(curr))
511 scheduler->add_sleep(curr_thrd); // Place this thread into THREAD_SLEEP_SET
513 scheduler->sleep(curr_thrd);
517 case ATOMIC_TIMEDWAIT:
518 case ATOMIC_UNLOCK: {
519 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
520 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
521 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
522 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
523 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
524 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
525 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
526 //MUST EVENTUALLY RELEASE...
528 // TODO: lock count for recursive mutexes
529 /* wake up the other threads */
530 Thread *curr_thrd = get_thread(curr);
531 for (unsigned int i = 0;i < get_num_threads();i++) {
532 Thread *t = get_thread(int_to_id(i));
533 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
537 /* unlock the lock - after checking who was waiting on it */
538 state->locked = NULL;
541 case ATOMIC_NOTIFY_ALL: {
542 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
543 //activate all the waiting threads
544 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
545 Thread * thread = get_thread(rit->getVal());
546 if (thread->get_state() != THREAD_COMPLETED)
547 scheduler->wake(thread);
548 thread->set_wakeup_state(true);
553 case ATOMIC_NOTIFY_ONE: {
// notify_one: fuzzer picks which waiter (if any) to wake.
554 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
555 if (waiters->size() != 0) {
556 Thread * thread = fuzzer->selectNotify(waiters);
557 if (thread->get_state() != THREAD_COMPLETED)
558 scheduler->wake(thread);
559 thread->set_wakeup_state(true);
571 * Process a write ModelAction
572 * @param curr The ModelAction to process
573 * @return True if the mo_graph was updated or promises were resolved
 * NOTE(review): doc claims a bool return but the visible signature is void.
575 void ModelExecution::process_write(ModelAction *curr)
// Establish modification-order edges for this write, then report success.
577 w_modification_order(curr);
578 get_thread(curr)->set_return_value(VALUE_NONE);
582 * Process a fence ModelAction
583 * @param curr The ModelAction to process
584 * @return True if synchronization was updated
 * NOTE(review): doc claims a bool return but the visible signature is void.
586 void ModelExecution::process_fence(ModelAction *curr)
589 * fence-relaxed: no-op
590 * fence-release: only log the occurence (not in this function), for
591 * use in later synchronization
592 * fence-acquire (this function): search for hypothetical release
594 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
// acquire fence: pull in everything released to this thread so far.
596 if (curr->is_acquire()) {
597 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
602 * @brief Process the current action for thread-related activity
604 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
605 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
606 * synchronization, etc. This function is a no-op for non-THREAD actions
607 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
609 * @param curr The current action
610 * @return True if synchronization was updated or a thread completed
612 void ModelExecution::process_thread_action(ModelAction *curr)
614 switch (curr->get_type()) {
615 case THREAD_CREATE: {
// C11 thrd_create: thread params are smuggled through the action's value.
616 thrd_t *thrd = (thrd_t *)curr->get_location();
617 struct thread_params *params = (struct thread_params *)curr->get_value();
618 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
619 curr->set_thread_operand(th);
621 th->set_creation(curr);
624 case PTHREAD_CREATE: {
// pthread_create: hand the caller a dense pthread id via pthread_counter.
625 (*(uint32_t *)curr->get_location()) = pthread_counter++;
627 struct pthread_params *params = (struct pthread_params *)curr->get_value();
628 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
629 curr->set_thread_operand(th);
631 th->set_creation(curr);
// Keep the pthread id -> Thread* map sized and up to date.
633 if ( pthread_map.size() < pthread_counter )
634 pthread_map.resize( pthread_counter );
635 pthread_map[ pthread_counter-1 ] = th;
// join: synchronize with the joined thread's last action.
640 Thread *blocking = curr->get_thread_operand();
641 ModelAction *act = get_last_action(blocking->get_id());
642 synchronize(act, curr);
646 Thread *blocking = curr->get_thread_operand();
647 ModelAction *act = get_last_action(blocking->get_id());
648 synchronize(act, curr);
652 case THREADONLY_FINISH:
653 case THREAD_FINISH: {
654 Thread *th = get_thread(curr);
// Main thread finishing ends the whole execution (body omitted here).
655 if (curr->get_type() == THREAD_FINISH &&
656 th == model->getInitThread()) {
662 /* Wake up any joining threads */
663 for (unsigned int i = 0;i < get_num_threads();i++) {
664 Thread *waiting = get_thread(int_to_id(i));
665 if (waiting->waiting_on() == th &&
666 waiting->get_pending()->is_thread_join())
667 scheduler->wake(waiting);
// sleep-like action: park the thread with its pending action stashed.
676 Thread *th = get_thread(curr);
677 th->set_pending(curr);
678 scheduler->add_sleep(th);
687 * Initialize the current action by performing one or more of the following
688 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
689 * manipulating backtracking sets, allocating and
690 * initializing clock vectors, and computing the promises to fulfill.
692 * @param curr The current action, as passed from the user context; may be
693 * freed/invalidated after the execution of this function, with a different
694 * action "returned" its place (pass-by-reference)
695 * @return True if curr is a newly-explored action; false otherwise
697 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// RMW continuation: merge into the pending RMWR (early-return path omitted
// in this extract).
699 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
700 ModelAction *newcurr = process_rmw(*curr);
706 ModelAction *newcurr = *curr;
708 newcurr->set_seq_number(get_next_seq_num());
709 /* Always compute new clock vector */
710 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
712 /* Assign most recent release fence */
713 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
715 return true; /* This was a new ModelAction */
720 * @brief Establish reads-from relation between two actions
722 * Perform basic operations involved with establishing a concrete rf relation,
723 * including setting the ModelAction data and checking for release sequences.
725 * @param act The action that is reading (must be a read)
726 * @param rf The action from which we are reading (must be a write)
728 * @return True if this read established synchronization
 * NOTE(review): doc claims a bool return but the visible signature is void.
731 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
734 ASSERT(rf->is_write());
736 act->set_read_from(rf);
// Acquire read: inherit the happens-before frontier released by the write.
737 if (act->is_acquire()) {
738 ClockVector *cv = get_hb_from_write(rf);
741 act->get_cv()->merge(cv);
746 * @brief Synchronizes two actions
748 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
749 * This function performs the synchronization as well as providing other hooks
750 * for other checks along with synchronization.
752 * @param first The left-hand side of the synchronizes-with relation
753 * @param second The right-hand side of the synchronizes-with relation
754 * @return True if the synchronization was successful (i.e., was consistent
755 * with the execution order); false otherwise
757 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A later action can never synchronize-with an earlier one.
759 if (*second < *first) {
760 ASSERT(0); //This should not happend
763 return second->synchronize_with(first);
767 * @brief Check whether a model action is enabled.
769 * Checks whether an operation would be successful (i.e., is a lock already
770 * locked, or is the joined thread already complete).
772 * For yield-blocking, yields are never enabled.
774 * @param curr is the ModelAction to check whether it is enabled.
775 * @return a bool that indicates whether the action is enabled.
777 bool ModelExecution::check_action_enabled(ModelAction *curr) {
778 switch (curr->get_type()) {
// lock: enabled if free, or if we already own a recursive mutex.
780 cdsc::mutex *lock = curr->get_mutex();
781 struct cdsc::mutex_state *state = lock->get_state();
783 Thread *lock_owner = (Thread *)state->locked;
784 Thread *curr_thread = get_thread(curr);
785 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
// join: enabled only once the target thread has completed.
795 Thread *blocking = curr->get_thread_operand();
796 if (!blocking->is_complete()) {
// sleep: fuzzer may elide the sleep entirely.
802 if (!fuzzer->shouldSleep(curr))
814 * This is the heart of the model checker routine. It performs model-checking
815 * actions corresponding to a given "current action." Among other processes, it
816 * calculates reads-from relationships, updates synchronization clock vectors,
817 * forms a memory_order constraints graph, and handles replay/backtrack
818 * execution when running permutations of previously-observed executions.
820 * @param curr The current action to process
821 * @return The ModelAction that is actually executed; may be different than
 * the parameter `curr` (e.g., an RMW merged into an earlier RMWR)
824 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
827 bool newly_explored = initialize_curr_action(&curr);
831 wake_up_sleeping_actions();
833 SnapVector<ModelAction *> * rf_set = NULL;
834 bool canprune = false;
835 /* Build may_read_from set for newly-created actions */
836 if (curr->is_read() && newly_explored) {
837 rf_set = build_may_read_from(curr);
838 canprune = process_read(curr, rf_set);
// process_read consumes rf_set (deletion omitted from this extract).
841 ASSERT(rf_set == NULL);
843 /* Add the action to lists if not the second part of a rmw */
844 if (newly_explored) {
846 record_atomic_stats(curr);
848 add_action_to_lists(curr, canprune);
851 if (curr->is_write())
852 add_write_to_lists(curr);
854 process_thread_action(curr);
// Dispatch to the per-kind processors (bodies omitted in this extract).
856 if (curr->is_write())
859 if (curr->is_fence())
862 if (curr->is_mutex_op())
868 /** Close out a RMWR by converting previous RMWR into a RMW or READ.
 * Merges `act` into the thread's last (read) action and adds the RMW
 * atomicity edge (write read-from --mo--> this RMW) to the mo_graph. */
869 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
870 ModelAction *lastread = get_last_action(act->get_tid());
871 lastread->process_rmw(act);
873 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
879 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
882 * Basic idea is the following: Go through each other thread and find
883 * the last action that happened before our read. Two cases:
885 * -# The action is a write: that write must either occur before
886 * the write we read from or be the write we read from.
887 * -# The action is a read: the write that that action read from
888 * must occur before the write we read from or be the same write.
890 * @param curr The current action. Must be a read.
891 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
892 * @param check_only If true, then only check whether the current action satisfies
893 * read modification order or not, without modifiying priorset and canprune.
895 * @return True if modification order edges were added; false otherwise
898 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
899 SnapVector<ModelAction *> * priorset, bool * canprune)
901 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
902 ASSERT(curr->is_read());
904 /* Last SC fence in the current thread */
905 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
907 int tid = curr->get_tid();
909 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
910 if ((int)thrd_lists->size() <= tid) {
911 uint oldsize = thrd_lists->size();
912 thrd_lists->resize(priv->next_thread_id);
913 for(uint i = oldsize;i < priv->next_thread_id;i++)
914 new (&(*thrd_lists)[i]) action_list_t();
// resize may have moved the lists; repair their internal back-pointers.
916 fixup_action_list(thrd_lists);
919 ModelAction *prev_same_thread = NULL;
920 /* Iterate over all threads */
// tid walks the threads starting from curr's own, wrapping around.
921 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
922 /* Last SC fence in thread tid */
923 ModelAction *last_sc_fence_thread_local = NULL;
925 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
927 /* Last SC fence in thread tid, before last SC fence in current thread */
928 ModelAction *last_sc_fence_thread_before = NULL;
929 if (last_sc_fence_local)
930 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
932 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
933 if (prev_same_thread != NULL &&
934 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
935 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
939 /* Iterate over actions in thread, starting from most recent */
940 action_list_t *list = &(*thrd_lists)[tid];
941 sllnode<ModelAction *> * rit;
942 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
943 ModelAction *act = rit->getVal();
948 /* Don't want to add reflexive edges on 'rf' */
949 if (act->equals(rf)) {
950 if (act->happens_before(curr))
956 if (act->is_write()) {
957 /* C++, Section 29.3 statement 5 */
958 if (curr->is_seqcst() && last_sc_fence_thread_local &&
959 *act < *last_sc_fence_thread_local) {
// rf reachable from act would create a mo cycle -> reject this rf.
960 if (mo_graph->checkReachable(rf, act))
962 priorset->push_back(act);
965 /* C++, Section 29.3 statement 4 */
966 else if (act->is_seqcst() && last_sc_fence_local &&
967 *act < *last_sc_fence_local) {
968 if (mo_graph->checkReachable(rf, act))
970 priorset->push_back(act);
973 /* C++, Section 29.3 statement 6 */
974 else if (last_sc_fence_thread_before &&
975 *act < *last_sc_fence_thread_before) {
976 if (mo_graph->checkReachable(rf, act))
978 priorset->push_back(act);
984 * Include at most one act per-thread that "happens
 * before" curr
987 if (act->happens_before(curr)) {
989 if (last_sc_fence_local == NULL ||
990 (*last_sc_fence_local < *act)) {
991 prev_same_thread = act;
994 if (act->is_write()) {
995 if (mo_graph->checkReachable(rf, act))
997 priorset->push_back(act);
// act is a read: its source write must be mo-before rf.
999 ModelAction *prevrf = act->get_reads_from();
1000 if (!prevrf->equals(rf)) {
1001 if (mo_graph->checkReachable(rf, prevrf))
1003 priorset->push_back(prevrf);
1005 if (act->get_tid() == curr->get_tid()) {
1006 //Can prune curr from obj list
1019 * Updates the mo_graph with the constraints imposed from the current write.
1021 * Basic idea is the following: Go through each other thread and find
1022 * the lastest action that happened before our write. Two cases:
1024 * (1) The action is a write => that write must occur before
 * the current write
1027 * (2) The action is a read => the write that that action read from
1028 * must occur before the current write.
1030 * This method also handles two other issues:
1032 * (I) Sequential Consistency: Making sure that if the current write is
1033 * seq_cst, that it occurs after the previous seq_cst write.
1035 * (II) Sending the write back to non-synchronizing reads.
1037 * @param curr The current action. Must be a write.
1038 * @param send_fv A vector for stashing reads to which we may pass our future
1039 * value. If NULL, then don't record any future values.
1040 * @return True if modification order edges were added; false otherwise
 * NOTE(review): doc mentions send_fv and a bool return; the visible
 * signature has neither — doc appears stale.
1042 void ModelExecution::w_modification_order(ModelAction *curr)
1044 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1046 ASSERT(curr->is_write());
// Writes that must be mo-before curr; edges committed in one batch below.
1048 SnapList<ModelAction *> edgeset;
1050 if (curr->is_seqcst()) {
1051 /* We have to at least see the last sequentially consistent write,
1052 so we are initialized. */
1053 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1054 if (last_seq_cst != NULL) {
1055 edgeset.push_back(last_seq_cst);
1057 //update map for next query
1058 obj_last_sc_map.put(curr->get_location(), curr);
1061 /* Last SC fence in the current thread */
1062 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1064 /* Iterate over all threads */
1065 for (i = 0;i < thrd_lists->size();i++) {
1066 /* Last SC fence in thread i, before last SC fence in current thread */
1067 ModelAction *last_sc_fence_thread_before = NULL;
1068 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1069 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1071 /* Iterate over actions in thread, starting from most recent */
1072 action_list_t *list = &(*thrd_lists)[i];
1073 sllnode<ModelAction*>* rit;
1074 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1075 ModelAction *act = rit->getVal();
1078 * 1) If RMW and it actually read from something, then we
1079 * already have all relevant edges, so just skip to next
 * thread.
1082 * 2) If RMW and it didn't read from anything, we should
1083 * whatever edge we can get to speed up convergence.
1085 * 3) If normal write, we need to look at earlier actions, so
1086 * continue processing list.
1088 if (curr->is_rmw()) {
1089 if (curr->get_reads_from() != NULL)
1097 /* C++, Section 29.3 statement 7 */
1098 if (last_sc_fence_thread_before && act->is_write() &&
1099 *act < *last_sc_fence_thread_before) {
1100 edgeset.push_back(act);
1105 * Include at most one act per-thread that "happens
 * before" curr
1108 if (act->happens_before(curr)) {
1110 * Note: if act is RMW, just add edge:
 *   act --mo--> curr
1112 * The following edge should be handled elsewhere:
1113 * readfrom(act) --mo--> act
1115 if (act->is_write())
1116 edgeset.push_back(act);
1117 else if (act->is_read()) {
1118 //if previous read accessed a null, just keep going
1119 edgeset.push_back(act->get_reads_from());
// Commit all collected edges into the modification-order graph at once.
1125 mo_graph->addEdges(&edgeset, curr);
1130 * Computes the clock vector that happens before propagates from this write.
1132 * @param rf The action that might be part of a release sequence. Must be a
 * write.
1134 * @return ClockVector of happens before relation.
1137 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// Walk the rmw reads-from chain backwards, collecting rmw links whose
// release-sequence clock vector (rfcv) still needs to be computed.
1138 SnapVector<ModelAction *> * processset = NULL;
1139 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1140 ASSERT(rf->is_write());
// Stop at a non-rmw write, an acq+rel rmw, or a write with a cached rfcv.
1141 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1143 if (processset == NULL)
1144 processset = new SnapVector<ModelAction *>();
1145 processset->push_back(rf);
// Now replay the chain forward, building/caching clock vectors.
1148 int i = (processset == NULL) ? 0 : processset->size();
1150 ClockVector * vec = NULL;
1152 if (rf->get_rfcv() != NULL) {
// cached result available.
1153 vec = rf->get_rfcv();
1154 } else if (rf->is_acquire() && rf->is_release()) {
1156 } else if (rf->is_release() && !rf->is_rmw()) {
1158 } else if (rf->is_release()) {
1159 //have rmw that is release and doesn't have a rfcv
1160 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1163 //operation that isn't release
1164 if (rf->get_last_fence_release()) {
1166 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1168 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1172 vec = new ClockVector(NULL, NULL);
1175 vec = new ClockVector(vec, NULL);
// Step to the next rmw in the collected chain.
1182 rf = (*processset)[i];
1186 if (processset != NULL)
1192 * Performs various bookkeeping operations for the current ModelAction. For
1193 * instance, adds action to the per-object, per-thread action vector and to the
1194 * action trace list of all thread actions.
1196 * @param act is the ModelAction to add.
 * @param canprune if true, skip the per-location per-thread list for this
 * read/write (the caller proved it can be pruned).
1198 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1200 int tid = id_to_int(act->get_tid());
// SC fences and unlocks go on the per-location obj_map list.
1201 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1202 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1203 act->setActionRef(list->add_back(act));
1206 // Update action trace, a total order of all actions
1207 action_trace.addAction(act);
1210 // Update obj_thrd_map, a per location, per thread, order of actions
1211 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1212 if ((int)vec->size() <= tid) {
1213 uint oldsize = vec->size();
1214 vec->resize(priv->next_thread_id);
1215 for(uint i = oldsize;i < priv->next_thread_id;i++)
1216 new (&(*vec)[i]) action_list_t();
// resize may have moved the lists; repair their internal back-pointers.
1218 fixup_action_list(vec);
1220 if (!canprune && (act->is_read() || act->is_write()))
1221 (*vec)[tid].addAction(act);
1223 // Update thrd_last_action, the last action taken by each thread
1224 if ((int)thrd_last_action.size() <= tid)
1225 thrd_last_action.resize(get_num_threads());
1226 thrd_last_action[tid] = act;
1228 // Update thrd_last_fence_release, the last release fence taken by each thread
1229 if (act->is_fence() && act->is_release()) {
1230 if ((int)thrd_last_fence_release.size() <= tid)
1231 thrd_last_fence_release.resize(get_num_threads());
1232 thrd_last_fence_release[tid] = act;
1235 if (act->is_wait()) {
// A wait's value field carries the mutex address it released.
1236 void *mutex_loc = (void *) act->get_value();
1237 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
// Helper: append an action to the given list (no clock-vector work).
1241 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1242 list->addAction(act);
// Helper: give the action a fresh clock vector (no parent), then append it.
// Used for lazily-inserted normal writes that never synchronized with anyone.
1245 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1246 act->create_cv(NULL);
1247 list->addAction(act);
1251 * Performs various bookkeeping operations for a normal write. The
1252 * complication is that we are typically inserting a normal write
1253 * lazily, so we need to insert it into the middle of lists.
1255 * @param act is the ModelAction to add.
// Bookkeeping for a lazily-inserted normal (non-atomic) write: adds it to
// the global trace with a fresh clock vector and to the per-location,
// per-thread vector, growing that vector if this thread is new to it.
1258 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1260 int tid = id_to_int(act->get_tid());
1261 insertIntoActionListAndSetCV(&action_trace, act);
1263 // Update obj_thrd_map, a per location, per thread, order of actions
1264 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1265 if (tid >= (int)vec->size()) {
1266 uint oldsize =vec->size();
// Placement-construct new slots after resize (same pattern as
// add_action_to_lists).
1267 vec->resize(priv->next_thread_id);
1268 for(uint i=oldsize;i<priv->next_thread_id;i++)
1269 new (&(*vec)[i]) action_list_t();
1271 fixup_action_list(vec);
1273 insertIntoActionList(&(*vec)[tid],act);
1275 ModelAction * lastact = thrd_last_action[tid];
1276 // Update thrd_last_action, the last action taken by each thread
// NOTE(review): equality (==) on sequence numbers looks deliberate — the
// lazily-inserted write apparently shares the current last action's
// sequence number — but confirm against the seq-number assignment site.
1277 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1278 thrd_last_action[tid] = act;
// Record a write in obj_wr_thrd_map (per-location, per-thread write lists),
// growing and placement-constructing the per-thread slots when needed, and
// stash the list node on the action for O(1) removal later.
1282 void ModelExecution::add_write_to_lists(ModelAction *write) {
1283 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1284 int tid = id_to_int(write->get_tid());
1285 if (tid >= (int)vec->size()) {
1286 uint oldsize =vec->size();
1287 vec->resize(priv->next_thread_id);
1288 for(uint i=oldsize;i<priv->next_thread_id;i++)
1289 new (&(*vec)[i]) simple_action_list_t();
1291 write->setActionRef((*vec)[tid].add_back(write));
1295 * @brief Get the last action performed by a particular Thread
1296 * @param tid The thread ID of the Thread in question
1297 * @return The last action in the thread
// Look up the cached last action of a thread; the bounds check guards
// threads that have not yet been recorded in thrd_last_action.
// NOTE(review): the fall-through (out-of-range) return is on a line not
// visible in this listing — presumably NULL.
1299 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1301 int threadid = id_to_int(tid);
1302 if (threadid < (int)thrd_last_action.size())
1303 return thrd_last_action[id_to_int(tid)];
1309 * @brief Get the last fence release performed by a particular Thread
1310 * @param tid The thread ID of the Thread in question
1311 * @return The last fence release in the thread, if one exists; NULL otherwise
// Same pattern as get_last_action, but for the per-thread cache of the most
// recent release fence (thrd_last_fence_release).  Out-of-range fall-through
// return is not visible in this listing — presumably NULL.
1313 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1315 int threadid = id_to_int(tid);
1316 if (threadid < (int)thrd_last_fence_release.size())
1317 return thrd_last_fence_release[id_to_int(tid)];
1323 * Gets the last memory_order_seq_cst write (in the total global sequence)
1324 * performed on a particular object (i.e., memory location), not including the
1326 * @param curr The current ModelAction; also denotes the object location to
1328 * @return The last seq_cst write
// The last SC write per location is cached in obj_last_sc_map; this is a
// straight map lookup keyed by the current action's location.
1330 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1332 void *location = curr->get_location();
1333 return obj_last_sc_map.get(location);
1337 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1338 * performed in a particular thread, prior to a particular fence.
1339 * @param tid The ID of the thread to check
1340 * @param before_fence The fence from which to begin the search; if NULL, then
1341 * search for the most recent fence in the thread.
1342 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
// Walk the FENCE_LOCATION list backwards: first (when before_fence is given)
// rewind to before_fence, then continue backwards to the first SC fence
// belonging to thread tid.
// NOTE(review): the loop-exit statements (break/return) and the NULL-list
// early return are on lines not visible in this listing.
1344 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1346 /* All fences should have location FENCE_LOCATION */
1347 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
1352 sllnode<ModelAction*>* rit = list->end();
// Phase 1: position the iterator at before_fence (if one was supplied).
1355 for (;rit != NULL;rit=rit->getPrev())
1356 if (rit->getVal() == before_fence)
1359 ASSERT(rit->getVal() == before_fence);
// Phase 2: scan backwards for an SC fence from the requested thread.
1363 for (;rit != NULL;rit=rit->getPrev()) {
1364 ModelAction *act = rit->getVal();
1365 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1372 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1373 * location). This function identifies the mutex according to the current
1374 * action, which is presumed to perform on the same mutex.
1375 * @param curr The current ModelAction; also denotes the object location to
1377 * @return The last unlock operation
// Scan the per-location action list backwards and return the most recent
// unlock OR wait on the mutex (a wait releases the mutex too, so it counts).
1379 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1381 void *location = curr->get_location();
1383 simple_action_list_t *list = obj_map.get(location);
1387 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1388 sllnode<ModelAction*>* rit;
1389 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1390 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1391 return rit->getVal();
// The "parent" of a thread's next action: its own last action, or — for a
// thread that has not acted yet — the THREAD_CREATE action that spawned it.
// NOTE(review): the NULL check guarding the fallback is on a line not
// visible in this listing.
1395 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1397 ModelAction *parent = get_last_action(tid);
1399 parent = get_thread(tid)->get_creation();
1404 * Returns the clock vector for a given thread.
1405 * @param tid The thread whose clock vector we want
1406 * @return Desired clock vector
// A thread's clock vector is the clock vector of its parent action
// (see get_parent_action); NULL if the thread has no parent action yet.
1408 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1410 ModelAction *firstaction=get_parent_action(tid);
1411 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compare two raw 64-bit values truncated to an access width of 1, 2 or 4
// bytes, so that only the bytes actually covered by the access are compared.
// NOTE(review): the switch/case scaffolding and the 8-byte/default case are
// on lines not visible in this listing.
1414 bool valequals(uint64_t val1, uint64_t val2, int size) {
1417 return ((uint8_t)val1) == ((uint8_t)val2);
1419 return ((uint16_t)val1) == ((uint16_t)val2);
1421 return ((uint32_t)val1) == ((uint32_t)val2);
1431 * Build up an initial set of all past writes that this 'read' action may read
1432 * from, as well as any previously-observed future values that must still be valid.
1434 * @param curr is the current ModelAction that we are exploring; it must be a
// Build the set of writes a read may legally read from, walking each
// thread's per-location write list from newest to oldest.  Filters out
// second-SC-writes for SC reads and would-be duplicate RMW targets, and
// stops per-thread at the first write that happens-before the read.
// NOTE(review): the declaration of loop variable 'i', the loop-exit
// break statements, and the DBG print of the set are on lines not visible
// in this listing.
1437 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1439 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1441 ASSERT(curr->is_read());
1443 ModelAction *last_sc_write = NULL;
1445 if (curr->is_seqcst())
1446 last_sc_write = get_last_seq_cst_write(curr);
1448 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1450 /* Iterate over all threads */
1451 if (thrd_lists != NULL)
1452 for (i = 0;i < thrd_lists->size();i++) {
1453 /* Iterate over actions in thread, starting from most recent */
1454 simple_action_list_t *list = &(*thrd_lists)[i];
1455 sllnode<ModelAction *> * rit;
1456 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1457 ModelAction *act = rit->getVal();
1462 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1463 bool allow_read = true;
1465 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1468 /* Need to check whether we will have two RMW reading from the same value */
1469 if (curr->is_rmwr()) {
1470 /* It is okay if we have a failing CAS */
1471 if (!curr->is_rmwrcas() ||
1472 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1473 //Need to make sure we aren't the second RMW
1474 CycleNode * node = mo_graph->getNode_noCreate(act);
1475 if (node != NULL && node->getRMW() != NULL) {
1476 //we are the second RMW
1483 /* Only add feasible reads */
1484 rf_set->push_back(act);
1487 /* Include at most one act per-thread that "happens before" curr */
1488 if (act->happens_before(curr))
1493 if (DBG_ENABLED()) {
1494 model_print("Reached read action:\n");
1496 model_print("End printing read_from_past\n");
// Pretty-print an action list as a table and finish with an order-sensitive
// hash of the printed actions, so two traces can be compared by hash.
1501 static void print_list(action_list_t *list)
1503 sllnode<ModelAction*> *it;
1505 model_print("------------------------------------------------------------------------------------\n");
1506 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1507 model_print("------------------------------------------------------------------------------------\n");
1509 unsigned int hash = 0;
1511 for (it = list->begin();it != NULL;it=it->getNext()) {
1512 const ModelAction *act = it->getVal();
// Seq number 0 marks actions that never committed; skip printing those
// (the print call itself is on a line not visible in this listing).
1513 if (act->get_seq_number() > 0)
// Fold each action's hash into a running, order-dependent checksum.
1515 hash = hash^(hash<<3)^(it->getVal()->hash());
1517 model_print("HASH %u\n", hash);
1518 model_print("------------------------------------------------------------------------------------\n");
1521 #if SUPPORT_MOD_ORDER_DUMP
// Dump the modification-order graph plus reads-from (red) and
// sequenced-before (blue) edges as a Graphviz .dot file.
// NOTE(review): the declaration of 'buffer' used by sprintf is on a line
// not visible in this listing — confirm it is large enough for the
// filename plus the ".dot" suffix.
1522 void ModelExecution::dumpGraph(char *filename)
1525 sprintf(buffer, "%s.dot", filename);
1526 FILE *file = fopen(buffer, "w");
1527 fprintf(file, "digraph %s {\n", filename);
1528 mo_graph->dumpNodes(file);
// One slot per thread: tracks the previous action of each thread so a
// sequenced-before edge can be drawn to the current one.
1529 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1531 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1532 ModelAction *act = it->getVal();
1533 if (act->is_read()) {
1534 mo_graph->dot_print_node(file, act);
1535 mo_graph->dot_print_edge(file,
1536 act->get_reads_from(),
1538 "label=\"rf\", color=red, weight=2");
1540 if (thread_array[act->get_tid()]) {
1541 mo_graph->dot_print_edge(file,
1542 thread_array[id_to_int(act->get_tid())],
1544 "label=\"sb\", color=blue, weight=400");
1547 thread_array[act->get_tid()] = act;
1549 fprintf(file, "}\n");
1550 model_free(thread_array);
1555 /** @brief Prints an execution trace summary. */
// Print the execution-trace summary: optional MO-graph dumps (compile-time
// gated), a header flagging sleep-set redundancy / detected bugs, then the
// full action trace via print_list.
1556 void ModelExecution::print_summary()
1558 #if SUPPORT_MOD_ORDER_DUMP
1559 char buffername[100];
1560 sprintf(buffername, "exec%04u", get_execution_number());
1561 mo_graph->dumpGraphToFile(buffername);
1562 sprintf(buffername, "graph%04u", get_execution_number());
1563 dumpGraph(buffername);
1566 model_print("Execution trace %d:", get_execution_number());
1567 if (scheduler->all_threads_sleeping())
1568 model_print(" SLEEP-SET REDUNDANT");
1569 if (have_bug_reports())
1570 model_print(" DETECTED BUG(S)");
1574 print_list(&action_trace);
// Print only the tail of the action trace: walk backwards collecting the
// most recent actions into a temporary list, then print them in forward
// order with the same table format and running hash as print_list.
// NOTE(review): the declarations/updates of 'counter' and 'length' and the
// per-action print call are on lines not visible in this listing.
1579 void ModelExecution::print_tail()
1581 model_print("Execution trace %d:\n", get_execution_number());
1583 sllnode<ModelAction*> *it;
1585 model_print("------------------------------------------------------------------------------------\n");
1586 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1587 model_print("------------------------------------------------------------------------------------\n");
1589 unsigned int hash = 0;
// Collect the last 'length' actions by walking the trace backwards and
// pushing to the front, restoring chronological order in 'list'.
1593 SnapList<ModelAction *> list;
1594 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1595 if (counter > length)
1598 ModelAction * act = it->getVal();
1599 list.push_front(act);
1603 for (it = list.begin();it != NULL;it=it->getNext()) {
1604 const ModelAction *act = it->getVal();
1605 if (act->get_seq_number() > 0)
1607 hash = hash^(hash<<3)^(it->getVal()->hash());
1609 model_print("HASH %u\n", hash);
1610 model_print("------------------------------------------------------------------------------------\n");
1614 * Add a Thread to the system for the first time. Should only be called once
1616 * @param t The Thread to add
// Register a new Thread: grow thread_map if needed, store it (the store
// itself is on a line not visible in this listing), and hand it to the
// scheduler — except the special model-checker thread, which never runs
// as a user thread.
1618 void ModelExecution::add_thread(Thread *t)
1620 unsigned int i = id_to_int(t->get_id());
1621 if (i >= thread_map.size())
1622 thread_map.resize(i + 1);
1624 if (!t->is_model_thread())
1625 scheduler->add_thread(t);
1629 * @brief Get a Thread reference by its ID
1630 * @param tid The Thread's ID
1631 * @return A Thread reference
// Bounds-checked thread_map lookup; the out-of-range fall-through return is
// on a line not visible in this listing — presumably NULL.
1633 Thread * ModelExecution::get_thread(thread_id_t tid) const
1635 unsigned int i = id_to_int(tid);
1636 if (i < thread_map.size())
1637 return thread_map[i];
1642 * @brief Get a reference to the Thread in which a ModelAction was executed
1643 * @param act The ModelAction
1644 * @return A Thread reference
// Convenience overload: resolve an action's owning Thread via its tid.
1646 Thread * ModelExecution::get_thread(const ModelAction *act) const
1648 return get_thread(act->get_tid());
1652 * @brief Get a Thread reference by its pthread ID
1653 * @param index The pthread's ID
1654 * @return A Thread reference
// Map a pthread_t to our Thread. Small pids are treated as direct thread
// ids; otherwise the pthread_t's internal value is used to index
// pthread_map.
// NOTE(review): the branch condition separating the two cases, and the
// declaration of 'x', are on lines not visible in this listing.
1656 Thread * ModelExecution::get_pthread(pthread_t pid) {
1657 // pid 1 is reserved for the main thread, pthread ids should start from 2
1659 return get_thread(pid);
1666 uint32_t thread_id = x.v;
1668 if (thread_id < pthread_counter + 1)
1669 return pthread_map[thread_id];
1675 * @brief Check if a Thread is currently enabled
1676 * @param t The Thread to check
1677 * @return True if the Thread is currently enabled
// Thin forwarding wrapper: enabled-ness is owned by the scheduler.
1679 bool ModelExecution::is_enabled(Thread *t) const
1681 return scheduler->is_enabled(t);
1685 * @brief Check if a Thread is currently enabled
1686 * @param tid The ID of the Thread to check
1687 * @return True if the Thread is currently enabled
// Thin forwarding wrapper (by thread id): enabled-ness is owned by the scheduler.
1689 bool ModelExecution::is_enabled(thread_id_t tid) const
1691 return scheduler->is_enabled(tid);
1695 * @brief Select the next thread to execute based on the current action
1697 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1698 * actions should be followed by the execution of their child thread. In either
1699 * case, the current action should determine the next thread schedule.
1701 * @param curr The current action
1702 * @return The next thread to run, if the current action will determine this
1703 * selection; otherwise NULL
// Forced scheduling decisions: keep the same thread running through both
// halves of an RMW, and run the child thread right after a CREATE; the
// fall-through return (no forced choice) is on a line not visible in this
// listing — presumably NULL.
1705 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1707 /* Do not split atomic RMW */
1708 if (curr->is_rmwr())
1709 return get_thread(curr);
1710 /* Follow CREATE with the created thread */
1711 /* which is not needed, because model.cc takes care of this */
1712 if (curr->get_type() == THREAD_CREATE)
1713 return curr->get_thread_operand();
1714 if (curr->get_type() == PTHREAD_CREATE) {
1715 return curr->get_thread_operand();
1721 * Takes the next step in the execution, if possible.
1722 * @param curr The current step to take
1723 * @return Returns the next Thread to run, if any; NULL if this execution
// Execute one step: process the current action (check_current_action may
// replace it), record it in ModelHistory, drop the thread from the
// scheduler if it blocked or finished, and report any forced next thread.
1726 Thread * ModelExecution::take_step(ModelAction *curr)
1728 Thread *curr_thrd = get_thread(curr);
1729 ASSERT(curr_thrd->get_state() == THREAD_READY);
1731 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1732 curr = check_current_action(curr);
1735 /* Process this action in ModelHistory for records */
1737 model->get_history()->process_action( curr, curr->get_tid() );
1739 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1740 scheduler->remove_thread(curr_thrd);
1742 return action_select_next_thread(curr);
1745 /** This method removes references to an Action before we delete it. */
// Undo every registration that add_action_to_lists / add_write_to_lists made
// for this action before it is deleted: the global trace, the per-thread
// per-location vector, the obj_map entry (keyed differently for fences,
// unlocks and waits), the last-SC-write cache, and the MO graph.
1747 void ModelExecution::removeAction(ModelAction *act) {
1749 action_trace.removeAction(act);
1752 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1753 (*vec)[act->get_tid()].removeAction(act);
// These three cases mirror the insertion sites: SC fences / unlocks were
// listed under their own location, waits under their mutex location, and
// freed writes under obj_wr_thrd_map.
1755 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1756 sllnode<ModelAction *> * listref = act->getActionRef();
1757 if (listref != NULL) {
1758 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1759 list->erase(listref);
1761 } else if (act->is_wait()) {
1762 sllnode<ModelAction *> * listref = act->getActionRef();
1763 if (listref != NULL) {
1764 void *mutex_loc = (void *) act->get_value();
1765 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
1767 } else if (act->is_free()) {
1768 sllnode<ModelAction *> * listref = act->getActionRef();
1769 if (listref != NULL) {
1770 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1771 (*vec)[act->get_tid()].erase(listref);
1774 //Clear it from last_sc_map
1775 if (obj_last_sc_map.get(act->get_location()) == act) {
1776 obj_last_sc_map.remove(act->get_location());
1779 //Remove from Cyclegraph
1780 mo_graph->freeAction(act);
1784 /** Computes clock vector that all running threads have already synchronized to. */
// Compute the component-wise minimum of the clock vectors of all live
// (incomplete) threads: everything at or below this vector has been
// synchronized-with by every running thread, so it is invisible and safe to
// collect.  Returns NULL when no live thread has a clock vector.
1786 ClockVector * ModelExecution::computeMinimalCV() {
1787 ClockVector *cvmin = NULL;
1788 //Thread 0 isn't a real thread, so skip it..
1789 for(unsigned int i = 1;i < thread_map.size();i++) {
1790 Thread * t = thread_map[i];
// Completed threads constrain nothing; skip (the 'continue' is on a line
// not visible in this listing).
1791 if (t->is_complete())
1793 thread_id_t tid = int_to_id(i);
1794 ClockVector * cv = get_cv(tid);
// First live CV seeds cvmin; later ones are min-merged into it (the
// NULL checks around these two lines are not visible in this listing).
1796 cvmin = new ClockVector(cv, NULL);
1798 cvmin->minmerge(cv);
1804 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
// When the most recent action of a live thread must be deleted, install a
// fresh ATOMIC_NOP in its place that inherits the old action's clock vector
// and last-release-fence, so the thread still has a valid "last action".
1806 void ModelExecution::fixupLastAct(ModelAction *act) {
1807 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1808 newact->set_seq_number(get_next_seq_num());
1809 newact->create_cv(act);
1810 newact->set_last_fence_release(act->get_last_fence_release());
1811 add_action_to_lists(newact, false);
1814 /** Compute which actions to free. */
// Garbage-collect old actions from the trace to bound memory use.
// Overview of the three passes over action_trace:
//   1. Forward pass up to 'maxtofree': mark writes invisible to all live
//      threads (seq <= that thread's entry in the minimal CV) as free,
//      propagating the mark backwards through the MO graph.
//   2. Backward pass over the kept (recent) window: weaken reads whose
//      reads-from write was freed, and drop references to release fences
//      that everyone has already synchronized past.
//   3. Forward pass over the old window: actually delete what can be
//      deleted, with special-casing for last-actions of live threads,
//      fences, unlock/wait (keep the most recent per lock), and creates.
// NOTE(review): this listing is missing many lines (closing braces, early
// returns, the delete/removeAction calls, and iterator advances); the
// comments below describe only what the visible lines establish.
1816 void ModelExecution::collectActions() {
// Don't collect at all until the trace exceeds the configured minimum size.
1817 if (priv->used_sequence_numbers < params->traceminsize)
1820 //Compute minimal clock vector for all live threads
1821 ClockVector *cvmin = computeMinimalCV();
1822 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with seq number <= maxtofree are candidates; the newest
// 'traceminsize' worth of actions is always kept.
1823 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1825 //Next walk action trace... When we hit an action, see if it is
1826 //invisible (e.g., earlier than the minimum clock for its thread)...
1827 //if so erase it and all previous
1828 //actions in cyclegraph
1829 sllnode<ModelAction*> * it;
1830 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1831 ModelAction *act = it->getVal();
1832 modelclock_t actseq = act->get_seq_number();
1834 //See if we are done
1835 if (actseq > maxtofree)
1838 thread_id_t act_tid = act->get_tid();
1839 modelclock_t tid_clock = cvmin->getClock(act_tid);
1841 //Free if it is invisible or we have set a flag to remove visible actions.
1842 if (actseq <= tid_clock || params->removevisible) {
1843 ModelAction * write;
// Resolve the write to mark: the action itself if it is a write, or the
// write it read from (the assignment for the write case is on a line not
// visible in this listing).
1844 if (act->is_write()) {
1846 } else if (act->is_read()) {
1847 write = act->get_reads_from();
1851 //Mark everything earlier in MO graph to be freed
1852 CycleNode * cn = mo_graph->getNode_noCreate(write);
// Breadth-first walk over incoming MO edges: every write MO-before a
// freed write is also freed (the pop_back is not visible here).
1854 queue->push_back(cn);
1855 while(!queue->empty()) {
1856 CycleNode * node = queue->back();
1858 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1859 CycleNode * prevnode = node->getInEdge(i);
1860 ModelAction * prevact = prevnode->getAction();
1861 if (prevact->get_type() != READY_FREE) {
1862 prevact->set_free();
1863 queue->push_back(prevnode);
1871 //We may need to remove read actions in the window we don't delete to preserve correctness.
1873 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1874 ModelAction *act = it2->getVal();
1875 //Do iteration early in case we delete the act
1877 bool islastact = false;
1878 ModelAction *lastact = get_last_action(act->get_tid());
1879 if (act == lastact) {
1880 Thread * th = get_thread(act);
// Only a still-running thread needs its last action preserved.
1881 islastact = !th->is_complete();
1884 if (act->is_read()) {
1885 if (act->get_reads_from()->is_free()) {
1886 if (act->is_rmw()) {
1887 //Weaken a RMW from a freed store to a write
1888 act->set_type(ATOMIC_WRITE);
1900 //If we don't delete the action, we should remove references to release fences
1902 const ModelAction *rel_fence =act->get_last_fence_release();
1903 if (rel_fence != NULL) {
1904 modelclock_t relfenceseq = rel_fence->get_seq_number();
1905 thread_id_t relfence_tid = rel_fence->get_tid();
1906 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1907 //Remove references to irrelevant release fences
1908 if (relfenceseq <= tid_clock)
1909 act->set_last_fence_release(NULL);
1912 //Now we are in the window of old actions that we remove if possible
1913 for (;it != NULL;) {
1914 ModelAction *act = it->getVal();
1915 //Do iteration early since we may delete node...
1917 bool islastact = false;
1918 ModelAction *lastact = get_last_action(act->get_tid());
1919 if (act == lastact) {
1920 Thread * th = get_thread(act);
1921 islastact = !th->is_complete();
1924 if (act->is_read()) {
1925 if (act->get_reads_from()->is_free()) {
1926 if (act->is_rmw()) {
1927 act->set_type(ATOMIC_WRITE);
1937 } else if (act->is_free()) {
1944 } else if (act->is_write()) {
1945 //Do nothing with write that hasn't been marked to be freed
1946 } else if (islastact) {
1947 //Keep the last action for non-read/write actions
1948 } else if (act->is_fence()) {
1949 //Note that acquire fences can always be safely
1950 //removed, but could incur extra overheads in
1951 //traversals. Removing them before the cvmin seems
1952 //like a good compromise.
1954 //Release fences before the cvmin don't do anything
1955 //because everyone has already synchronized.
1957 //Sequentially consistent fences before cvmin are redundant
1958 //because happens-before will enforce same
1961 modelclock_t actseq = act->get_seq_number();
1962 thread_id_t act_tid = act->get_tid();
1963 modelclock_t tid_clock = cvmin->getClock(act_tid);
1964 if (actseq <= tid_clock) {
1966 // Remove reference to act from thrd_last_fence_release
1967 int thread_id = id_to_int( act->get_tid() );
1968 if (thrd_last_fence_release[thread_id] == act) {
1969 thrd_last_fence_release[thread_id] = NULL;
1975 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1976 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1977 //need to keep most recent unlock/wait for each lock
1978 if(act->is_unlock() || act->is_wait()) {
// Only the newest unlock/wait per mutex matters for future lock
// operations; older ones can be deleted.
1979 ModelAction * lastlock = get_last_unlock(act);
1980 if (lastlock != act) {
1985 } else if (act->is_create()) {
// A create whose child thread has finished is no longer needed.
1986 if (act->get_thread_operand()->is_complete()) {
1998 //If we don't delete the action, we should remove references to release fences
1999 const ModelAction *rel_fence =act->get_last_fence_release();
2000 if (rel_fence != NULL) {
2001 modelclock_t relfenceseq = rel_fence->get_seq_number();
2002 thread_id_t relfence_tid = rel_fence->get_tid();
2003 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2004 //Remove references to irrelevant release fences
2005 if (relfenceseq <= tid_clock)
2006 act->set_last_fence_release(NULL);
2014 Fuzzer * ModelExecution::getFuzzer() {