11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
/* (elided excerpt: the doc-comment opener, some members -- including the
 * `asserted` flag read by has_asserted() -- and the closing brace of this
 * struct are not visible in this view) */
 * Structure for holding small ModelChecker members that should be snapshotted
struct model_snapshot_members {
	/* Initialize the per-execution bookkeeping counters and flags */
	model_snapshot_members() :
		/* First thread created will have id INITIAL_THREAD_ID */
		next_thread_id(INITIAL_THREAD_ID),
		used_sequence_numbers(0),
		bad_synchronization(false),
	/* Free every bug_message heap-allocated via ModelExecution::assert_bug() */
	~model_snapshot_members() {
		for (unsigned int i = 0;i < bugs.size();i++)
	/* Next thread id to hand out; doubles as the thread count
	 * (see ModelExecution::get_num_threads) */
	unsigned int next_thread_id;
	/* Highest sequence number handed out so far (see get_next_seq_num) */
	modelclock_t used_sequence_numbers;
	/* Bug reports accumulated for this execution (owned; freed in dtor) */
	SnapVector<bug_message *> bugs;
	/** @brief Incorrectly-ordered synchronization was made */
	bool bad_synchronization;
/** @brief Constructor
 * @param m The owning ModelChecker
 * @param scheduler The scheduler this execution registers itself with
 * (elided excerpt: several initializer-list entries are not visible) */
ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
	thread_map(2),	/* We'll always need at least 2 threads */
	condvar_waiters_map(),
	thrd_last_fence_release(),
	/* Snapshotted scalar state lives behind one heap allocation */
	priv(new struct model_snapshot_members ()),
	mo_graph(new CycleGraph()),
	thrd_func_act_lists(),
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	add_thread(model_thread);
	/* Give the scheduler a back-pointer to this execution engine */
	scheduler->register_engine(this);
/** @brief Destructor
 * Tears down every Thread created during this execution (the model thread
 * included, since it occupies an id slot).
 * (elided excerpt: any further cleanup, e.g. of priv/mo_graph, is not
 * visible here -- confirm against the full source) */
ModelExecution::~ModelExecution()
	for (unsigned int i = 0;i < get_num_threads();i++)
		delete get_thread(int_to_id(i));
88 int ModelExecution::get_execution_number() const
90 return model->get_execution_number();
/**
 * Fetch the action list stored for @a ptr in @a hash, lazily creating an
 * empty list on first use.
 * (elided excerpt: the NULL check guarding the allocation, the hash->put()
 * of the new list, and the return statement are not visible here)
 */
static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
	action_list_t *tmp = hash->get(ptr);
	tmp = new action_list_t();
/**
 * Fetch the per-thread action-list vector stored for @a ptr in @a hash,
 * lazily creating an empty vector on first use.
 * (elided excerpt: the NULL check, the hash->put(), and the return
 * statement are not visible here)
 */
static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
	SnapVector<action_list_t> *tmp = hash->get(ptr);
	tmp = new SnapVector<action_list_t>();
113 /** @return a thread ID for a new Thread */
114 thread_id_t ModelExecution::get_next_id()
116 return priv->next_thread_id++;
119 /** @return the number of user threads created during this execution */
120 unsigned int ModelExecution::get_num_threads() const
122 return priv->next_thread_id;
125 /** @return a sequence number for a new ModelAction */
126 modelclock_t ModelExecution::get_next_seq_num()
128 return ++priv->used_sequence_numbers;
/**
 * @brief Should the current action wake up a given thread?
 *
 * @param curr The current action
 * @param thread The thread that we might wake up
 * @return True, if we should wake up the sleeping thread; false otherwise
 * (elided excerpt: the return statements following each check below are
 * not visible here)
 */
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
	const ModelAction *asleep = thread->get_pending();
	/* Don't allow partial RMW to wake anyone up */
	/* Synchronizing actions may have been backtracked */
	if (asleep->could_synchronize_with(curr))
	/* All acquire/release fences and fence-acquire/store-release */
	if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
	/* Fence-release + store can awake load-acquire on the same location */
	if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
		/* Wake only if the writer's release fence came after the sleeping
		 * thread's last action */
		ModelAction *fence_release = get_last_fence_release(curr->get_tid());
		if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
	/* Let the fuzzer decide whether an explicit sleep gets interrupted */
	if (asleep->is_sleep()) {
		if (fuzzer->shouldWake(asleep))
164 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
166 for (unsigned int i = 0;i < get_num_threads();i++) {
167 Thread *thr = get_thread(int_to_id(i));
168 if (scheduler->is_sleep_set(thr)) {
169 if (should_wake_up(curr, thr))
170 /* Remove this thread from sleep set */
171 scheduler->remove_sleep(thr);
176 /** @brief Alert the model-checker that an incorrectly-ordered
177 * synchronization was made */
178 void ModelExecution::set_bad_synchronization()
180 priv->bad_synchronization = true;
/**
 * @brief Record a bug report for this execution.
 * @param msg The bug message (copied into a heap-allocated bug_message,
 *            freed by ~model_snapshot_members)
 * (elided excerpt: the body of the feasibility branch and the return value
 * are not visible here)
 */
bool ModelExecution::assert_bug(const char *msg)
	priv->bugs.push_back(new bug_message(msg));
	/* Only a bug on a feasible prefix should halt this execution */
	if (isfeasibleprefix()) {
194 /** @return True, if any bugs have been reported for this execution */
195 bool ModelExecution::have_bug_reports() const
197 return priv->bugs.size() != 0;
/** @return True, if any fatal bugs have been reported for this execution.
 * Any bug other than a data race is considered a fatal bug. Data races
 * are not considered fatal unless the number of races is exceeds
 * a threshold (temporarily set as 15).
 */
bool ModelExecution::have_fatal_bug_reports() const
	/* NOTE(review): the comment above describes a fatal-vs-data-race
	 * distinction with a threshold, but this body treats ANY recorded bug
	 * as fatal (identical to have_bug_reports()) -- confirm whether the
	 * threshold logic was elided from this excerpt or never implemented */
	return priv->bugs.size() != 0;
/** @return the vector of bug reports recorded for this execution
 * (elided excerpt: the function body is not visible here; presumably it
 * returns &priv->bugs -- confirm against the full source) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
216 * Check whether the current trace has triggered an assertion which should halt
219 * @return True, if the execution should be aborted; false otherwise
221 bool ModelExecution::has_asserted() const
223 return priv->asserted;
227 * Trigger a trace assertion which should cause this execution to be halted.
228 * This can be due to a detected bug or due to an infeasibility that should
231 void ModelExecution::set_assert()
233 priv->asserted = true;
/**
 * Check if we are in a deadlock. Should only be called at the end of an
 * execution, although it should not give false positives in the middle of an
 * execution (there should be some ENABLED thread).
 *
 * @return True if program is in a deadlock; false otherwise
 * (elided excerpt: an early exit for still-enabled threads appears to be
 * missing from this view -- confirm against the full source)
 */
bool ModelExecution::is_deadlocked() const
	bool blocking_threads = false;
	for (unsigned int i = 0;i < get_num_threads();i++) {
		thread_id_t tid = int_to_id(i);
		Thread *t = get_thread(tid);
		/* A user thread with a pending action is blocked on something */
		if (!t->is_model_thread() && t->get_pending())
			blocking_threads = true;
	return blocking_threads;
/**
 * Check if this is a complete execution. That is, have all threads completed
 * execution (rather than exiting because sleep sets have forced a redundant
 * execution)?
 *
 * @return True if the execution is complete.
 * (elided excerpt: the return statements are not visible; an enabled thread
 * implies the execution is not yet complete)
 */
bool ModelExecution::is_complete_execution() const
	for (unsigned int i = 0;i < get_num_threads();i++)
		if (is_enabled(int_to_id(i)))
/**
 * @brief Convert a plain (non-atomic) store at @a location into a synthetic
 * NONATOMIC_WRITE ModelAction so that atomic reads may read from it.
 *
 * The storing thread and logical clock previously recorded for the raw
 * store are recovered and stamped onto the new action, which is then
 * lazily spliced into the write lists and the modification order.
 *
 * (elided excerpt: the trailing `return act;` is not visible here)
 */
ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
	uint64_t value = *((const uint64_t *) location);
	modelclock_t storeclock;
	thread_id_t storethread;
	/* Recover who performed the plain store and at what clock */
	getStoreThreadAndClock(location, &storethread, &storeclock);
	/* presumably marks the location as atomic from now on -- TODO confirm */
	setAtomicStoreFlag(location);
	ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
	/* Backdate the action to the original store's clock */
	act->set_seq_number(storeclock);
	add_normal_write_to_lists(act);
	add_write_to_lists(act);
	w_modification_order(act);
	model->get_history()->process_action(act, act->get_tid());
/**
 * Processes a read model action.
 * @param curr is the read model action to process.
 * @param rf_set is the set of model actions we can possibly read from
 * @return True if processing this read updates the mo_graph.
 * (elided excerpt: the retry-loop structure around write selection and the
 * early returns are not visible here)
 */
void ModelExecution::process_read(ModelAction *curr, SnapVector<const ModelAction *> * rf_set)
	SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
	bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
	if (hasnonatomicstore) {
		/* Surface the plain store as one more candidate write */
		ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
		rf_set->push_back(nonatomicstore);
	/* The fuzzer chooses which candidate write this read observes */
	int index = fuzzer->selectWrite(curr, rf_set);
	ModelAction *rf = (*rf_set)[index];
	bool canprune = false;
	if (r_modification_order(curr, rf, priorset, &canprune)) {
		/* Commit the mo edges implied by reading from rf */
		for(unsigned int i=0;i<priorset->size();i++) {
			mo_graph->addEdge((*priorset)[i], rf);
		get_thread(curr)->set_return_value(curr->get_return_value());
		if (canprune && curr->get_type() == ATOMIC_READ) {
			/* Safe to drop curr from its per-location, per-thread list */
			int tid = id_to_int(curr->get_tid());
			(*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
	/* Selected write failed; compact it out of the candidate set
	 * (presumably followed by a retry -- the loop is elided) */
	(*rf_set)[index] = rf_set->back();
/**
 * Processes a lock, trylock, or unlock model action. @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken. If not,
 * it falls to the normal lock operation case. If so, it returns
 * failure.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 * (elided excerpt: several case labels, braces and returns are not visible)
 */
bool ModelExecution::process_mutex(ModelAction *curr)
	cdsc::mutex *mutex = curr->get_mutex();
	struct cdsc::mutex_state *state = NULL;
	state = mutex->get_state();
	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* Trylock succeeds iff no thread currently holds the mutex */
		bool success = !state->locked;
		curr->set_try_lock(success);
		/* Failure path: report 0 to the caller */
		get_thread(curr)->set_return_value(0);
		/* Success path: report 1 (the branch between the two is elided) */
		get_thread(curr)->set_return_value(1);
		//otherwise fall into the lock case
	/* (elided: the ATOMIC_LOCK case label is not visible here) */
	//TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
	//if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
	//	assert_bug("Lock access before initialization");
	/* Record the acquiring thread as the new owner */
	state->locked = get_thread(curr);
	ModelAction *unlock = get_last_unlock(curr);
	//synchronize with the previous unlock statement
	if (unlock != NULL) {
		synchronize(unlock, curr);
	case ATOMIC_UNLOCK: {
		//TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
		/* wake up the other threads */
		for (unsigned int i = 0;i < get_num_threads();i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			/* Re-enable any thread blocked trying to lock this mutex */
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;
		if (!curr->is_wait())
			break;	/* The rest is only for ATOMIC_WAIT */
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
			scheduler->wake(get_thread(rit->getVal()));
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		if (waiters->size() != 0) {
			/* Fuzzer chooses which single waiter gets woken */
			Thread * thread = fuzzer->selectNotify(waiters);
			scheduler->wake(thread);
421 * Process a write ModelAction
422 * @param curr The ModelAction to process
423 * @return True if the mo_graph was updated or promises were resolved
425 void ModelExecution::process_write(ModelAction *curr)
427 w_modification_order(curr);
428 get_thread(curr)->set_return_value(VALUE_NONE);
/**
 * Process a fence ModelAction
 * @param curr The ModelAction to process
 * @return True if synchronization was updated
 * (elided excerpt: the continue/break statements following the guards in
 * the loop, and the final return, are not visible here)
 */
bool ModelExecution::process_fence(ModelAction *curr)
	/*
	 * fence-relaxed: no-op
	 * fence-release: only log the occurence (not in this function), for
	 * use in later synchronization
	 * fence-acquire (this function): search for hypothetical release
	 * sequences
	 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
	 */
	bool updated = false;
	if (curr->is_acquire()) {
		action_list_t *list = &action_trace;
		sllnode<ModelAction *> * rit;
		/* Find X : is_read(X) && X --sb-> curr */
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Only same-thread actions are sequenced-before curr */
			if (act->get_tid() != curr->get_tid())
			/* Stop at the beginning of the thread */
			if (act->is_thread_start())
			/* Stop once we reach a prior fence-acquire */
			if (act->is_fence() && act->is_acquire())
			/* read-acquire will find its own release sequences */
			if (act->is_acquire())
			/* Establish hypothetical release sequences */
			ClockVector *cv = get_hb_from_write(act->get_reads_from());
			if (cv != NULL && curr->get_cv()->merge(cv))
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc. This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * (elided excerpt: several case labels -- notably the two JOIN variants --
 * and the break statements are not visible in this view)
 */
void ModelExecution::process_thread_action(ModelAction *curr)
	switch (curr->get_type()) {
	case THREAD_CREATE: {
		/* Build the Thread from the (func, arg) packed into the action */
		thrd_t *thrd = (thrd_t *)curr->get_location();
		struct thread_params *params = (struct thread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		th->set_creation(curr);
	case PTHREAD_CREATE: {
		/* pthread ids are dense 1-based indices into pthread_map */
		(*(uint32_t *)curr->get_location()) = pthread_counter++;
		struct pthread_params *params = (struct pthread_params *)curr->get_value();
		Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
		curr->set_thread_operand(th);
		th->set_creation(curr);
		if ( pthread_map.size() < pthread_counter )
			pthread_map.resize( pthread_counter );
		pthread_map[ pthread_counter-1 ] = th;
	/* (elided: two JOIN case labels; each synchronizes the joiner with the
	 * joined thread's final action) */
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		Thread *blocking = curr->get_thread_operand();
		ModelAction *act = get_last_action(blocking->get_id());
		synchronize(act, curr);
		break;	// WL: to be add (modified)
	case THREADONLY_FINISH:
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* (elided) special handling when the init/main thread finishes */
		if (curr->get_type() == THREAD_FINISH &&
				th == model->getInitThread()) {
		/* Wake up any joining threads */
		for (unsigned int i = 0;i < get_num_threads();i++) {
			Thread *waiting = get_thread(int_to_id(i));
			if (waiting->waiting_on() == th &&
					waiting->get_pending()->is_thread_join())
				scheduler->wake(waiting);
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
 * manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 * (elided excerpt: the RMW branch's replacement of *curr and its early
 * return are not visible here)
 */
bool ModelExecution::initialize_curr_action(ModelAction **curr)
	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second half of an RMW: merge into the pending RMWR action */
		ModelAction *newcurr = process_rmw(*curr);
	ModelAction *newcurr = *curr;
	/* Stamp the action with a fresh global sequence number */
	newcurr->set_seq_number(get_next_seq_num());
	/* Always compute new clock vector */
	newcurr->create_cv(get_parent_action(newcurr->get_tid()));
	/* Assign most recent release fence */
	newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
	return true;	/* This was a new ModelAction */
/**
 * @brief Establish reads-from relation between two actions
 *
 * Perform basic operations involved with establishing a concrete rf relation,
 * including setting the ModelAction data and checking for release sequences.
 *
 * @param act The action that is reading (must be a read)
 * @param rf The action from which we are reading (must be a write)
 * (elided excerpt: a NULL check on cv presumably guards the merge below --
 * confirm against the full source)
 */
void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
	ASSERT(rf->is_write());
	act->set_read_from(rf);
	if (act->is_acquire()) {
		/* Acquire read: inherit the happens-before released by the write */
		ClockVector *cv = get_hb_from_write(rf);
		act->get_cv()->merge(cv);
617 * @brief Synchronizes two actions
619 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
620 * This function performs the synchronization as well as providing other hooks
621 * for other checks along with synchronization.
623 * @param first The left-hand side of the synchronizes-with relation
624 * @param second The right-hand side of the synchronizes-with relation
625 * @return True if the synchronization was successful (i.e., was consistent
626 * with the execution order); false otherwise
628 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
630 if (*second < *first) {
631 set_bad_synchronization();
634 return second->synchronize_with(first);
/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether an operation would be successful (i.e., is a lock already
 * locked, or is the joined thread already complete).
 *
 * For yield-blocking, yields are never enabled.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 * (elided excerpt: the branch bodies and return statements are not visible)
 */
bool ModelExecution::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		/* A lock is enabled only when nobody currently holds the mutex */
		cdsc::mutex *lock = curr->get_mutex();
		struct cdsc::mutex_state *state = lock->get_state();
	} else if (curr->is_thread_join()) {
		/* A join blocks until the target thread has completed */
		Thread *blocking = curr->get_thread_operand();
		if (!blocking->is_complete()) {
	} else if (curr->is_sleep()) {
		/* Fuzzer may decide a sleep proceeds immediately */
		if (!fuzzer->shouldSleep(curr))
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr (elided excerpt: the tail of this function -- including the bodies
 * of the final process_* dispatches and the return -- is not visible here)
 */
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
	/* RMWC/RMW is the completion half of an already-recorded RMWR */
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	bool newly_explored = initialize_curr_action(&curr);
	wake_up_sleeping_actions(curr);
	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw && curr->get_type() != NOOP)
		add_action_to_lists(curr);
	if (curr->is_write())
		add_write_to_lists(curr);
	SnapVector<ModelAction *> * rf_set = NULL;
	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		rf_set = build_may_read_from(curr);
	process_thread_action(curr);
	if (curr->is_read() && !second_part_of_rmw) {
		process_read(curr, rf_set);
	/* Only reads should have produced a read-from candidate set */
	ASSERT(rf_set == NULL);
	/* Dispatch the remaining action kinds (bodies elided in this view) */
	if (curr->is_write())
	if (curr->is_fence())
	if (curr->is_mutex_op())
722 * This is the strongest feasibility check available.
723 * @return whether the current trace (partial or complete) must be a prefix of
726 bool ModelExecution::isfeasibleprefix() const
728 return !is_infeasible();
/**
 * Print disagnostic information about an infeasible execution
 * @param prefix A string to prefix the output with; if NULL, then a default
 * message prefix will be provided
 * (elided excerpt: the declarations of `buf` and `ptr` -- a cursor writing
 * into buf -- are not visible here)
 */
void ModelExecution::print_infeasibility(const char *prefix) const
	/* Append one tag per infeasibility cause that applies */
	if (priv->bad_synchronization)
		ptr += sprintf(ptr, "[bad sw ordering]");
	model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
747 * Check if the current partial trace is infeasible. Does not check any
748 * end-of-execution flags, which might rule out the execution. Thus, this is
749 * useful only for ruling an execution as infeasible.
750 * @return whether the current partial trace is infeasible.
752 bool ModelExecution::is_infeasible() const
754 return priv->bad_synchronization;
/** Close out a RMWR by converting previous RMWR into a RMW or READ.
 * (elided excerpt: the guard around the RMW-edge insertion and the
 * `return lastread;` are not visible here) */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
	/* Merge this completion into the thread's pending RMWR action */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	/* The RMW must be mo-adjacent to the write it read from */
	mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
/**
 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read. Two cases:
 *
 * -# The action is a write: that write must either occur before
 * the write we read from or be the write we read from.
 * -# The action is a read: the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @param priorset Accumulates the writes that must be mo-before rf
 * @param canprune Out-param: set when curr may be pruned from its
 *        per-location per-thread list
 * @return True if modification order edges were added; false otherwise
 * (elided excerpt: the declaration of `i`, many braces, and the
 * return/break/continue statements are not visible in this view)
 */
bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	int tid = curr->get_tid();
	ModelAction *prev_same_thread = NULL;
	/* Iterate over all threads, starting from curr's own and wrapping */
	for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
		/* Last SC fence in thread tid */
		ModelAction *last_sc_fence_thread_local = NULL;
		last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
		/* Last SC fence in thread tid, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
		//Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
		if (prev_same_thread != NULL &&
				(prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
				(last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[tid];
		sllnode<ModelAction *> * rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				if (act->happens_before(curr))
			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					/* A cycle here means reading from rf is inconsistent */
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
								 *act < *last_sc_fence_local) {
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
								 *act < *last_sc_fence_thread_before) {
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/* Remember the hb frontier for the fast-skip test above */
				if (last_sc_fence_local == NULL ||
						(*last_sc_fence_local < *act)) {
					prev_same_thread = act;
				if (act->is_write()) {
					if (mo_graph->checkReachable(rf, act))
					priorset->push_back(act);
					/* A prior read constrains mo via the write it observed */
					const ModelAction *prevrf = act->get_reads_from();
					if (!prevrf->equals(rf)) {
						if (mo_graph->checkReachable(rf, prevrf))
						priorset->push_back(prevrf);
					if (act->get_tid() == curr->get_tid()) {
						//Can prune curr from obj list
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the lastest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * (elided excerpt: the declaration of `i`, several braces and the
 * break/continue statements are not visible in this view)
 */
void ModelExecution::w_modification_order(ModelAction *curr)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	ASSERT(curr->is_write());
	/* All writes that must be mo-before curr; batched into the graph below */
	SnapList<ModelAction *> edgeset;
	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
		         so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			edgeset.push_back(last_seq_cst);
		//update map for next query
		obj_last_sc_map.put(curr->get_location(), curr);
	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction*>* rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 * thread.
			 *
			 * 2) If RMW and it didn't read from anything, we should
			 * whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from() != NULL)
			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				edgeset.push_back(act);
			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				edgeset.push_back(act);
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					edgeset.push_back(act->get_reads_from());
	/* Insert all collected constraints in one batch */
	mo_graph->addEdges(&edgeset, curr);
/**
 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one the following constraint (others
 * require compiler support):
 *
 *   If X --hb-> Y --mo-> Z, then X should not read from Z.
 *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
 *
 * @param writer The candidate write (Z above)
 * @param reader The read being checked (X above)
 * (elided excerpt: the declaration of `i`, loop braces, and the return
 * statements are not visible in this view)
 */
bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* The latest write (or observed write) hb-after the reader */
		const ModelAction *write_after_read = NULL;
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		sllnode<ModelAction *>* rit;
		for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
			ModelAction *act = rit->getVal();
			/* Don't disallow due to act == reader */
			if (!reader->happens_before(act) || reader == act)
			else if (act->is_write())
				write_after_read = act;
			else if (act->is_read() && act->get_reads_from() != NULL)
				write_after_read = act->get_reads_from();
		/* Reading writer would close an hb/mo cycle: disallow */
		if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
/**
 * Computes the clock vector that happens before propagates from this write.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @return ClockVector of happens before relation.
 * (elided excerpt: loop/branch braces, the processset index walk, and the
 * return statement are not visible in this view)
 */
ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
	/* Walk the RMW chain backward, collecting RMWs whose release CV is not
	 * yet cached (rfcv) so it can be computed below */
	SnapVector<ModelAction *> * processset = NULL;
	for ( ;rf != NULL;rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());
		/* Chain ends at a non-RMW, an acq+rel RMW, or a cached rfcv */
		if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
		if (processset == NULL)
			processset = new SnapVector<ModelAction *>();
		processset->push_back(rf);
	/* Now replay the collected chain from oldest to newest */
	int i = (processset == NULL) ? 0 : processset->size();
	ClockVector * vec = NULL;
	if (rf->get_rfcv() != NULL) {
		/* Use the cached release-sequence clock vector */
		vec = rf->get_rfcv();
	} else if (rf->is_acquire() && rf->is_release()) {
	} else if (rf->is_release() && !rf->is_rmw()) {
	} else if (rf->is_release()) {
		//have rmw that is release and doesn't have a rfcv
		(vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
		//operation that isn't release
		if (rf->get_last_fence_release()) {
			vec = rf->get_last_fence_release()->get_cv();
			(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
	/* Advance to the next queued RMW in the chain */
	rf = (*processset)[i];
	if (processset != NULL)
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 * (elided excerpt: the declaration of `uninit_id`, several braces, and the
 * `if (uninit)` guards around the uninit insertions are not visible here)
 */
void ModelExecution::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First atomic access to this location: synthesize an uninitialized
	 * write so reads always have something to read from */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
		/* Grow the per-thread vector and placement-new the new slots */
		if (uninit_id >= (int)vec->size()) {
			int oldsize = (int) vec->size();
			vec->resize(uninit_id + 1);
			for(int i=oldsize;i<uninit_id+1;i++) {
				new(&(*vec)[i]) action_list_t();
		(*vec)[uninit_id].push_front(uninit);
	list->push_back(act);
	// Update action trace, a total order of all actions
	action_trace.push_back(act);
	action_trace.push_front(uninit);
	// Update obj_thrd_map, a per location, per thread, order of actions
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size()) {
		uint oldsize =vec->size();
		vec->resize(priv->next_thread_id);
		for(uint i=oldsize;i<priv->next_thread_id;i++)
			new (&(*vec)[i]) action_list_t();
	(*vec)[tid].push_back(act);
	(*vec)[uninit_id].push_front(uninit);
	// Update thrd_last_action, the last action taken by each thrad
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	thrd_last_action[uninit_id] = uninit;
	// Update thrd_last_fence_release, the last release fence taken by each thread
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	/* Waits also touch their mutex location, so index them there too */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size()) {
			uint oldsize = vec->size();
			vec->resize(priv->next_thread_id);
			for(uint i=oldsize;i<priv->next_thread_id;i++)
				new (&(*vec)[i]) action_list_t();
		(*vec)[tid].push_back(act);
1170 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1171 sllnode<ModelAction*> * rit = list->end();
1172 modelclock_t next_seq = act->get_seq_number();
1173 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1174 list->push_back(act);
1176 for(;rit != NULL;rit=rit->getPrev()) {
1177 if (rit->getVal()->get_seq_number() == next_seq) {
1178 list->insertAfter(rit, act);
1185 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1186 sllnode<ModelAction*> * rit = list->end();
1187 modelclock_t next_seq = act->get_seq_number();
1189 act->create_cv(NULL);
1190 } else if (rit->getVal()->get_seq_number() == next_seq) {
1191 act->create_cv(rit->getVal());
1192 list->push_back(act);
1194 for(;rit != NULL;rit=rit->getPrev()) {
1195 if (rit->getVal()->get_seq_number() == next_seq) {
1196 act->create_cv(rit->getVal());
1197 list->insertAfter(rit, act);
1205 * Performs various bookkeeping operations for a normal write. The
1206 * complication is that we are typically inserting a normal write
1207 * lazily, so we need to insert it into the middle of lists.
1209 * @param act is the ModelAction to add.
1212 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1214 int tid = id_to_int(act->get_tid());
1215 insertIntoActionListAndSetCV(&action_trace, act);
1217 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1218 insertIntoActionList(list, act);
1220 // Update obj_thrd_map, a per location, per thread, order of actions
1221 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1222 if (tid >= (int)vec->size()) {
1223 uint oldsize =vec->size();
1224 vec->resize(priv->next_thread_id);
1225 for(uint i=oldsize;i<priv->next_thread_id;i++)
1226 new (&(*vec)[i]) action_list_t();
1228 insertIntoActionList(&(*vec)[tid],act);
1230 // Update thrd_last_action, the last action taken by each thrad
1231 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1232 thrd_last_action[tid] = act;
1236 void ModelExecution::add_write_to_lists(ModelAction *write) {
1237 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1238 int tid = id_to_int(write->get_tid());
1239 if (tid >= (int)vec->size()) {
1240 uint oldsize =vec->size();
1241 vec->resize(priv->next_thread_id);
1242 for(uint i=oldsize;i<priv->next_thread_id;i++)
1243 new (&(*vec)[i]) action_list_t();
1245 (*vec)[tid].push_back(write);
1249 * @brief Get the last action performed by a particular Thread
1250 * @param tid The thread ID of the Thread in question
1251 * @return The last action in the thread
1253 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1255 int threadid = id_to_int(tid);
1256 if (threadid < (int)thrd_last_action.size())
1257 return thrd_last_action[id_to_int(tid)];
1263 * @brief Get the last fence release performed by a particular Thread
1264 * @param tid The thread ID of the Thread in question
1265 * @return The last fence release in the thread, if one exists; NULL otherwise
1267 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1269 int threadid = id_to_int(tid);
1270 if (threadid < (int)thrd_last_fence_release.size())
1271 return thrd_last_fence_release[id_to_int(tid)];
1277 * Gets the last memory_order_seq_cst write (in the total global sequence)
1278 * performed on a particular object (i.e., memory location), not including the
1280 * @param curr The current ModelAction; also denotes the object location to
1282 * @return The last seq_cst write
1284 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1286 void *location = curr->get_location();
1287 return obj_last_sc_map.get(location);
1291 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1292 * performed in a particular thread, prior to a particular fence.
1293 * @param tid The ID of the thread to check
1294 * @param before_fence The fence from which to begin the search; if NULL, then
1295 * search for the most recent fence in the thread.
1296 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1298 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1300 /* All fences should have location FENCE_LOCATION */
1301 action_list_t *list = obj_map.get(FENCE_LOCATION);
1306 sllnode<ModelAction*>* rit = list->end();
1309 for (;rit != NULL;rit=rit->getPrev())
1310 if (rit->getVal() == before_fence)
1313 ASSERT(rit->getVal() == before_fence);
1317 for (;rit != NULL;rit=rit->getPrev()) {
1318 ModelAction *act = rit->getVal();
1319 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1326 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1327 * location). This function identifies the mutex according to the current
1328 * action, which is presumed to perform on the same mutex.
1329 * @param curr The current ModelAction; also denotes the object location to
1331 * @return The last unlock operation
1333 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1335 void *location = curr->get_location();
1337 action_list_t *list = obj_map.get(location);
1338 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1339 sllnode<ModelAction*>* rit;
1340 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1341 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1342 return rit->getVal();
1346 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1348 ModelAction *parent = get_last_action(tid);
1350 parent = get_thread(tid)->get_creation();
1355 * Returns the clock vector for a given thread.
1356 * @param tid The thread whose clock vector we want
1357 * @return Desired clock vector
1359 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1361 ModelAction *firstaction=get_parent_action(tid);
1362 return firstaction != NULL ? firstaction->get_cv() : NULL;
1365 bool valequals(uint64_t val1, uint64_t val2, int size) {
1368 return ((uint8_t)val1) == ((uint8_t)val2);
1370 return ((uint16_t)val1) == ((uint16_t)val2);
1372 return ((uint32_t)val1) == ((uint32_t)val2);
1382 * Build up an initial set of all past writes that this 'read' action may read
1383 * from, as well as any previously-observed future values that must still be valid.
1385 * @param curr is the current ModelAction that we are exploring; it must be a
1388 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1390 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1392 ASSERT(curr->is_read());
1394 ModelAction *last_sc_write = NULL;
1396 if (curr->is_seqcst())
1397 last_sc_write = get_last_seq_cst_write(curr);
1399 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1401 /* Iterate over all threads */
1402 for (i = 0;i < thrd_lists->size();i++) {
1403 /* Iterate over actions in thread, starting from most recent */
1404 action_list_t *list = &(*thrd_lists)[i];
1405 sllnode<ModelAction *> * rit;
1406 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1407 ModelAction *act = rit->getVal();
1412 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1413 bool allow_read = true;
1415 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1418 /* Need to check whether we will have two RMW reading from the same value */
1419 if (curr->is_rmwr()) {
1420 /* It is okay if we have a failing CAS */
1421 if (!curr->is_rmwrcas() ||
1422 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1423 //Need to make sure we aren't the second RMW
1424 CycleNode * node = mo_graph->getNode_noCreate(act);
1425 if (node != NULL && node->getRMW() != NULL) {
1426 //we are the second RMW
1433 /* Only add feasible reads */
1434 rf_set->push_back(act);
1437 /* Include at most one act per-thread that "happens before" curr */
1438 if (act->happens_before(curr))
1443 if (DBG_ENABLED()) {
1444 model_print("Reached read action:\n");
1446 model_print("End printing read_from_past\n");
1452 * @brief Get an action representing an uninitialized atomic
1454 * This function may create a new one.
1456 * @param curr The current action, which prompts the creation of an UNINIT action
1457 * @return A pointer to the UNINIT ModelAction
1459 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1461 ModelAction *act = curr->get_uninit_action();
1463 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1464 curr->set_uninit_action(act);
1466 act->create_cv(NULL);
1470 static void print_list(action_list_t *list)
1472 sllnode<ModelAction*> *it;
1474 model_print("------------------------------------------------------------------------------------\n");
1475 model_print("# t Action type MO Location Value Rf CV\n");
1476 model_print("------------------------------------------------------------------------------------\n");
1478 unsigned int hash = 0;
1480 for (it = list->begin();it != NULL;it=it->getNext()) {
1481 const ModelAction *act = it->getVal();
1482 if (act->get_seq_number() > 0)
1484 hash = hash^(hash<<3)^(it->getVal()->hash());
1486 model_print("HASH %u\n", hash);
1487 model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph, plus reads-from (rf) and
 * sequenced-before (sb) edges, in Graphviz "dot" format to <filename>.dot.
 *
 * Fixes: bounded snprintf() instead of sprintf(); NULL check on fopen() so a
 * failed open cannot crash the dump; thread_array indexed consistently via
 * id_to_int() (the original mixed raw thread_id_t with id_to_int()).
 *
 * @param filename Base name for the output file (".dot" is appended)
 */
void ModelExecution::dumpGraph(char *filename)
{
	char buffer[200];
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (file == NULL) {
		model_print("Could not open graph file %s\n", buffer);
		return;
	}
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);

	/* Most recent action per thread, for drawing sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
		}
		int tid = id_to_int(act->get_tid());
		if (thread_array[tid]) {
			mo_graph->dot_print_edge(file,
						thread_array[tid],
						act,
						"label=\"sb\", color=blue, weight=400");
		}
		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
1524 /** @brief Prints an execution trace summary. */
1525 void ModelExecution::print_summary()
1527 #if SUPPORT_MOD_ORDER_DUMP
1528 char buffername[100];
1529 sprintf(buffername, "exec%04u", get_execution_number());
1530 mo_graph->dumpGraphToFile(buffername);
1531 sprintf(buffername, "graph%04u", get_execution_number());
1532 dumpGraph(buffername);
1535 model_print("Execution trace %d:", get_execution_number());
1536 if (isfeasibleprefix()) {
1537 if (scheduler->all_threads_sleeping())
1538 model_print(" SLEEP-SET REDUNDANT");
1539 if (have_bug_reports())
1540 model_print(" DETECTED BUG(S)");
1542 print_infeasibility(" INFEASIBLE");
1545 print_list(&action_trace);
1551 * Add a Thread to the system for the first time. Should only be called once
1553 * @param t The Thread to add
1555 void ModelExecution::add_thread(Thread *t)
1557 unsigned int i = id_to_int(t->get_id());
1558 if (i >= thread_map.size())
1559 thread_map.resize(i + 1);
1561 if (!t->is_model_thread())
1562 scheduler->add_thread(t);
1566 * @brief Get a Thread reference by its ID
1567 * @param tid The Thread's ID
1568 * @return A Thread reference
1570 Thread * ModelExecution::get_thread(thread_id_t tid) const
1572 unsigned int i = id_to_int(tid);
1573 if (i < thread_map.size())
1574 return thread_map[i];
1579 * @brief Get a reference to the Thread in which a ModelAction was executed
1580 * @param act The ModelAction
1581 * @return A Thread reference
1583 Thread * ModelExecution::get_thread(const ModelAction *act) const
1585 return get_thread(act->get_tid());
1589 * @brief Get a Thread reference by its pthread ID
1590 * @param index The pthread's ID
1591 * @return A Thread reference
1593 Thread * ModelExecution::get_pthread(pthread_t pid) {
1599 uint32_t thread_id = x.v;
1600 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1605 * @brief Check if a Thread is currently enabled
1606 * @param t The Thread to check
1607 * @return True if the Thread is currently enabled
1609 bool ModelExecution::is_enabled(Thread *t) const
1611 return scheduler->is_enabled(t);
1615 * @brief Check if a Thread is currently enabled
1616 * @param tid The ID of the Thread to check
1617 * @return True if the Thread is currently enabled
1619 bool ModelExecution::is_enabled(thread_id_t tid) const
1621 return scheduler->is_enabled(tid);
1625 * @brief Select the next thread to execute based on the curren action
1627 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1628 * actions should be followed by the execution of their child thread. In either
1629 * case, the current action should determine the next thread schedule.
1631 * @param curr The current action
1632 * @return The next thread to run, if the current action will determine this
1633 * selection; otherwise NULL
1635 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1637 /* Do not split atomic RMW */
1638 if (curr->is_rmwr())
1639 return get_thread(curr);
1640 /* Follow CREATE with the created thread */
1641 /* which is not needed, because model.cc takes care of this */
1642 if (curr->get_type() == THREAD_CREATE)
1643 return curr->get_thread_operand();
1644 if (curr->get_type() == PTHREAD_CREATE) {
1645 return curr->get_thread_operand();
1651 * Takes the next step in the execution, if possible.
1652 * @param curr The current step to take
1653 * @return Returns the next Thread to run, if any; NULL if this execution
1656 Thread * ModelExecution::take_step(ModelAction *curr)
1658 Thread *curr_thrd = get_thread(curr);
1659 ASSERT(curr_thrd->get_state() == THREAD_READY);
1661 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1662 curr = check_current_action(curr);
1665 /* Process this action in ModelHistory for records */
1666 model->get_history()->process_action( curr, curr->get_tid() );
1668 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1669 scheduler->remove_thread(curr_thrd);
1671 return action_select_next_thread(curr);
1674 Fuzzer * ModelExecution::getFuzzer() {