10 #include "nodestack.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
// Bundles the per-execution scalar state (thread-id counter, sequence counter,
// cached backtrack point, bug list, and feasibility flags) so it can be
// saved/restored as one unit between explored executions.
// NOTE(review): several member/initializer lines are elided in this view.
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
32 failed_promise(false),
33 hard_failed_promise(false),
34 too_many_reads(false),
35 no_valid_reads(false),
36 bad_synchronization(false),
// Destructor: frees each heap-allocated bug_message in 'bugs'.
41 ~model_snapshot_members() {
42 for (unsigned int i = 0; i < bugs.size(); i++)
47 unsigned int next_thread_id;
48 modelclock_t used_sequence_numbers;
49 ModelAction *next_backtrack;
50 SnapVector<bug_message *> bugs;
52 bool hard_failed_promise;
55 /** @brief Incorrectly-ordered synchronization was made */
56 bool bad_synchronization;
63 /** @brief Constructor */
// Builds one execution instance: wires up the shared NodeStack, allocates the
// snapshotted member struct and the modification-order cycle graph, then
// creates the special model-checker thread used for internal ModelActions.
64 ModelExecution::ModelExecution(ModelChecker *m,
65 const struct model_params *params,
67 NodeStack *node_stack) :
72 thread_map(2), /* We'll always need at least 2 threads */
74 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 node_stack(node_stack),
82 priv(new struct model_snapshot_members()),
83 mo_graph(new CycleGraph())
85 /* Initialize a model-checker thread, for special ModelActions */
86 model_thread = new Thread(get_next_id()); // L: Create model thread
87 add_thread(model_thread); // L: Add model thread to scheduler
// Let the scheduler and node stack call back into this engine.
88 scheduler->register_engine(this);
89 node_stack->register_engine(this);
92 /** @brief Destructor */
// Releases per-execution resources: every Thread object created during this
// execution and (in elided lines) the outstanding promises.
93 ModelExecution::~ModelExecution()
95 for (unsigned int i = 0; i < get_num_threads(); i++)
96 delete get_thread(int_to_id(i));
98 for (unsigned int i = 0; i < promises.size(); i++)
/** @return the global execution number, delegated to the owning ModelChecker */
105 int ModelExecution::get_execution_number() const
107 return model->get_execution_number();
/** @brief Look up the action list for 'ptr' in 'hash', creating (and, in
 *  elided lines, inserting) an empty list if none exists yet. */
110 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
112 action_list_t *tmp = hash->get(ptr);
114 tmp = new action_list_t();
/** @brief Same get-or-create pattern as get_safe_ptr_action(), but for a
 *  per-thread vector of action lists keyed by object address. */
120 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
122 SnapVector<action_list_t> *tmp = hash->get(ptr);
124 tmp = new SnapVector<action_list_t>();
/** @return the list of actions performed on object 'obj' by thread 'tid',
 *  or (in an elided branch) NULL when no such per-thread list exists. */
130 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
132 SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
135 unsigned int thread=id_to_int(tid);
// Bounds-check: the vector only has entries for threads seen on this object.
136 if (thread < wrv->size())
137 return &(*wrv)[thread];
142 /** @return a thread ID for a new Thread */
// Post-increment: IDs are handed out sequentially starting at INITIAL_THREAD_ID.
143 thread_id_t ModelExecution::get_next_id()
145 return priv->next_thread_id++;
148 /** @return the number of user threads created during this execution */
// next_thread_id doubles as the thread count because IDs are dense from 0.
149 unsigned int ModelExecution::get_num_threads() const
151 return priv->next_thread_id;
154 /** @return a sequence number for a new ModelAction */
// Pre-increment: sequence numbers start at 1 (0 is the initialized value).
155 modelclock_t ModelExecution::get_next_seq_num()
157 return ++priv->used_sequence_numbers;
161 * @brief Should the current action wake up a given thread?
163 * @param curr The current action
164 * @param thread The thread that we might wake up
165 * @return True, if we should wake up the sleeping thread; false otherwise
167 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
// 'asleep' is the pending action the sleeping thread is blocked on.
169 const ModelAction *asleep = thread->get_pending();
170 /* Don't allow partial RMW to wake anyone up */
173 /* Synchronizing actions may have been backtracked */
174 if (asleep->could_synchronize_with(curr))
176 /* All acquire/release fences and fence-acquire/store-release */
177 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
179 /* Fence-release + store can awake load-acquire on the same location */
180 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
181 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only if the writer's fence-release happened after the sleeper's last action.
182 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/** @brief Scan all threads and remove from the sleep set any thread that
 *  'curr' could interact with, per should_wake_up(). */
188 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
190 for (unsigned int i = 0; i < get_num_threads(); i++) {
191 Thread *thr = get_thread(int_to_id(i));
192 if (scheduler->is_sleep_set(thr)) {
193 if (should_wake_up(curr, thr))
194 /* Remove this thread from sleep set */
195 scheduler->remove_sleep(thr);
200 /** @brief Alert the model-checker that an incorrectly-ordered
201 * synchronization was made */
// Sets the snapshotted flag; presumably consulted by feasibility checks — TODO confirm.
202 void ModelExecution::set_bad_synchronization()
204 priv->bad_synchronization = true;
207 /** @brief Alert the model-checker that a bad seq-cst read was made
208 * (records it in the snapshotted bad_sc_read flag) */
209 void ModelExecution::set_bad_sc_read()
211 priv->bad_sc_read = true;
/** @brief Record a bug report for this execution.
 *  @param msg Human-readable bug description (copied into a bug_message).
 *  The feasibility-dependent return is in elided lines. */
214 bool ModelExecution::assert_bug(const char *msg)
216 priv->bugs.push_back(new bug_message(msg));
218 if (isfeasibleprefix()) {
225 /** @return True, if any bugs have been reported for this execution */
226 bool ModelExecution::have_bug_reports() const
228 return priv->bugs.size() != 0;
/** @return the list of bug messages recorded so far (body elided in this view) */
231 SnapVector<bug_message *> * ModelExecution::get_bugs() const
237 * Check whether the current trace has triggered an assertion which should halt
240 * @return True, if the execution should be aborted; false otherwise
242 bool ModelExecution::has_asserted() const
244 return priv->asserted;
248 * Trigger a trace assertion which should cause this execution to be halted.
249 * This can be due to a detected bug or due to an infeasibility that should
252 void ModelExecution::set_assert()
// Checked by has_asserted() in the main work loop to stop processing.
254 priv->asserted = true;
258 * Check if we are in a deadlock. Should only be called at the end of an
259 * execution, although it should not give false positives in the middle of an
260 * execution (there should be some ENABLED thread).
262 * @return True if program is in a deadlock; false otherwise
264 bool ModelExecution::is_deadlocked() const
266 bool blocking_threads = false;
267 for (unsigned int i = 0; i < get_num_threads(); i++) {
268 thread_id_t tid = int_to_id(i);
271 Thread *t = get_thread(tid);
// A user thread with a pending (blocked) action counts toward deadlock.
272 if (!t->is_model_thread() && t->get_pending())
273 blocking_threads = true;
275 return blocking_threads;
279 * @brief Check if we are yield-blocked
281 * A program can be "yield-blocked" if all threads are ready to execute a
284 * @return True if the program is yield-blocked; false otherwise
286 bool ModelExecution::is_yieldblocked() const
// Only meaningful when the yieldblock parameter is enabled.
288 if (!params->yieldblock)
291 for (unsigned int i = 0; i < get_num_threads(); i++) {
292 thread_id_t tid = int_to_id(i);
293 Thread *t = get_thread(tid);
294 if (t->get_pending() && t->get_pending()->is_yield())
301 * Check if this is a complete execution. That is, have all thread completed
302 * execution (rather than exiting because sleep sets have forced a redundant
305 * @return True if the execution is complete.
307 bool ModelExecution::is_complete_execution() const
// Yield-blocked executions are not considered complete.
309 if (is_yieldblocked())
// Any still-enabled thread means the execution has not finished.
311 for (unsigned int i = 0; i < get_num_threads(); i++)
312 if (is_enabled(int_to_id(i)))
318 * @brief Find the last fence-related backtracking conflict for a ModelAction
320 * This function performs the search for the most recent conflicting action
321 * against which we should perform backtracking, as affected by fence
322 * operations. This includes pairs of potentially-synchronizing actions which
323 * occur due to fence-acquire or fence-release, and hence should be explored in
324 * the opposite execution order.
326 * @param act The current action
327 * @return The most recent action which conflicts with act due to fences
329 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
331 /* Only perform release/acquire fence backtracking for stores */
332 if (!act->is_write())
335 /* Find a fence-release (or, act is a release) */
336 ModelAction *last_release;
337 if (act->is_release())
340 last_release = get_last_fence_release(act->get_tid());
344 /* Skip past the release */
345 const action_list_t *list = &action_trace;
346 action_list_t::const_reverse_iterator rit;
// Walk the trace backwards until we stand on the release itself.
347 for (rit = list->rbegin(); rit != list->rend(); rit++)
348 if (*rit == last_release)
350 ASSERT(rit != list->rend());
355 * load --sb-> fence-acquire */
// Per-thread bookkeeping: most recent acquire-fence and the prior same-var load.
356 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
357 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
358 bool found_acquire_fences = false;
// Continue scanning backwards from the release toward the start of the trace.
359 for ( ; rit != list->rend(); rit++) {
360 ModelAction *prev = *rit;
// Skip actions from act's own thread — no self-conflicts.
361 if (act->same_thread(prev))
364 int tid = id_to_int(prev->get_tid());
366 if (prev->is_read() && act->same_var(prev)) {
367 if (prev->is_acquire()) {
368 /* Found most recent load-acquire, don't need
369 * to search for more fences */
370 if (!found_acquire_fences)
373 prior_loads[tid] = prev;
// Record only the first (most recent) acquire-fence seen per thread.
376 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
377 found_acquire_fences = true;
378 acquire_fences[tid] = prev;
// Among threads that have both a fence-acquire and a prior load, pick the
// latest fence (by execution order) as the backtracking point.
382 ModelAction *latest_backtrack = NULL;
383 for (unsigned int i = 0; i < acquire_fences.size(); i++)
384 if (acquire_fences[i] && prior_loads[i])
385 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
386 latest_backtrack = acquire_fences[i];
387 return latest_backtrack;
391 * @brief Find the last backtracking conflict for a ModelAction
393 * This function performs the search for the most recent conflicting action
394 * against which we should perform backtracking. This primary includes pairs of
395 * synchronizing actions which should be explored in the opposite execution
398 * @param act The current action
399 * @return The most recent action which conflicts with act
401 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
// Dispatch on action type; each case scans the per-location action list
// backwards (most recent first) for the nearest conflicting action.
403 switch (act->get_type()) {
405 /* Only seq-cst fences can (directly) cause backtracking */
406 if (!act->is_seqcst())
411 ModelAction *ret = NULL;
413 /* linear search: from most recent to oldest */
414 action_list_t *list = obj_map.get(act->get_location());
415 action_list_t::reverse_iterator rit;
416 for (rit = list->rbegin(); rit != list->rend(); rit++) {
417 ModelAction *prev = *rit;
420 if (prev->could_synchronize_with(act)) {
// Also consider fence-induced conflicts; the merge logic is elided here.
426 ModelAction *ret2 = get_last_fence_conflict(act);
436 case ATOMIC_TRYLOCK: {
437 /* linear search: from most recent to oldest */
438 action_list_t *list = obj_map.get(act->get_location());
439 action_list_t::reverse_iterator rit;
440 for (rit = list->rbegin(); rit != list->rend(); rit++) {
441 ModelAction *prev = *rit;
442 if (act->is_conflicting_lock(prev))
447 case ATOMIC_UNLOCK: {
448 /* linear search: from most recent to oldest */
449 action_list_t *list = obj_map.get(act->get_location());
450 action_list_t::reverse_iterator rit;
451 for (rit = list->rbegin(); rit != list->rend(); rit++) {
452 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock on this mutex.
453 if (!act->same_thread(prev) && prev->is_failed_trylock())
459 /* linear search: from most recent to oldest */
460 action_list_t *list = obj_map.get(act->get_location());
461 action_list_t::reverse_iterator rit;
462 for (rit = list->rbegin(); rit != list->rend(); rit++) {
463 ModelAction *prev = *rit;
464 if (!act->same_thread(prev) && prev->is_failed_trylock())
466 if (!act->same_thread(prev) && prev->is_notify())
472 case ATOMIC_NOTIFY_ALL:
473 case ATOMIC_NOTIFY_ONE: {
474 /* linear search: from most recent to oldest */
475 action_list_t *list = obj_map.get(act->get_location());
476 action_list_t::reverse_iterator rit;
477 for (rit = list->rbegin(); rit != list->rend(); rit++) {
478 ModelAction *prev = *rit;
// A notify conflicts with another thread's wait on the same condvar.
479 if (!act->same_thread(prev) && prev->is_wait())
490 /** This method finds backtracking points where we should try to
491 * reorder the parameter ModelAction against.
493 * @param the ModelAction to find backtracking points for.
495 void ModelExecution::set_backtracking(ModelAction *act)
497 Thread *t = get_thread(act);
// Find the most recent action that conflicts with 'act'; the early-return
// when no conflict exists is elided in this view.
498 ModelAction *prev = get_last_conflict(act);
502 Node *node = prev->get_node()->get_parent();
504 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
// Pick the range of thread IDs to try backtracking into: just act's own
// thread when it was enabled at 'node', otherwise every thread.
505 int low_tid, high_tid;
506 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
507 low_tid = id_to_int(act->get_tid());
508 high_tid = low_tid + 1;
511 high_tid = get_num_threads();
514 for (int i = low_tid; i < high_tid; i++) {
515 thread_id_t tid = int_to_id(i);
517 /* Make sure this thread can be enabled here. */
518 if (i >= node->get_num_threads())
521 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
522 /* Don't backtrack into a point where the thread is disabled or sleeping. */
523 if (node->enabled_status(tid) != THREAD_ENABLED)
526 /* Check if this has been explored already */
527 if (node->has_been_explored(tid))
530 /* See if fairness allows */
531 if (params->fairwindow != 0 && !node->has_priority(tid)) {
// Skip 'tid' if some other enabled thread holds fairness priority.
533 for (int t = 0; t < node->get_num_threads(); t++) {
534 thread_id_t tother = int_to_id(t);
535 if (node->is_enabled(tother) && node->has_priority(tother)) {
544 /* See if CHESS-like yield fairness allows */
545 if (params->yieldon) {
547 for (int t = 0; t < node->get_num_threads(); t++) {
548 thread_id_t tother = int_to_id(t);
549 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
558 /* Cache the latest backtracking point */
559 set_latest_backtrack(prev);
561 /* If this is a new backtracking point, mark the tree */
562 if (!node->set_backtrack(tid))
564 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
565 id_to_int(prev->get_tid()),
566 id_to_int(t->get_id()));
575 * @brief Cache the a backtracking point as the "most recent", if eligible
577 * Note that this does not prepare the NodeStack for this backtracking
578 * operation, it only caches the action on a per-execution basis
580 * @param act The operation at which we should explore a different next action
581 * (i.e., backtracking point)
582 * @return True, if this action is now the most recent backtracking point;
585 bool ModelExecution::set_latest_backtrack(ModelAction *act)
// Replace the cached point only if none exists or 'act' is later in the trace.
587 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
588 priv->next_backtrack = act;
595 * Returns last backtracking point. The model checker will explore a different
596 * path for this point in the next execution.
597 * @return The ModelAction at which the next execution should diverge.
599 ModelAction * ModelExecution::get_next_backtrack()
// Consume-and-clear: the cached point is handed out exactly once.
601 ModelAction *next = priv->next_backtrack;
602 priv->next_backtrack = NULL;
607 * Processes a read model action.
608 * @param curr is the read model action to process.
609 * @return True if processing this read updates the mo_graph.
611 bool ModelExecution::process_read(ModelAction *curr)
613 Node *node = curr->get_node();
615 bool updated = false;
// Dispatch on where this read gets its value: a past write, an existing
// promise, or a future value (which creates a new promise).
616 switch (node->get_read_from_status()) {
617 case READ_FROM_PAST: {
618 const ModelAction *rf = node->get_read_from_past();
621 mo_graph->startChanges();
623 ASSERT(!is_infeasible());
// Recency check failed: either retry with the next read-from candidate
// (rolling back speculative mo edges) or flag too_many_reads.
624 if (!check_recency(curr, rf)) {
625 if (node->increment_read_from()) {
626 mo_graph->rollbackChanges();
629 priv->too_many_reads = true;
633 updated = r_modification_order(curr, rf);
635 mo_graph->commitChanges();
636 mo_check_promises(curr, true);
639 case READ_FROM_PROMISE: {
640 Promise *promise = curr->get_node()->get_read_from_promise();
// add_reader() returning true indicates the promise can no longer be kept.
641 if (promise->add_reader(curr))
642 priv->failed_promise = true;
643 curr->set_read_from_promise(promise);
644 mo_graph->startChanges();
645 if (!check_recency(curr, promise))
646 priv->too_many_reads = true;
647 updated = r_modification_order(curr, promise);
648 mo_graph->commitChanges();
651 case READ_FROM_FUTURE: {
652 /* Read from future value */
// Speculate: promise that some later write will supply this value.
653 struct future_value fv = node->get_future_value();
654 Promise *promise = new Promise(this, curr, fv);
655 curr->set_read_from_promise(promise);
656 promises.push_back(promise);
657 mo_graph->startChanges();
658 updated = r_modification_order(curr, promise);
659 mo_graph->commitChanges();
// Propagate the read's value back to the user thread.
665 get_thread(curr)->set_return_value(curr->get_return_value());
671 * Processes a lock, trylock, or unlock model action. @param curr is
672 * the read model action to process.
674 * The try lock operation checks whether the lock is taken. If not,
675 * it falls to the normal lock operation case. If so, it returns
678 * The lock operation has already been checked that it is enabled, so
679 * it just grabs the lock and synchronizes with the previous unlock.
681 * The unlock operation has to re-enable all of the threads that are
682 * waiting on the lock.
684 * @return True if synchronization was updated; false otherwise
686 bool ModelExecution::process_mutex(ModelAction *curr)
688 std::mutex *mutex = curr->get_mutex();
689 struct std::mutex_state *state = NULL;
692 state = mutex->get_state();
694 switch (curr->get_type()) {
695 case ATOMIC_TRYLOCK: {
// Trylock succeeds exactly when the mutex is currently free.
696 bool success = !state->locked;
697 curr->set_try_lock(success);
699 get_thread(curr)->set_return_value(0);
702 get_thread(curr)->set_return_value(1);
704 //otherwise fall into the lock case
// Detect use of the mutex before its initialization (clock comparison
// against the mutex's allocation clock).
706 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
707 assert_bug("Lock access before initialization");
708 state->locked = get_thread(curr);
709 ModelAction *unlock = get_last_unlock(curr);
710 //synchronize with the previous unlock statement
711 if (unlock != NULL) {
712 synchronize(unlock, curr);
718 case ATOMIC_UNLOCK: {
719 /* wake up the other threads */
720 for (unsigned int i = 0; i < get_num_threads(); i++) {
721 Thread *t = get_thread(int_to_id(i));
722 Thread *curr_thrd = get_thread(curr);
723 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
727 /* unlock the lock - after checking who was waiting on it */
728 state->locked = NULL;
// ATOMIC_WAIT shares the unlock path above, then may also sleep below.
730 if (!curr->is_wait())
731 break; /* The rest is only for ATOMIC_WAIT */
733 /* Should we go to sleep? (simulate spurious failures) */
734 if (curr->get_node()->get_misc() == 0) {
735 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
737 scheduler->sleep(get_thread(curr));
741 case ATOMIC_NOTIFY_ALL: {
742 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
743 //activate all the waiting threads
744 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
745 scheduler->wake(get_thread(*rit));
750 case ATOMIC_NOTIFY_ONE: {
751 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// The node's 'misc' value selects which waiter to wake (explored exhaustively).
752 int wakeupthread = curr->get_node()->get_misc();
753 action_list_t::iterator it = waiters->begin();
754 advance(it, wakeupthread);
755 scheduler->wake(get_thread(*it));
767 * @brief Check if the current pending promises allow a future value to be sent
769 * It is unsafe to pass a future value back if there exists a pending promise Pr
772 * reader --exec-> Pr --exec-> writer
774 * If such Pr exists, we must save the pending future value until Pr is
777 * @param writer The operation which sends the future value. Must be a write.
778 * @param reader The operation which will observe the value. Must be a read.
779 * @return True if the future value can be sent now; false if it must wait.
781 bool ModelExecution::promises_may_allow(const ModelAction *writer,
782 const ModelAction *reader) const
// Iterate newest-to-oldest so the first promise before 'reader' ends the scan.
784 for (int i = promises.size() - 1; i >= 0; i--) {
785 ModelAction *pr = promises[i]->get_reader(0);
786 //reader is after promise...doesn't cross any promise
789 //writer is after promise, reader before...bad...
797 * @brief Add a future value to a reader
799 * This function performs a few additional checks to ensure that the future
800 * value can be feasibly observed by the reader
802 * @param writer The operation whose value is sent. Must be a write.
803 * @param reader The read operation which may read the future value. Must be a read.
805 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
807 /* Do more ambitious checks now that mo is more complete */
// Bail out (elided) if modification order forbids reader observing writer.
808 if (!mo_may_allow(writer, reader))
811 Node *node = reader->get_node();
813 /* Find an ancestor thread which exists at the time of the reader */
814 Thread *write_thread = get_thread(writer);
815 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
816 write_thread = write_thread->get_parent();
818 struct future_value fv = {
819 writer->get_write_value(),
820 writer->get_seq_number() + params->maxfuturedelay,
821 write_thread->get_id(),
// If this is a new future value for the node, make the reader a backtrack point.
823 if (node->add_future_value(fv))
824 set_latest_backtrack(reader);
828 * Process a write ModelAction
829 * @param curr The ModelAction to process
830 * @param work The work queue, for adding fixup work
831 * @return True if the mo_graph was updated or promises were resolved
833 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
835 /* Readers to which we may send our future value */
836 ModelVector<ModelAction *> send_fv;
838 const ModelAction *earliest_promise_reader;
839 bool updated_promises = false;
// Update write modification order; collects candidate future-value readers.
841 bool updated_mod_order = w_modification_order(curr, &send_fv);
842 Promise *promise = pop_promise_to_resolve(curr);
845 earliest_promise_reader = promise->get_reader(0);
846 updated_promises = resolve_promise(curr, promise, work);
848 earliest_promise_reader = NULL;
850 for (unsigned int i = 0; i < send_fv.size(); i++) {
851 ModelAction *read = send_fv[i];
853 /* Don't send future values to reads after the Promise we resolve */
854 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
855 /* Check if future value can be sent immediately */
856 if (promises_may_allow(curr, read)) {
857 add_future_value(curr, read);
// Otherwise defer: queue the pair until pending promises allow it.
859 futurevalues.push_back(PendingFutureValue(curr, read));
864 /* Check the pending future values */
// Reverse iteration so erase() doesn't disturb unvisited indices.
865 for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
866 struct PendingFutureValue pfv = futurevalues[i];
867 if (promises_may_allow(pfv.writer, pfv.reader)) {
868 add_future_value(pfv.writer, pfv.reader);
869 futurevalues.erase(futurevalues.begin() + i);
873 mo_graph->commitChanges();
874 mo_check_promises(curr, false);
876 get_thread(curr)->set_return_value(VALUE_NONE);
877 return updated_mod_order || updated_promises;
881 * Process a fence ModelAction
882 * @param curr The ModelAction to process
883 * @return True if synchronization was updated
885 bool ModelExecution::process_fence(ModelAction *curr)
888 * fence-relaxed: no-op
889 * fence-release: only log the occurence (not in this function), for
890 * use in later synchronization
891 * fence-acquire (this function): search for hypothetical release
893 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
895 bool updated = false;
896 if (curr->is_acquire()) {
897 action_list_t *list = &action_trace;
898 action_list_t::reverse_iterator rit;
899 /* Find X : is_read(X) && X --sb-> curr */
// Scan the trace backwards, restricted to curr's own thread.
900 for (rit = list->rbegin(); rit != list->rend(); rit++) {
901 ModelAction *act = *rit;
904 if (act->get_tid() != curr->get_tid())
906 /* Stop at the beginning of the thread */
907 if (act->is_thread_start())
909 /* Stop once we reach a prior fence-acquire */
910 if (act->is_fence() && act->is_acquire())
914 /* read-acquire will find its own release sequences */
915 if (act->is_acquire())
918 /* Establish hypothetical release sequences */
919 rel_heads_list_t release_heads;
920 get_release_seq_heads(curr, act, &release_heads);
// The fence synchronizes with every release head found for this read.
921 for (unsigned int i = 0; i < release_heads.size(); i++)
922 synchronize(release_heads[i], curr);
923 if (release_heads.size() != 0)
931 * @brief Process the current action for thread-related activity
933 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
934 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
935 * synchronization, etc. This function is a no-op for non-THREAD actions
936 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
938 * @param curr The current action
939 * @return True if synchronization was updated or a thread completed
941 bool ModelExecution::process_thread_action(ModelAction *curr)
943 bool updated = false;
945 switch (curr->get_type()) {
946 case THREAD_CREATE: {
// Unpack the thrd_t handle and start-routine parameters carried by the action.
947 thrd_t *thrd = (thrd_t *)curr->get_location();
948 struct thread_params *params = (struct thread_params *)curr->get_value();
949 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
951 th->set_creation(curr);
952 /* Promises can be satisfied by children */
953 for (unsigned int i = 0; i < promises.size(); i++) {
954 Promise *promise = promises[i];
955 if (promise->thread_is_available(curr->get_tid()))
956 promise->add_thread(th->get_id());
// (THREAD_JOIN case; label elided) synchronize with the joined thread's last action.
961 Thread *blocking = curr->get_thread_operand();
962 ModelAction *act = get_last_action(blocking->get_id());
963 synchronize(act, curr);
964 updated = true; /* trigger rel-seq checks */
967 case THREAD_FINISH: {
968 Thread *th = get_thread(curr);
969 /* Wake up any joining threads */
970 for (unsigned int i = 0; i < get_num_threads(); i++) {
971 Thread *waiting = get_thread(int_to_id(i));
972 if (waiting->waiting_on() == th &&
973 waiting->get_pending()->is_thread_join())
974 scheduler->wake(waiting);
977 /* Completed thread can't satisfy promises */
978 for (unsigned int i = 0; i < promises.size(); i++) {
979 Promise *promise = promises[i];
980 if (promise->thread_is_available(th->get_id()))
981 if (promise->eliminate_thread(th->get_id()))
982 priv->failed_promise = true;
984 updated = true; /* trigger rel-seq checks */
// (THREAD_START case; label elided) re-validate promises for the new thread.
988 check_promises(curr->get_tid(), NULL, curr->get_cv());
999 * @brief Process the current action for release sequence fixup activity
1001 * Performs model-checker release sequence fixups for the current action,
1002 * forcing a single pending release sequence to break (with a given, potential
1003 * "loose" write) or to complete (i.e., synchronize). If a pending release
1004 * sequence forms a complete release sequence, then we must perform the fixup
1005 * synchronization, mo_graph additions, etc.
1007 * @param curr The current action; must be a release sequence fixup action
1008 * @param work_queue The work queue to which to add work items as they are
1011 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// 'write' == NULL means "complete the sequence"; non-NULL means "break it".
1013 const ModelAction *write = curr->get_node()->get_relseq_break();
1014 struct release_seq *sequence = pending_rel_seqs.back();
1015 pending_rel_seqs.pop_back();
1017 ModelAction *acquire = sequence->acquire;
1018 const ModelAction *rf = sequence->rf;
1019 const ModelAction *release = sequence->release;
1023 ASSERT(release->same_thread(rf));
1025 if (write == NULL) {
1027 * @todo Forcing a synchronization requires that we set
1028 * modification order constraints. For instance, we can't allow
1029 * a fixup sequence in which two separate read-acquire
1030 * operations read from the same sequence, where the first one
1031 * synchronizes and the other doesn't. Essentially, we can't
1032 * allow any writes to insert themselves between 'release' and
1036 /* Must synchronize */
1037 if (!synchronize(release, acquire))
1040 /* Propagate the changed clock vector */
1041 propagate_clockvector(acquire, work_queue)
1043 /* Break release sequence with new edges:
1044 * release --mo--> write --mo--> rf */
1045 mo_graph->addEdge(release, write);
1046 mo_graph->addEdge(write, rf);
1049 /* See if we have realized a data race */
1054 * Initialize the current action by performing one or more of the following
1055 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1056 * in the NodeStack, manipulating backtracking sets, allocating and
1057 * initializing clock vectors, and computing the promises to fulfill.
1059 * @param curr The current action, as passed from the user context; may be
1060 * freed/invalidated after the execution of this function, with a different
1061 * action "returned" its place (pass-by-reference)
1062 * @return True if curr is a newly-explored action; false otherwise
1064 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1066 ModelAction *newcurr;
// Second half of an RMW: merge it into the earlier RMWR action.
1068 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1069 newcurr = process_rmw(*curr);
1072 if (newcurr->is_rmw())
1073 compute_promises(newcurr);
// Fresh action path: stamp a sequence number and step the NodeStack.
1079 (*curr)->set_seq_number(get_next_seq_num());
1081 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1083 /* First restore type and order in case of RMW operation */
1084 if ((*curr)->is_rmwr())
1085 newcurr->copy_typeandorder(*curr);
1087 ASSERT((*curr)->get_location() == newcurr->get_location());
1088 newcurr->copy_from_new(*curr);
1090 /* Discard duplicate ModelAction; use action from NodeStack */
1093 /* Always compute new clock vector */
1094 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1097 return false; /* Action was explored previously */
1101 /* Always compute new clock vector */
1102 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1104 /* Assign most recent release fence */
1105 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1108 * Perform one-time actions when pushing new ModelAction onto
1111 if (newcurr->is_write())
1112 compute_promises(newcurr);
1113 else if (newcurr->is_relseq_fixup())
1114 compute_relseq_breakwrites(newcurr);
1115 else if (newcurr->is_wait())
1116 newcurr->get_node()->set_misc_max(2);
1117 else if (newcurr->is_notify_one()) {
// notify_one explores each possible waiter choice via the node's misc counter.
1118 newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1120 return true; /* This was a new ModelAction */
1125 * @brief Establish reads-from relation between two actions
1127 * Perform basic operations involved with establishing a concrete rf relation,
1128 * including setting the ModelAction data and checking for release sequences.
1130 * @param act The action that is reading (must be a read)
1131 * @param rf The action from which we are reading (must be a write)
1133 * @return True if this read established synchronization
1136 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1139 ASSERT(rf->is_write());
1141 act->set_read_from(rf);
// Acquire reads synchronize with every head of the release sequence they read.
1142 if (act->is_acquire()) {
1143 rel_heads_list_t release_heads;
1144 get_release_seq_heads(act, act, &release_heads);
1145 int num_heads = release_heads.size();
1146 for (unsigned int i = 0; i < release_heads.size(); i++)
1147 if (!synchronize(release_heads[i], act))
1149 return num_heads > 0;
1155 * @brief Synchronizes two actions
1157 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1158 * This function performs the synchronization as well as providing other hooks
1159 * for other checks along with synchronization.
1161 * @param first The left-hand side of the synchronizes-with relation
1162 * @param second The right-hand side of the synchronizes-with relation
1163 * @return True if the synchronization was successful (i.e., was consistent
1164 * with the execution order); false otherwise
1166 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// Synchronizing backwards in execution order is invalid; flag and (in an
// elided line) fail.
1168 if (*second < *first) {
1169 set_bad_synchronization();
1172 check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1173 return second->synchronize_with(first);
1177 * Check promises and eliminate potentially-satisfying threads when a thread is
1178 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1179 * no longer satisfy a promise generated from that thread.
1181 * @param blocker The thread on which a thread is waiting
1182 * @param waiting The waiting thread
1184 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1186 for (unsigned int i = 0; i < promises.size(); i++) {
1187 Promise *promise = promises[i];
1188 if (!promise->thread_is_available(waiting->get_id()))
// Only promises whose reader belongs to the blocker are affected.
1190 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1191 ModelAction *reader = promise->get_reader(j);
1192 if (reader->get_tid() != blocker->get_id())
1194 if (promise->eliminate_thread(waiting->get_id())) {
1195 /* Promise has failed */
1196 priv->failed_promise = true;
1198 /* Only eliminate the 'waiting' thread once */
1206 * @brief Check whether a model action is enabled.
1208 * Checks whether an operation would be successful (i.e., is a lock already
1209 * locked, or is the joined thread already complete).
1211 * For yield-blocking, yields are never enabled.
1213 * @param curr is the ModelAction to check whether it is enabled.
1214 * @return a bool that indicates whether the action is enabled.
1216 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1217 if (curr->is_lock()) {
1218 std::mutex *lock = curr->get_mutex();
1219 struct std::mutex_state *state = lock->get_state();
1222 } else if (curr->is_thread_join()) {
1223 Thread *blocking = curr->get_thread_operand();
// Joining an incomplete thread blocks the joiner; update promise bookkeeping.
1224 if (!blocking->is_complete()) {
1225 thread_blocking_check_promises(blocking, get_thread(curr));
1228 } else if (params->yieldblock && curr->is_yield()) {
1236 * This is the heart of the model checker routine. It performs model-checking
1237 * actions corresponding to a given "current action." Among other processes, it
1238 * calculates reads-from relationships, updates synchronization clock vectors,
1239 * forms a memory_order constraints graph, and handles replay/backtrack
1240 * execution when running permutations of previously-observed executions.
1242 * @param curr The current action to process
1243 * @return The ModelAction that is actually executed; may be different than
1246 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// RMW "completion" actions are the second half of a read-modify-write; they
// must not be re-added to the action lists below.
1249 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
// initialize_curr_action may swap 'curr' for an existing (replayed) action.
1250 bool newly_explored = initialize_curr_action(&curr);
1254 wake_up_sleeping_actions(curr);
1256 /* Compute fairness information for CHESS yield algorithm */
1257 if (params->yieldon) {
1258 curr->get_node()->update_yield(scheduler);
1261 /* Add the action to lists before any other model-checking tasks */
1262 if (!second_part_of_rmw)
1263 add_action_to_lists(curr);
1265 /* Build may_read_from set for newly-created actions */
1266 if (newly_explored && curr->is_read())
1267 build_may_read_from(curr);
1269 /* Initialize work_queue with the "current action" work */
1270 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
// Drain the work queue to a fixed point; each processing step may enqueue
// further release-sequence or mo-edge re-checks.
1271 while (!work_queue.empty() && !has_asserted()) {
1272 WorkQueueEntry work = work_queue.front();
1273 work_queue.pop_front();
1275 switch (work.type) {
1276 case WORK_CHECK_CURR_ACTION: {
1277 ModelAction *act = work.action;
1278 bool update = false; /* update this location's release seq's */
1279 bool update_all = false; /* update all release seq's */
// NOTE(review): the assignments to update/update_all on the branches below
// appear elided in this copy — verify against upstream.
1281 if (process_thread_action(curr))
1284 if (act->is_read() && !second_part_of_rmw && process_read(act))
1287 if (act->is_write() && process_write(act, &work_queue))
1290 if (act->is_fence() && process_fence(act))
1293 if (act->is_mutex_op() && process_mutex(act))
1296 if (act->is_relseq_fixup())
1297 process_relseq_fixup(curr, &work_queue);
// A NULL location means "re-check release sequences at all locations".
1300 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1302 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1305 case WORK_CHECK_RELEASE_SEQ:
1306 resolve_release_sequences(work.location, &work_queue);
1308 case WORK_CHECK_MO_EDGES: {
1309 /** @todo Complete verification of work_queue */
1310 ModelAction *act = work.action;
1311 bool updated = false;
1313 if (act->is_read()) {
1314 const ModelAction *rf = act->get_reads_from();
1315 const Promise *promise = act->get_reads_from_promise();
1317 if (r_modification_order(act, rf))
// An SC read must not read from a write that happens-before the last SC
// write to the same location.
1319 if (act->is_seqcst()) {
1320 ModelAction *last_sc_write = get_last_seq_cst_write(act);
1321 if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
1325 } else if (promise) {
1326 if (r_modification_order(act, promise))
1330 if (act->is_write()) {
1331 if (w_modification_order(act, NULL))
1334 mo_graph->commitChanges();
1337 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// Record backtracking points discovered while processing this action.
1346 check_curr_backtracking(curr);
1347 set_backtracking(curr);
// Record 'curr' as the latest backtracking point if its Node (or its
// parent's) still has unexplored choices: backtracks, misc choices,
// reads-from candidates, promises, or release-sequence breaks.
1351 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1353 Node *currnode = curr->get_node();
1354 Node *parnode = currnode->get_parent();
1356 if ((parnode && !parnode->backtrack_empty()) ||
1357 !currnode->misc_empty() ||
1358 !currnode->read_from_empty() ||
1359 !currnode->promise_empty() ||
1360 !currnode->relseq_break_empty()) {
1361 set_latest_backtrack(curr);
// @return True if any outstanding promise's expiration has passed the
// current sequence-number count (i.e., it can no longer be resolved in time).
1365 bool ModelExecution::promises_expired() const
1367 for (unsigned int i = 0; i < promises.size(); i++) {
1368 Promise *promise = promises[i];
1369 if (promise->get_expiration() < priv->used_sequence_numbers)
1376 * This is the strongest feasibility check available.
1377 * @return whether the current trace (partial or complete) must be a prefix of
1380 bool ModelExecution::isfeasibleprefix() const
// Feasible-prefix additionally requires that no release sequences are still
// pending lazy resolution.
1382 return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1386 * Print disagnostic information about an infeasible execution
1387 * @param prefix A string to prefix the output with; if NULL, then a default
1388 * message prefix will be provided
1390 void ModelExecution::print_infeasibility(const char *prefix) const
// NOTE(review): the local buffer declarations (buf/ptr) appear elided in this
// copy. Each matching infeasibility cause appends one tag to the buffer.
1394 if (mo_graph->checkForCycles())
1395 ptr += sprintf(ptr, "[mo cycle]");
1396 if (priv->failed_promise || priv->hard_failed_promise)
1397 ptr += sprintf(ptr, "[failed promise]");
1398 if (priv->too_many_reads)
1399 ptr += sprintf(ptr, "[too many reads]");
1400 if (priv->no_valid_reads)
1401 ptr += sprintf(ptr, "[no valid reads-from]");
1402 if (priv->bad_synchronization)
1403 ptr += sprintf(ptr, "[bad sw ordering]");
1404 if (priv->bad_sc_read)
1405 ptr += sprintf(ptr, "[bad sc read]");
1406 if (promises_expired())
1407 ptr += sprintf(ptr, "[promise expired]");
1408 if (promises.size() != 0)
1409 ptr += sprintf(ptr, "[unresolved promise]");
1411 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1415 * Returns whether the current completed trace is feasible, except for pending
1416 * release sequences.
1418 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
// Stronger than is_infeasible(): also requires every promise resolved and no
// soft promise failure recorded.
1420 return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1425 * Check if the current partial trace is infeasible. Does not check any
1426 * end-of-execution flags, which might rule out the execution. Thus, this is
1427 * useful only for ruling an execution as infeasible.
1428 * @return whether the current partial trace is infeasible.
1430 bool ModelExecution::is_infeasible() const
// Any single recorded violation makes the whole partial trace infeasible.
1432 return mo_graph->checkForCycles() ||
1433 priv->no_valid_reads ||
1434 priv->too_many_reads ||
1435 priv->bad_synchronization ||
1436 priv->bad_sc_read ||
// Note: only *hard* promise failure counts here; soft failure is checked by
// is_feasible_prefix_ignore_relseq().
1437 priv->hard_failed_promise ||
1441 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1442 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
// The pending RMWR is the last action issued by this thread.
1443 ModelAction *lastread = get_last_action(act->get_tid());
1444 lastread->process_rmw(act);
1445 if (act->is_rmw()) {
// A successful RMW must be mo-ordered immediately after whatever it read
// from (write or promise); record that edge in the mo_graph.
1446 if (lastread->get_reads_from())
1447 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1449 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1450 mo_graph->commitChanges();
1456 * A helper function for ModelExecution::check_recency, to check if the current
1457 * thread is able to read from a different write/promise for 'params.maxreads'
1458 * number of steps and if that write/promise should become visible (i.e., is
1459 * ordered later in the modification order). This helps model memory liveness.
1461 * @param curr The current action. Must be a read.
1462 * @param rf The write/promise from which we plan to read
1463 * @param other_rf The write/promise from which we may read
1464 * @return True if we were able to read from other_rf for params.maxreads steps
1466 template <typename T, typename U>
1467 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1469 /* Need a different write/promise */
1470 if (other_rf->equals(rf))
1473 /* Only look for "newer" writes/promises */
// "Newer" == reachable from rf in the modification-order graph.
1474 if (!mo_graph->checkReachable(rf, other_rf))
1477 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1478 action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1479 action_list_t::reverse_iterator rit = list->rbegin();
1480 ASSERT((*rit) == curr);
1481 /* Skip past curr */
1484 /* Does this write/promise work for everyone? */
// Walk backwards over this thread's last maxreads reads; all of them must
// have been able to read from other_rf.
1485 for (int i = 0; i < params->maxreads; i++, rit++) {
1486 ModelAction *act = *rit;
1487 if (!act->may_read_from(other_rf))
1494 * Checks whether a thread has read from the same write or Promise for too many
1495 * times without seeing the effects of a later write/Promise.
1498 * 1) there must a different write/promise that we could read from,
1499 * 2) we must have read from the same write/promise in excess of maxreads times,
1500 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1501 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1503 * If so, we decide that the execution is no longer feasible.
1505 * @param curr The current action. Must be a read.
1506 * @param rf The ModelAction/Promise from which we might read.
1507 * @return True if the read should succeed; false otherwise
1509 template <typename T>
1510 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
// maxreads == 0 disables the liveness check entirely.
1512 if (!params->maxreads)
1515 //NOTE: Next check is just optimization, not really necessary....
1516 if (curr->get_node()->get_read_from_past_size() +
1517 curr->get_node()->get_read_from_promise_size() <= 1)
1520 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1521 int tid = id_to_int(curr->get_tid());
1522 ASSERT(tid < (int)thrd_lists->size());
1523 action_list_t *list = &(*thrd_lists)[tid];
1524 action_list_t::reverse_iterator rit = list->rbegin();
1525 ASSERT((*rit) == curr);
1526 /* Skip past curr */
1529 action_list_t::reverse_iterator ritcopy = rit;
1530 /* See if we have enough reads from the same value */
// Bail out (allow the read) as soon as any of the last maxreads actions
// breaks the "same write/promise, read repeatedly" pattern.
1531 for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1532 if (ritcopy == list->rend())
1534 ModelAction *act = *ritcopy;
1535 if (!act->is_read())
1537 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1539 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1541 if (act->get_node()->get_read_from_past_size() +
1542 act->get_node()->get_read_from_promise_size() <= 1)
// We read the same source too many times: see if any other candidate
// write/promise would have been readable throughout (liveness violation).
1545 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1546 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1547 if (should_read_instead(curr, rf, write))
1548 return false; /* liveness failure */
1550 for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1551 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1552 if (should_read_instead(curr, rf, promise))
1553 return false; /* liveness failure */
1559 * @brief Updates the mo_graph with the constraints imposed from the current
1562 * Basic idea is the following: Go through each other thread and find
1563 * the last action that happened before our read. Two cases:
1565 * -# The action is a write: that write must either occur before
1566 * the write we read from or be the write we read from.
1567 * -# The action is a read: the write that that action read from
1568 * must occur before the write we read from or be the same write.
1570 * @param curr The current action. Must be a read.
1571 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1572 * @return True if modification order edges were added; false otherwise
1574 template <typename rf_type>
1575 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1577 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1580 ASSERT(curr->is_read());
1582 /* Last SC fence in the current thread */
1583 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1584 ModelAction *last_sc_write = NULL;
1585 if (curr->is_seqcst())
1586 last_sc_write = get_last_seq_cst_write(curr);
1588 /* Iterate over all threads */
1589 for (i = 0; i < thrd_lists->size(); i++) {
1590 /* Last SC fence in thread i */
1591 ModelAction *last_sc_fence_thread_local = NULL;
// Only relevant for threads other than the reading thread.
1592 if (int_to_id((int)i) != curr->get_tid())
1593 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1595 /* Last SC fence in thread i, before last SC fence in current thread */
1596 ModelAction *last_sc_fence_thread_before = NULL;
1597 if (last_sc_fence_local)
1598 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1600 /* Iterate over actions in thread, starting from most recent */
1601 action_list_t *list = &(*thrd_lists)[i];
1602 action_list_t::reverse_iterator rit;
1603 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1604 ModelAction *act = *rit;
1609 /* Don't want to add reflexive edges on 'rf' */
1610 if (act->equals(rf)) {
1611 if (act->happens_before(curr))
// The numbered cases below add mo edges mandated by the C++11 SC-fence
// rules (standard section references are to N3337/ISO C++11 29.3).
1617 if (act->is_write()) {
1618 /* C++, Section 29.3 statement 5 */
1619 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1620 *act < *last_sc_fence_thread_local) {
1621 added = mo_graph->addEdge(act, rf) || added;
1624 /* C++, Section 29.3 statement 4 */
1625 else if (act->is_seqcst() && last_sc_fence_local &&
1626 *act < *last_sc_fence_local) {
1627 added = mo_graph->addEdge(act, rf) || added;
1630 /* C++, Section 29.3 statement 6 */
1631 else if (last_sc_fence_thread_before &&
1632 *act < *last_sc_fence_thread_before) {
1633 added = mo_graph->addEdge(act, rf) || added;
1639 * Include at most one act per-thread that "happens
1642 if (act->happens_before(curr)) {
1643 if (act->is_write()) {
1644 added = mo_graph->addEdge(act, rf) || added;
// A prior read orders the write *it* read from before rf (unless equal).
1646 const ModelAction *prevrf = act->get_reads_from();
1647 const Promise *prevrf_promise = act->get_reads_from_promise();
1649 if (!prevrf->equals(rf))
1650 added = mo_graph->addEdge(prevrf, rf) || added;
1651 } else if (!prevrf_promise->equals(rf)) {
1652 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1661 * All compatible, thread-exclusive promises must be ordered after any
1662 * concrete loads from the same thread
1664 for (unsigned int i = 0; i < promises.size(); i++)
1665 if (promises[i]->is_compatible_exclusive(curr))
1666 added = mo_graph->addEdge(rf, promises[i]) || added;
1672 * Updates the mo_graph with the constraints imposed from the current write.
1674 * Basic idea is the following: Go through each other thread and find
1675 * the lastest action that happened before our write. Two cases:
1677 * (1) The action is a write => that write must occur before
1680 * (2) The action is a read => the write that that action read from
1681 * must occur before the current write.
1683 * This method also handles two other issues:
1685 * (I) Sequential Consistency: Making sure that if the current write is
1686 * seq_cst, that it occurs after the previous seq_cst write.
1688 * (II) Sending the write back to non-synchronizing reads.
1690 * @param curr The current action. Must be a write.
1691 * @param send_fv A vector for stashing reads to which we may pass our future
1692 * value. If NULL, then don't record any future values.
1693 * @return True if modification order edges were added; false otherwise
1695 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1697 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1700 ASSERT(curr->is_write());
1702 if (curr->is_seqcst()) {
1703 /* We have to at least see the last sequentially consistent write,
1704 so we are initialized. */
1705 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1706 if (last_seq_cst != NULL) {
1707 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1711 /* Last SC fence in the current thread */
1712 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1714 /* Iterate over all threads */
1715 for (i = 0; i < thrd_lists->size(); i++) {
1716 /* Last SC fence in thread i, before last SC fence in current thread */
1717 ModelAction *last_sc_fence_thread_before = NULL;
1718 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1719 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1721 /* Iterate over actions in thread, starting from most recent */
1722 action_list_t *list = &(*thrd_lists)[i];
1723 action_list_t::reverse_iterator rit;
1724 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1725 ModelAction *act = *rit;
1728 * 1) If RMW and it actually read from something, then we
1729 * already have all relevant edges, so just skip to next
1732 * 2) If RMW and it didn't read from anything, we should
1733 * whatever edge we can get to speed up convergence.
1735 * 3) If normal write, we need to look at earlier actions, so
1736 * continue processing list.
1738 if (curr->is_rmw()) {
// RMW edges were already added by process_rmw(); nothing more needed when
// a concrete reads-from exists.
1739 if (curr->get_reads_from() != NULL)
1747 /* C++, Section 29.3 statement 7 */
1748 if (last_sc_fence_thread_before && act->is_write() &&
1749 *act < *last_sc_fence_thread_before) {
1750 added = mo_graph->addEdge(act, curr) || added;
1755 * Include at most one act per-thread that "happens
1758 if (act->happens_before(curr)) {
1760 * Note: if act is RMW, just add edge:
1762 * The following edge should be handled elsewhere:
1763 * readfrom(act) --mo--> act
1765 if (act->is_write())
1766 added = mo_graph->addEdge(act, curr) || added;
1767 else if (act->is_read()) {
1768 //if previous read accessed a null, just keep going
1769 if (act->get_reads_from() == NULL) {
// The read's source is still a promise; order the promise before curr.
1770 added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
1772 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1775 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1776 !act->same_thread(curr)) {
1777 /* We have an action that:
1778 (1) did not happen before us
1779 (2) is a read and we are a write
1780 (3) cannot synchronize with us
1781 (4) is in a different thread
1783 that read could potentially read from our write. Note that
1784 these checks are overly conservative at this point, we'll
1785 do more checks before actually removing the
1790 if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
// Stash this read as a future-value target only while still feasible.
1791 if (!is_infeasible())
1792 send_fv->push_back(act);
1793 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1794 add_future_value(curr, act);
1801 * All compatible, thread-exclusive promises must be ordered after any
1802 * concrete stores to the same thread, or else they can be merged with
1805 for (unsigned int i = 0; i < promises.size(); i++)
1806 if (promises[i]->is_compatible_exclusive(curr))
1807 added = mo_graph->addEdge(curr, promises[i]) || added;
1812 //This procedure uses cohere to prune future values that are
1813 //guaranteed to generate a coherence violation.
1815 //need to see if there is (1) a promise for thread write, (2)
1816 //the promise is sb before write, (3) the promise can only be
1817 //resolved by the thread read, and (4) the promise has same
1818 //location as read/write
1820 bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
1821 thread_id_t write_tid=write->get_tid();
// Walk promises newest-to-oldest.
1822 for(unsigned int i = promises.size(); i>0; i--) {
1823 Promise *pr=promises[i-1];
1824 if (!pr->same_location(write))
1826 //the reading thread is the only thread that can resolve the promise
1827 if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
1828 for(unsigned int j=0;j<pr->get_num_readers();j++) {
1829 ModelAction *prreader=pr->get_reader(j);
1830 //the writing thread reads from the promise before the write
1831 if (prreader->get_tid()==write_tid &&
1832 (*prreader)<(*write)) {
1833 if ((*read)>(*prreader)) {
1834 //check that we don't have a read between the read and promise
1835 //from the same thread as read
// Walk up the Node tree from 'read' toward 'prreader'.
1837 for(const ModelAction *tmp=read;tmp!=prreader;) {
1838 tmp=tmp->get_node()->get_parent()->get_action();
1839 if (tmp->is_read() && tmp->same_thread(read)) {
1856 /** Arbitrary reads from the future are not allowed. Section 29.3
1857 * part 9 places some constraints. This method checks one result of constraint
1858 * constraint. Others require compiler support. */
1859 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
// The constraint only concerns RMW-to-RMW future reads.
1861 if (!writer->is_rmw())
1864 if (!reader->is_rmw())
// Follow the writer's reads-from chain; if it leads back to 'reader' (or to
// an action in reader's thread that happens-before it), the future read
// would create a thin-air cycle.
1867 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1868 if (search == reader)
1870 if (search->get_tid() == reader->get_tid() &&
1871 search->happens_before(reader))
1879 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1880 * some constraints. This method checks one the following constraint (others
1881 * require compiler support):
1883 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1884 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1886 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1888 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1890 /* Iterate over all threads */
1891 for (i = 0; i < thrd_lists->size(); i++) {
// The earliest write (in this thread) known to be mo-after 'reader'.
1892 const ModelAction *write_after_read = NULL;
1894 /* Iterate over actions in thread, starting from most recent */
1895 action_list_t *list = &(*thrd_lists)[i];
1896 action_list_t::reverse_iterator rit;
1897 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1898 ModelAction *act = *rit;
1900 /* Don't disallow due to act == reader */
1901 if (!reader->happens_before(act) || reader == act)
1903 else if (act->is_write())
1904 write_after_read = act;
1905 else if (act->is_read() && act->get_reads_from() != NULL)
1906 write_after_read = act->get_reads_from();
// If 'writer' is mo-after a write that reader happens-before, then reading
// from 'writer' would violate the constraint above.
1909 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1916 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1917 * The ModelAction under consideration is expected to be taking part in
1918 * release/acquire synchronization as an object of the "reads from" relation.
1919 * Note that this can only provide release sequence support for RMW chains
1920 * which do not read from the future, as those actions cannot be traced until
1921 * their "promise" is fulfilled. Similarly, we may not even establish the
1922 * presence of a release sequence with certainty, as some modification order
1923 * constraints may be decided further in the future. Thus, this function
1924 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1925 * and a boolean representing certainty.
1927 * @param rf The action that might be part of a release sequence. Must be a
1929 * @param release_heads A pass-by-reference style return parameter. After
1930 * execution of this function, release_heads will contain the heads of all the
1931 * relevant release sequences, if any exists with certainty
1932 * @param pending A pass-by-reference style return parameter which is only used
1933 * when returning false (i.e., uncertain). Returns most information regarding
1934 * an uncertain release sequence, including any write operations that might
1935 * break the sequence.
1936 * @return true, if the ModelExecution is certain that release_heads is complete;
1939 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1940 rel_heads_list_t *release_heads,
1941 struct release_seq *pending) const
1943 /* Only check for release sequences if there are no cycles */
1944 if (mo_graph->checkForCycles())
// Phase 1: walk the RMW chain backwards, collecting release heads.
1947 for ( ; rf != NULL; rf = rf->get_reads_from()) {
1948 ASSERT(rf->is_write());
1950 if (rf->is_release())
1951 release_heads->push_back(rf);
1952 else if (rf->get_last_fence_release())
1953 release_heads->push_back(rf->get_last_fence_release());
1955 break; /* End of RMW chain */
1957 /** @todo Need to be smarter here... In the linux lock
1958 * example, this will run to the beginning of the program for
1960 /** @todo The way to be smarter here is to keep going until 1
1961 * thread has a release preceded by an acquire and you've seen
1964 /* acq_rel RMW is a sufficient stopping condition */
1965 if (rf->is_acquire() && rf->is_release())
1966 return true; /* complete */
1969 /* read from future: need to settle this later */
1971 return false; /* incomplete */
1974 if (rf->is_release())
1975 return true; /* complete */
1977 /* else relaxed write
1978 * - check for fence-release in the same thread (29.8, stmt. 3)
1979 * - check modification order for contiguous subsequence
1980 * -> rf must be same thread as release */
1982 const ModelAction *fence_release = rf->get_last_fence_release();
1983 /* Synchronize with a fence-release unconditionally; we don't need to
1984 * find any more "contiguous subsequence..." for it */
1986 release_heads->push_back(fence_release);
// Phase 2: search rf's own thread for the most recent write-release.
1988 int tid = id_to_int(rf->get_tid());
1989 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1990 action_list_t *list = &(*thrd_lists)[tid];
1991 action_list_t::const_reverse_iterator rit;
1993 /* Find rf in the thread list */
1994 rit = std::find(list->rbegin(), list->rend(), rf);
1995 ASSERT(rit != list->rend());
1997 /* Find the last {write,fence}-release */
1998 for (; rit != list->rend(); rit++) {
1999 if (fence_release && *(*rit) < *fence_release)
2001 if ((*rit)->is_release())
2004 if (rit == list->rend()) {
2005 /* No write-release in this thread */
2006 return true; /* complete */
2007 } else if (fence_release && *(*rit) < *fence_release) {
2008 /* The fence-release is more recent (and so, "stronger") than
2009 * the most recent write-release */
2010 return true; /* complete */
2011 } /* else, need to establish contiguous release sequence */
2012 ModelAction *release = *rit;
2014 ASSERT(rf->same_thread(release));
2016 pending->writes.clear();
// Phase 3: for every other thread, ensure no (future) write can slip into
// the modification order between 'release' and 'rf'.
2018 bool certain = true;
2019 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2020 if (id_to_int(rf->get_tid()) == (int)i)
2022 list = &(*thrd_lists)[i];
2024 /* Can we ensure no future writes from this thread may break
2025 * the release seq? */
2026 bool future_ordered = false;
2028 ModelAction *last = get_last_action(int_to_id(i));
2029 Thread *th = get_thread(int_to_id(i));
2030 if ((last && rf->happens_before(last)) ||
2033 future_ordered = true;
2035 ASSERT(!th->is_model_thread() || future_ordered);
2037 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2038 const ModelAction *act = *rit;
2039 /* Reach synchronization -> this thread is complete */
2040 if (act->happens_before(release))
2042 if (rf->happens_before(act)) {
2043 future_ordered = true;
2047 /* Only non-RMW writes can break release sequences */
2048 if (!act->is_write() || act->is_rmw())
2051 /* Check modification order */
2052 if (mo_graph->checkReachable(rf, act)) {
2053 /* rf --mo--> act */
2054 future_ordered = true;
2057 if (mo_graph->checkReachable(act, release))
2058 /* act --mo--> release */
2060 if (mo_graph->checkReachable(release, act) &&
2061 mo_graph->checkReachable(act, rf)) {
2062 /* release --mo-> act --mo--> rf */
2063 return true; /* complete */
2065 /* act may break release sequence */
2066 pending->writes.push_back(act);
2069 if (!future_ordered)
2070 certain = false; /* This thread is uncertain */
// NOTE(review): the certain/uncertain epilogue appears partially elided in
// this copy; on the certain path the pending writes are discarded.
2074 release_heads->push_back(release);
2075 pending->writes.clear();
2077 pending->release = release;
2084 * An interface for getting the release sequence head(s) with which a
2085 * given ModelAction must synchronize. This function only returns a non-empty
2086 * result when it can locate a release sequence head with certainty. Otherwise,
2087 * it may mark the internal state of the ModelExecution so that it will handle
2088 * the release sequence at a later time, causing @a acquire to update its
2089 * synchronization at some later point in execution.
2091 * @param acquire The 'acquire' action that may synchronize with a release
2093 * @param read The read action that may read from a release sequence; this may
2094 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2095 * when 'acquire' is a fence-acquire)
2096 * @param release_heads A pass-by-reference return parameter. Will be filled
2097 * with the head(s) of the release sequence(s), if they exists with certainty.
2098 * @see ModelExecution::release_seq_heads
2100 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2101 ModelAction *read, rel_heads_list_t *release_heads)
2103 const ModelAction *rf = read->get_reads_from();
// snapshot_calloc: this record must live in snapshotted memory so it is
// rolled back correctly between executions.
2104 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2105 sequence->acquire = acquire;
2106 sequence->read = read;
2108 if (!release_seq_heads(rf, release_heads, sequence)) {
2109 /* add act to 'lazy checking' list */
// Uncertain: keep the record alive; resolve_release_sequences() will retry.
2110 pending_rel_seqs.push_back(sequence);
// Certain: the record is no longer needed.
2112 snapshot_free(sequence);
2117 * @brief Propagate a modified clock vector to actions later in the execution
2120 * After an acquire operation lazily completes a release-sequence
2121 * synchronization, we must update all clock vectors for operations later than
2122 * the acquire in the execution order.
2124 * @param acquire The ModelAction whose clock vector must be propagated
2125 * @param work The work queue to which we can add work items, if this
2126 * propagation triggers more updates (e.g., to the modification order)
2128 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2130 /* Re-check all pending release sequences */
2131 work->push_back(CheckRelSeqWorkEntry(NULL))
2132 /* Re-check read-acquire for mo_graph edges */
2133 work->push_back(MOEdgeWorkEntry(acquire));
2135 /* propagate synchronization to later actions */
// Walk the global trace backwards until we reach 'acquire' itself; every
// later action that happens-after it must inherit the new clock vector.
2136 action_list_t::reverse_iterator rit = action_trace.rbegin();
2137 for (; (*rit) != acquire; rit++) {
2138 ModelAction *propagate = *rit;
2139 if (acquire->happens_before(propagate)) {
2140 synchronize(acquire, propagate);
2141 /* Re-check 'propagate' for mo_graph edges */
2142 work->push_back(MOEdgeWorkEntry(propagate));
2148 * Attempt to resolve all stashed operations that might synchronize with a
2149 * release sequence for a given location. This implements the "lazy" portion of
2150 * determining whether or not a release sequence was contiguous, since not all
2151 * modification order information is present at the time an action occurs.
2153 * @param location The location/object that should be checked for release
2154 * sequence resolutions. A NULL value means to check all locations.
2155 * @param work_queue The work queue to which to add work items as they are
2157 * @return True if any updates occurred (new synchronization, new mo_graph
2160 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2162 bool updated = false;
2163 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2164 while (it != pending_rel_seqs.end()) {
2165 struct release_seq *pending = *it;
2166 ModelAction *acquire = pending->acquire;
2167 const ModelAction *read = pending->read;
2169 /* Only resolve sequences on the given location, if provided */
2170 if (location && read->get_location() != location) {
2175 const ModelAction *rf = read->get_reads_from();
2176 rel_heads_list_t release_heads;
// Retry the certainty check now that more mo information may exist.
2178 complete = release_seq_heads(rf, &release_heads, pending);
2179 for (unsigned int i = 0; i < release_heads.size(); i++)
2180 if (!acquire->has_synchronized_with(release_heads[i]))
2181 if (synchronize(release_heads[i], acquire))
2185 /* Propagate the changed clock vector */
2186 propagate_clockvector(acquire, work_queue);
// Completed records are removed and their snapshot memory reclaimed;
// erase() advances the iterator for us.
2189 it = pending_rel_seqs.erase(it);
2190 snapshot_free(pending);
2196 // If we resolved promises or data races, see if we have realized a data race.
2203 * Performs various bookkeeping operations for the current ModelAction. For
2204 * instance, adds action to the per-object, per-thread action vector and to the
2205 * action trace list of all thread actions.
2207 * @param act is the ModelAction to add.
2209 void ModelExecution::add_action_to_lists(ModelAction *act)
2211 int tid = id_to_int(act->get_tid());
2212 ModelAction *uninit = NULL;
2214 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
// First atomic access to this location: seed the lists with a synthetic
// "uninitialized" write so reads always have a candidate source.
2215 if (list->empty() && act->is_atomic_var()) {
2216 uninit = get_uninitialized_action(act);
2217 uninit_id = id_to_int(uninit->get_tid());
2218 list->push_front(uninit);
2220 list->push_back(act);
2222 action_trace.push_back(act);
2224 action_trace.push_front(uninit);
2226 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2227 if (tid >= (int)vec->size())
2228 vec->resize(priv->next_thread_id);
2229 (*vec)[tid].push_back(act);
2231 (*vec)[uninit_id].push_front(uninit);
// Track the most recent action per thread (used by get_last_action()).
2233 if ((int)thrd_last_action.size() <= tid)
2234 thrd_last_action.resize(get_num_threads());
2235 thrd_last_action[tid] = act;
2237 thrd_last_action[uninit_id] = uninit;
2239 if (act->is_fence() && act->is_release()) {
2240 if ((int)thrd_last_fence_release.size() <= tid)
2241 thrd_last_fence_release.resize(get_num_threads());
2242 thrd_last_fence_release[tid] = act;
// A wait also releases its mutex, so index it under the mutex location too
// (act->get_value() carries the mutex address for wait operations).
2245 if (act->is_wait()) {
2246 void *mutex_loc = (void *) act->get_value();
2247 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2249 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2250 if (tid >= (int)vec->size())
2251 vec->resize(priv->next_thread_id);
2252 (*vec)[tid].push_back(act);
2257 * @brief Get the last action performed by a particular Thread
2258 * @param tid The thread ID of the Thread in question
2259 * @return The last action in the thread
2261 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2263 int threadid = id_to_int(tid);
2264 if (threadid < (int)thrd_last_action.size())
2265 return thrd_last_action[id_to_int(tid)];
2271 * @brief Get the last fence release performed by a particular Thread
2272 * @param tid The thread ID of the Thread in question
2273 * @return The last fence release in the thread, if one exists; NULL otherwise
2275 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2277 int threadid = id_to_int(tid);
2278 if (threadid < (int)thrd_last_fence_release.size())
2279 return thrd_last_fence_release[id_to_int(tid)];
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action.
 * @param curr The current ModelAction; also denotes the object location to
 * search
 * @return The last seq_cst write
 */
ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* First rewind to curr's position in the per-location list */
	for (rit = list->rbegin(); (*rit) != curr; rit++)

	rit++; /* Skip past curr */
	/* Then scan backward for the most recent seq_cst write */
	for ( ; rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst())
/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
 */
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);

	action_list_t::reverse_iterator rit = list->rbegin();

	/* If a starting fence was given, rewind to its position first */
	for (; rit != list->rend(); rit++)
		if (*rit == before_fence)

	ASSERT(*rit == before_fence);
	/* Scan backward for the nearest seq_cst fence in this thread */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
/**
 * Gets the last unlock operation performed on a particular mutex (i.e., memory
 * location). This function identifies the mutex according to the current
 * action, which is presumed to perform on the same mutex.
 * @param curr The current ModelAction; also denotes the object location to
 * search
 * @return The last unlock operation
 */
ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* Walk newest-to-oldest; a wait() also releases the mutex, so it
	 * counts as an unlock here */
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_unlock() || (*rit)->is_wait())
/* Get the action that anchors a thread's current position: its last action,
 * falling back to the action that created the thread. */
ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
	ModelAction *parent = get_last_action(tid);
	/* Thread has no actions yet: use its THREAD_CREATE action instead */
	parent = get_thread(tid)->get_creation();
/**
 * Returns the clock vector for a given thread.
 * @param tid The thread whose clock vector we want
 * @return Desired clock vector
 */
ClockVector * ModelExecution::get_cv(thread_id_t tid) const
	/* The thread's clock vector lives on its parent/last action */
	return get_parent_action(tid)->get_cv();
/**
 * @brief Find the promise (if any) to resolve for the current action and
 * remove it from the pending promise vector
 * @param curr The current ModelAction. Should be a write.
 * @return The Promise to resolve, if any; otherwise NULL
 */
Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
	/* The Node records (by index) which promise this action was chosen to
	 * satisfy during exploration */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (curr->get_node()->get_promise(i)) {
			Promise *ret = promises[i];
			promises.erase(promises.begin() + i);
/**
 * Resolve a Promise with a current write.
 * @param write The ModelAction that is fulfilling Promises
 * @param promise The Promise to resolve
 * @param work The work queue, for adding new fixup work
 * @return True if the Promise was successfully resolved; false otherwise
 */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
	ModelVector<ModelAction *> actions_to_check;

	/* Point every reader of this promise at the resolving write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		actions_to_check.push_back(read);
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 */

	//Check whether reading these writes has made threads unable to
	//resolve remaining promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
/**
 * Compute the set of promises that could potentially be satisfied by this
 * action. Note that the set computation actually appears in the Node, not in
 * ModelExecution.
 * @param curr The ModelAction that may satisfy promises
 */
void ModelExecution::compute_promises(ModelAction *curr)
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Candidate must write the promised value to the same location */
		if (!promise->is_compatible(curr) || !promise->same_value(curr))

		bool satisfy = true;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* A reader ordered before (or synchronizable with) curr
			 * cannot read from it */
			if (act->happens_before(curr) ||
					act->could_synchronize_with(curr)) {
		/* Record the candidate promise index on curr's Node */
		curr->get_node()->set_promise(i);
/** Checks promises in response to change in ClockVector Threads. */
void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		/* Skip threads already eliminated from this promise */
		if (!promise->thread_is_available(tid))

		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* tid newly synchronized past a reader: it can no longer
			 * provide the promised write */
			if ((!old_cv || !old_cv->synchronized_since(act)) &&
					merge_cv->synchronized_since(act)) {
				if (promise->eliminate_thread(tid)) {
					/* Promise has failed */
					priv->failed_promise = true;
/* Mark the execution as failed if disabling a thread has left any pending
 * promise with no thread able to satisfy it. */
void ModelExecution::check_promises_thread_disabled()
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (promise->has_failed()) {
			priv->failed_promise = true;
/**
 * @brief Checks promises in response to addition to modification order for
 * threads.
 *
 * We test whether threads are still available for satisfying promises after an
 * addition to our modification order constraints. Those that are unavailable
 * are "eliminated". Once all threads are eliminated from satisfying a promise,
 * that promise has failed.
 *
 * @param act The ModelAction which updated the modification order
 * @param is_read_check Should be true if act is a read and we must check for
 * updates to the store from which it read (there is a distinction here for
 * RMW's, which are both a load and a store)
 */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
	/* For a read check, test against the store that was read from */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))

		/* Readers that happen before act constrain the promise */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))

			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))

		/* If write is mo-reachable from the promise, check whether the
		 * promise has become unsatisfiable */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
/**
 * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
 * calculations.
 *
 * @param curr The current ModelAction. Must be a release sequence fixup
 * action.
 */
void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
	/* Nothing to do when no release sequences are pending */
	if (pending_rel_seqs.empty())

	struct release_seq *pending = pending_rel_seqs.back();
	/* Offer each candidate "breaking" write as a backtracking choice */
	for (unsigned int i = 0; i < pending->writes.size(); i++) {
		const ModelAction *write = pending->writes[i];
		curr->get_node()->add_relseq_break(write);

	/* NULL means don't break the sequence; just synchronize */
	curr->get_node()->add_relseq_break(NULL);
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * read operation.
 */
void ModelExecution::build_may_read_from(ModelAction *curr)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* A seq_cst read is constrained by the most recent seq_cst write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	/* NOTE(review): the declaration of loop variable 'i' is not visible in
	 * this chunk — confirm against the upstream source */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))

			/* Only add feasible reads: trial-add the mo edge, keep the
			 * candidate only if the graph stays consistent */
			mo_graph->startChanges();
			r_modification_order(curr, act);
			if (!is_infeasible())
				curr->get_node()->add_read_from_past(act);
			mo_graph->rollbackChanges();

		/* Include at most one act per-thread that "happens before" curr */
		if (act->happens_before(curr))

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
/* Decide whether a read issued by a sleeping thread may read from 'write'.
 * Walks the chain of RMWs feeding 'write' via reads-from links; presumably a
 * release in that chain executed while the reader was in the sleep set
 * justifies the read — TODO(review): confirm intent against upstream docs. */
bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
	for ( ; write != NULL; write = write->get_reads_from()) {
		/* UNINIT actions don't have a Node, and they never sleep */
		if (write->is_uninitialized())

		Node *prevnode = write->get_node()->get_parent();

		bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
		if (write->is_release() && thread_sleep)
		/* Non-RMW writes terminate the chain walk */
		if (!write->is_rmw())
/**
 * @brief Get an action representing an uninitialized atomic
 *
 * This function may create a new one or try to retrieve one from the NodeStack
 *
 * @param curr The current action, which prompts the creation of an UNINIT action
 * @return A pointer to the UNINIT ModelAction
 */
ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
	Node *node = curr->get_node();
	ModelAction *act = node->get_uninit_action();
	/* Not cached yet: create the UNINIT action (attributed to the special
	 * model thread) and remember it on the Node */
	act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
	node->set_uninit_action(act);
	act->create_cv(NULL);
/* Print a formatted table of an action list, plus a rolling hash of the
 * actions with real sequence numbers (useful for comparing traces). */
static void print_list(const action_list_t *list)
	action_list_t::const_iterator it;

	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");

	unsigned int hash = 0;

	for (it = list->begin(); it != list->end(); it++) {
		const ModelAction *act = *it;
		/* Only actions with a positive sequence number are printed/hashed */
		if (act->get_seq_number() > 0)
		hash = hash^(hash<<3)^((*it)->hash());
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/* Dump the modification-order graph plus reads-from ("rf") and
 * sequenced-before ("sb") edges to <filename>.dot in Graphviz format. */
void ModelExecution::dumpGraph(char *filename) const
	/* NOTE(review): the declaration of 'buffer' is not visible in this
	 * chunk — confirm against the upstream source */
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Last action seen per thread, for drawing "sb" edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* rf edge from the store (or promise) this read observed */
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						"label=\"rf\", color=red, weight=2");
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						"label=\"rf\", color=red");
		/* sb edge from the thread's previous action */
		if (thread_array[act->get_tid()]) {
			mo_graph->dot_print_edge(file,
					thread_array[id_to_int(act->get_tid())],
					"label=\"sb\", color=blue, weight=400");

		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
#if SUPPORT_MOD_ORDER_DUMP
	/* Optionally dump the mo-graph for this execution to numbered files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);

	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Annotate notable properties of a feasible execution */
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");

	print_infeasibility(" INFEASIBLE");
	print_list(&action_trace);

	/* List any promises still outstanding at the end of the trace */
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
2788 * Add a Thread to the system for the first time. Should only be called once
2790 * @param t The Thread to add
2792 void ModelExecution::add_thread(Thread *t)
2794 unsigned int i = id_to_int(t->get_id());
2795 if (i >= thread_map.size())
2796 thread_map.resize(i + 1);
2798 if (!t->is_model_thread())
2799 scheduler->add_thread(t);
2803 * @brief Get a Thread reference by its ID
2804 * @param tid The Thread's ID
2805 * @return A Thread reference
2807 Thread * ModelExecution::get_thread(thread_id_t tid) const
2809 unsigned int i = id_to_int(tid);
2810 if (i < thread_map.size())
2811 return thread_map[i];
2816 * @brief Get a reference to the Thread in which a ModelAction was executed
2817 * @param act The ModelAction
2818 * @return A Thread reference
2820 Thread * ModelExecution::get_thread(const ModelAction *act) const
2822 return get_thread(act->get_tid());
2826 * @brief Get a Promise's "promise number"
2828 * A "promise number" is an index number that is unique to a promise, valid
2829 * only for a specific snapshot of an execution trace. Promises may come and go
2830 * as they are generated an resolved, so an index only retains meaning for the
2833 * @param promise The Promise to check
2834 * @return The promise index, if the promise still is valid; otherwise -1
2836 int ModelExecution::get_promise_number(const Promise *promise) const
2838 for (unsigned int i = 0; i < promises.size(); i++)
2839 if (promises[i] == promise)
2846 * @brief Check if a Thread is currently enabled
2847 * @param t The Thread to check
2848 * @return True if the Thread is currently enabled
2850 bool ModelExecution::is_enabled(Thread *t) const
2852 return scheduler->is_enabled(t);
2856 * @brief Check if a Thread is currently enabled
2857 * @param tid The ID of the Thread to check
2858 * @return True if the Thread is currently enabled
2860 bool ModelExecution::is_enabled(thread_id_t tid) const
2862 return scheduler->is_enabled(tid);
/**
 * @brief Select the next thread to execute based on the current action
 *
 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
 * actions should be followed by the execution of their child thread. In either
 * case, the current action should determine the next thread schedule.
 *
 * @param curr The current action
 * @return The next thread to run, if the current action will determine this
 * selection; otherwise NULL
 */
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
	/* Do not split atomic RMW */
	if (curr->is_rmwr())
		return get_thread(curr);
	if (curr->is_write()) {
		/* (disabled experiment: per-memory-order scheduling choices) */
		// std::memory_order order = curr->get_mo();
		// case std::memory_order_relaxed:
		// 	return get_thread(curr);
		// case std::memory_order_release:
		// 	return get_thread(curr);

	/* Follow CREATE with the created thread */
	if (curr->get_type() == THREAD_CREATE)
		return curr->get_thread_operand();
2900 /** @return True if the execution has taken too many steps */
2901 bool ModelExecution::too_many_steps() const
2903 return params->bound != 0 && priv->used_sequence_numbers > params->bound;
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return Returns the next Thread to run, if any; NULL if this execution
 * should terminate
 */
Thread * ModelExecution::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* check_current_action() may replace curr (e.g., with a Node-cached
	 * equivalent), so use its return value from here on */
	curr = check_current_action(curr);

	/* A thread that blocked or finished must leave the run queue */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	return action_select_next_thread(curr);
2928 * Launch end-of-execution release sequence fixups only when
2929 * the execution is otherwise feasible AND there are:
2931 * (1) pending release sequences
2932 * (2) pending assertions that could be invalidated by a change
2933 * in clock vectors (i.e., data races)
2934 * (3) no pending promises
2936 void ModelExecution::fixup_release_sequences()
2938 while (!pending_rel_seqs.empty() &&
2939 is_feasible_prefix_ignore_relseq() &&
2940 haveUnrealizedRaces()) {
2941 model_print("*** WARNING: release sequence fixup action "
2942 "(%zu pending release seuqence(s)) ***\n",
2943 pending_rel_seqs.size());
2944 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2945 std::memory_order_seq_cst, NULL, VALUE_NONE,