10 #include "nodestack.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
32 failed_promise(false),
33 hard_failed_promise(false),
34 too_many_reads(false),
35 no_valid_reads(false),
36 bad_synchronization(false),
// Destructor frees each heap-allocated bug_message owned by 'bugs'
41 ~model_snapshot_members() {
42 for (unsigned int i = 0; i < bugs.size(); i++)
// Per-execution bookkeeping: thread-id counter, sequence-number counter,
// cached backtracking point, recorded bugs, and feasibility flags
47 unsigned int next_thread_id;
48 modelclock_t used_sequence_numbers;
49 ModelAction *next_backtrack;
50 SnapVector<bug_message *> bugs;
52 bool hard_failed_promise;
55 /** @brief Incorrectly-ordered synchronization was made */
56 bool bad_synchronization;
63 /** @brief Constructor */
64 ModelExecution::ModelExecution(ModelChecker *m,
65 const struct model_params *params,
67 NodeStack *node_stack) :
72 thread_map(2), /* We'll always need at least 2 threads */
74 condvar_waiters_map(),
80 thrd_last_fence_release(),
81 node_stack(node_stack),
// 'priv' bundles the snapshotted members; mo_graph records modification order
82 priv(new struct model_snapshot_members()),
83 mo_graph(new CycleGraph())
85 /* Initialize a model-checker thread, for special ModelActions */
86 model_thread = new Thread(get_next_id());
87 add_thread(model_thread);
// Allow the scheduler and node stack to call back into this execution
88 scheduler->register_engine(this);
89 node_stack->register_engine(this);
92 /** @brief Destructor */
93 ModelExecution::~ModelExecution()
// Reclaim every Thread created during this execution...
95 for (unsigned int i = 0; i < get_num_threads(); i++)
96 delete get_thread(int_to_id(i));
// ...and release the outstanding Promise objects
98 for (unsigned int i = 0; i < promises.size(); i++)
105 int ModelExecution::get_execution_number() const
107 return model->get_execution_number();
110 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
112 action_list_t *tmp = hash->get(ptr);
114 tmp = new action_list_t();
120 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
122 SnapVector<action_list_t> *tmp = hash->get(ptr);
124 tmp = new SnapVector<action_list_t>();
130 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
132 SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
135 unsigned int thread=id_to_int(tid);
136 if (thread < wrv->size())
137 return &(*wrv)[thread];
142 /** @return a thread ID for a new Thread */
143 thread_id_t ModelExecution::get_next_id()
145 return priv->next_thread_id++;
148 /** @return the number of user threads created during this execution */
149 unsigned int ModelExecution::get_num_threads() const
151 return priv->next_thread_id;
154 /** @return a sequence number for a new ModelAction */
155 modelclock_t ModelExecution::get_next_seq_num()
157 return ++priv->used_sequence_numbers;
161 * @brief Should the current action wake up a given thread?
163 * @param curr The current action
164 * @param thread The thread that we might wake up
165 * @return True, if we should wake up the sleeping thread; false otherwise
167 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
// The sleeping thread's stalled action tells us what it is waiting for
169 const ModelAction *asleep = thread->get_pending();
170 /* Don't allow partial RMW to wake anyone up */
173 /* Synchronizing actions may have been backtracked */
174 if (asleep->could_synchronize_with(curr))
176 /* All acquire/release fences and fence-acquire/store-release */
177 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
179 /* Fence-release + store can awake load-acquire on the same location */
180 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
181 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Only wake if the sleeper's last action precedes the writer's fence-release
182 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
188 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
190 for (unsigned int i = 0; i < get_num_threads(); i++) {
191 Thread *thr = get_thread(int_to_id(i));
192 if (scheduler->is_sleep_set(thr)) {
193 if (should_wake_up(curr, thr))
194 /* Remove this thread from sleep set */
195 scheduler->remove_sleep(thr);
200 /** @brief Alert the model-checker that an incorrectly-ordered
201 * synchronization was made */
202 void ModelExecution::set_bad_synchronization()
204 priv->bad_synchronization = true;
207 /** @brief Alert the model-checker that an incorrectly-ordered
208 * synchronization was made */
209 void ModelExecution::set_bad_sc_read()
211 priv->bad_sc_read = true;
// Record a bug report for this execution. Ownership of the new bug_message
// passes to priv->bugs (freed in model_snapshot_members' destructor).
214 bool ModelExecution::assert_bug(const char *msg)
216 priv->bugs.push_back(new bug_message(msg));
// If the feasible prefix already demonstrates the bug, the execution is
// presumably halted here -- confirm against the elided body
218 if (isfeasibleprefix()) {
225 /** @return True, if any bugs have been reported for this execution */
226 bool ModelExecution::have_bug_reports() const
228 return priv->bugs.size() != 0;
231 SnapVector<bug_message *> * ModelExecution::get_bugs() const
237 * Check whether the current trace has triggered an assertion which should halt
240 * @return True, if the execution should be aborted; false otherwise
242 bool ModelExecution::has_asserted() const
244 return priv->asserted;
248 * Trigger a trace assertion which should cause this execution to be halted.
249 * This can be due to a detected bug or due to an infeasibility that should
252 void ModelExecution::set_assert()
254 priv->asserted = true;
258 * Check if we are in a deadlock. Should only be called at the end of an
259 * execution, although it should not give false positives in the middle of an
260 * execution (there should be some ENABLED thread).
262 * @return True if program is in a deadlock; false otherwise
264 bool ModelExecution::is_deadlocked() const
266 bool blocking_threads = false;
267 for (unsigned int i = 0; i < get_num_threads(); i++) {
268 thread_id_t tid = int_to_id(i);
// Deadlocked iff some user thread is stuck with a pending action (and no
// thread is enabled -- the enabled check is in the elided lines)
271 Thread *t = get_thread(tid);
272 if (!t->is_model_thread() && t->get_pending())
273 blocking_threads = true;
275 return blocking_threads;
279 * @brief Check if we are yield-blocked
281 * A program can be "yield-blocked" if all threads are ready to execute a
284 * @return True if the program is yield-blocked; false otherwise
286 bool ModelExecution::is_yieldblocked() const
// Yield-blocking is only detected when the 'yieldblock' parameter is enabled
288 if (!params->yieldblock)
291 for (unsigned int i = 0; i < get_num_threads(); i++) {
292 thread_id_t tid = int_to_id(i);
293 Thread *t = get_thread(tid);
// A thread counts toward yield-blocking when its next (pending) action is a yield
294 if (t->get_pending() && t->get_pending()->is_yield())
301 * Check if this is a complete execution. That is, have all thread completed
302 * execution (rather than exiting because sleep sets have forced a redundant
305 * @return True if the execution is complete.
307 bool ModelExecution::is_complete_execution() const
// A yield-blocked execution is not considered complete
309 if (is_yieldblocked())
// Complete only when no thread remains enabled
311 for (unsigned int i = 0; i < get_num_threads(); i++)
312 if (is_enabled(int_to_id(i)))
318 * @brief Find the last fence-related backtracking conflict for a ModelAction
320 * This function performs the search for the most recent conflicting action
321 * against which we should perform backtracking, as affected by fence
322 * operations. This includes pairs of potentially-synchronizing actions which
323 * occur due to fence-acquire or fence-release, and hence should be explored in
324 * the opposite execution order.
326 * @param act The current action
327 * @return The most recent action which conflicts with act due to fences
329 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
331 /* Only perform release/acquire fence backtracking for stores */
332 if (!act->is_write())
335 /* Find a fence-release (or, act is a release) */
336 ModelAction *last_release;
337 if (act->is_release())
340 last_release = get_last_fence_release(act->get_tid());
344 /* Skip past the release */
345 const action_list_t *list = &action_trace;
346 action_list_t::const_reverse_iterator rit;
347 for (rit = list->rbegin(); rit != list->rend(); rit++)
348 if (*rit == last_release)
// The release must appear in the trace
350 ASSERT(rit != list->rend());
355 * load --sb-> fence-acquire */
// Per-thread records: the acquire-fence seen for each thread and the most
// recent prior load of the same variable in that thread
356 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
357 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
358 bool found_acquire_fences = false;
359 for ( ; rit != list->rend(); rit++) {
360 ModelAction *prev = *rit;
// An action in our own thread cannot be a fence conflict
361 if (act->same_thread(prev))
364 int tid = id_to_int(prev->get_tid());
366 if (prev->is_read() && act->same_var(prev)) {
367 if (prev->is_acquire()) {
368 /* Found most recent load-acquire, don't need
369 * to search for more fences */
370 if (!found_acquire_fences)
373 prior_loads[tid] = prev;
376 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
377 found_acquire_fences = true;
378 acquire_fences[tid] = prev;
// Backtrack to the newest acquire-fence that has a matching prior load
// of the same variable in its thread
382 ModelAction *latest_backtrack = NULL;
383 for (unsigned int i = 0; i < acquire_fences.size(); i++)
384 if (acquire_fences[i] && prior_loads[i])
385 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
386 latest_backtrack = acquire_fences[i];
387 return latest_backtrack;
391 * @brief Find the last backtracking conflict for a ModelAction
393 * This function performs the search for the most recent conflicting action
394 * against which we should perform backtracking. This primary includes pairs of
395 * synchronizing actions which should be explored in the opposite execution
398 * @param act The current action
399 * @return The most recent action which conflicts with act
401 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
// Dispatch on the action type: each kind of operation conflicts with a
// different set of prior actions on the same location
403 switch (act->get_type()) {
405 /* Only seq-cst fences can (directly) cause backtracking */
406 if (!act->is_seqcst())
411 ModelAction *ret = NULL;
413 /* linear search: from most recent to oldest */
414 action_list_t *list = obj_map.get(act->get_location());
415 action_list_t::reverse_iterator rit;
416 for (rit = list->rbegin(); rit != list->rend(); rit++) {
417 ModelAction *prev = *rit;
420 if (prev->could_synchronize_with(act)) {
// Also consider fence-induced conflicts; the later of the two wins
426 ModelAction *ret2 = get_last_fence_conflict(act);
436 case ATOMIC_TRYLOCK: {
437 /* linear search: from most recent to oldest */
438 action_list_t *list = obj_map.get(act->get_location());
439 action_list_t::reverse_iterator rit;
440 for (rit = list->rbegin(); rit != list->rend(); rit++) {
441 ModelAction *prev = *rit;
442 if (act->is_conflicting_lock(prev))
447 case ATOMIC_UNLOCK: {
448 /* linear search: from most recent to oldest */
449 action_list_t *list = obj_map.get(act->get_location());
450 action_list_t::reverse_iterator rit;
451 for (rit = list->rbegin(); rit != list->rend(); rit++) {
452 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock
453 if (!act->same_thread(prev) && prev->is_failed_trylock())
459 /* linear search: from most recent to oldest */
460 action_list_t *list = obj_map.get(act->get_location());
461 action_list_t::reverse_iterator rit;
462 for (rit = list->rbegin(); rit != list->rend(); rit++) {
463 ModelAction *prev = *rit;
464 if (!act->same_thread(prev) && prev->is_failed_trylock())
466 if (!act->same_thread(prev) && prev->is_notify())
472 case ATOMIC_NOTIFY_ALL:
473 case ATOMIC_NOTIFY_ONE: {
474 /* linear search: from most recent to oldest */
475 action_list_t *list = obj_map.get(act->get_location());
476 action_list_t::reverse_iterator rit;
477 for (rit = list->rbegin(); rit != list->rend(); rit++) {
478 ModelAction *prev = *rit;
// A notify conflicts with another thread's wait on the same condvar
479 if (!act->same_thread(prev) && prev->is_wait())
490 /** This method finds backtracking points where we should try to
491 * reorder the parameter ModelAction against.
493 * @param the ModelAction to find backtracking points for.
495 void ModelExecution::set_backtracking(ModelAction *act)
497 Thread *t = get_thread(act);
498 ModelAction *prev = get_last_conflict(act);
// Backtracking is recorded at the parent node of the conflicting action
502 Node *node = prev->get_node()->get_parent();
504 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
// If our thread was enabled at the conflict point we only need to try it;
// otherwise we must consider every thread
505 int low_tid, high_tid;
506 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
507 low_tid = id_to_int(act->get_tid());
508 high_tid = low_tid + 1;
511 high_tid = get_num_threads();
514 for (int i = low_tid; i < high_tid; i++) {
515 thread_id_t tid = int_to_id(i);
517 /* Make sure this thread can be enabled here. */
518 if (i >= node->get_num_threads())
521 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
522 /* Don't backtrack into a point where the thread is disabled or sleeping. */
523 if (node->enabled_status(tid) != THREAD_ENABLED)
526 /* Check if this has been explored already */
527 if (node->has_been_explored(tid))
530 /* See if fairness allows */
531 if (params->fairwindow != 0 && !node->has_priority(tid)) {
// NOTE(review): loop variable 't' shadows the outer 'Thread *t' above --
// works, but rename would aid readability
533 for (int t = 0; t < node->get_num_threads(); t++) {
534 thread_id_t tother = int_to_id(t);
535 if (node->is_enabled(tother) && node->has_priority(tother)) {
544 /* See if CHESS-like yield fairness allows */
545 if (params->yieldon) {
547 for (int t = 0; t < node->get_num_threads(); t++) {
548 thread_id_t tother = int_to_id(t);
549 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
558 /* Cache the latest backtracking point */
559 set_latest_backtrack(prev);
561 /* If this is a new backtracking point, mark the tree */
562 if (!node->set_backtrack(tid))
564 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
565 id_to_int(prev->get_tid()),
566 id_to_int(t->get_id()));
575 * @brief Cache the a backtracking point as the "most recent", if eligible
577 * Note that this does not prepare the NodeStack for this backtracking
578 * operation, it only caches the action on a per-execution basis
580 * @param act The operation at which we should explore a different next action
581 * (i.e., backtracking point)
582 * @return True, if this action is now the most recent backtracking point;
585 bool ModelExecution::set_latest_backtrack(ModelAction *act)
587 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
588 priv->next_backtrack = act;
595 * Returns last backtracking point. The model checker will explore a different
596 * path for this point in the next execution.
597 * @return The ModelAction at which the next execution should diverge.
599 ModelAction * ModelExecution::get_next_backtrack()
601 ModelAction *next = priv->next_backtrack;
602 priv->next_backtrack = NULL;
607 * Processes a read model action.
608 * @param curr is the read model action to process.
609 * @return True if processing this read updates the mo_graph.
611 bool ModelExecution::process_read(ModelAction *curr)
613 Node *node = curr->get_node();
615 bool updated = false;
// Three sources for a read's value: a past write, an outstanding promise,
// or a speculative future value (which creates a new promise)
616 switch (node->get_read_from_status()) {
617 case READ_FROM_PAST: {
618 const ModelAction *rf = node->get_read_from_past();
// mo_graph changes are staged so they can be rolled back on rejection
621 mo_graph->startChanges();
623 ASSERT(!is_infeasible());
624 if (!check_recency(curr, rf)) {
// Recency (CoRR-style "too many reads") rejected this rf; try the next one
625 if (node->increment_read_from()) {
626 mo_graph->rollbackChanges();
629 priv->too_many_reads = true;
633 updated = r_modification_order(curr, rf);
635 mo_graph->commitChanges();
636 mo_check_promises(curr, true);
639 case READ_FROM_PROMISE: {
640 Promise *promise = curr->get_node()->get_read_from_promise();
641 if (promise->add_reader(curr))
642 priv->failed_promise = true;
643 curr->set_read_from_promise(promise);
644 mo_graph->startChanges();
645 if (!check_recency(curr, promise))
646 priv->too_many_reads = true;
647 updated = r_modification_order(curr, promise);
648 mo_graph->commitChanges();
651 case READ_FROM_FUTURE: {
652 /* Read from future value */
// Speculate: promise that some later write will supply this value
653 struct future_value fv = node->get_future_value();
654 Promise *promise = new Promise(this, curr, fv);
655 curr->set_read_from_promise(promise);
656 promises.push_back(promise);
657 mo_graph->startChanges();
658 updated = r_modification_order(curr, promise);
659 mo_graph->commitChanges();
// Propagate the read's value back to the user-thread context
665 get_thread(curr)->set_return_value(curr->get_return_value());
671 * Processes a lock, trylock, or unlock model action. @param curr is
672 * the read model action to process.
674 * The try lock operation checks whether the lock is taken. If not,
675 * it falls to the normal lock operation case. If so, it returns
678 * The lock operation has already been checked that it is enabled, so
679 * it just grabs the lock and synchronizes with the previous unlock.
681 * The unlock operation has to re-enable all of the threads that are
682 * waiting on the lock.
684 * @return True if synchronization was updated; false otherwise
686 bool ModelExecution::process_mutex(ModelAction *curr)
688 std::mutex *mutex = curr->get_mutex();
689 struct std::mutex_state *state = NULL;
692 state = mutex->get_state();
694 switch (curr->get_type()) {
695 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently free
696 bool success = !state->locked;
697 curr->set_try_lock(success);
699 get_thread(curr)->set_return_value(0);
702 get_thread(curr)->set_return_value(1);
704 //otherwise fall into the lock case
// Using the lock before its constructor ran (per clock vectors) is a bug
706 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
707 assert_bug("Lock access before initialization");
708 state->locked = get_thread(curr);
709 ModelAction *unlock = get_last_unlock(curr);
710 //synchronize with the previous unlock statement
711 if (unlock != NULL) {
712 synchronize(unlock, curr);
718 case ATOMIC_UNLOCK: {
719 /* wake up the other threads */
720 for (unsigned int i = 0; i < get_num_threads(); i++) {
721 Thread *t = get_thread(int_to_id(i));
722 Thread *curr_thrd = get_thread(curr);
723 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
727 /* unlock the lock - after checking who was waiting on it */
728 state->locked = NULL;
730 if (!curr->is_wait())
731 break; /* The rest is only for ATOMIC_WAIT */
733 /* Should we go to sleep? (simulate spurious failures) */
734 if (curr->get_node()->get_misc() == 0) {
// Register this wait on the condvar's waiter list, then block the thread
735 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
737 scheduler->sleep(get_thread(curr));
741 case ATOMIC_NOTIFY_ALL: {
742 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
743 //activate all the waiting threads
744 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
745 scheduler->wake(get_thread(*rit));
750 case ATOMIC_NOTIFY_ONE: {
751 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// The node's 'misc' value selects which waiter to wake (explored exhaustively)
752 int wakeupthread = curr->get_node()->get_misc();
753 action_list_t::iterator it = waiters->begin();
754 advance(it, wakeupthread);
755 scheduler->wake(get_thread(*it));
767 * @brief Check if the current pending promises allow a future value to be sent
769 * It is unsafe to pass a future value back if there exists a pending promise Pr
772 * reader --exec-> Pr --exec-> writer
774 * If such Pr exists, we must save the pending future value until Pr is
777 * @param writer The operation which sends the future value. Must be a write.
778 * @param reader The operation which will observe the value. Must be a read.
779 * @return True if the future value can be sent now; false if it must wait.
781 bool ModelExecution::promises_may_allow(const ModelAction *writer,
782 const ModelAction *reader) const
// Walk the pending promises from newest to oldest
784 for (int i = promises.size() - 1; i >= 0; i--) {
// Compare against the promise's first (earliest) reader
785 ModelAction *pr = promises[i]->get_reader(0);
786 //reader is after promise...doesn't cross any promise
789 //writer is after promise, reader before...bad...
797 * @brief Add a future value to a reader
799 * This function performs a few additional checks to ensure that the future
800 * value can be feasibly observed by the reader
802 * @param writer The operation whose value is sent. Must be a write.
803 * @param reader The read operation which may read the future value. Must be a read.
805 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
807 /* Do more ambitious checks now that mo is more complete */
808 if (!mo_may_allow(writer, reader))
811 Node *node = reader->get_node();
813 /* Find an ancestor thread which exists at the time of the reader */
// The writer's thread may not have existed yet; walk up the parent chain
// until we reach a thread the reader's node knows about
814 Thread *write_thread = get_thread(writer);
815 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
816 write_thread = write_thread->get_parent();
818 struct future_value fv = {
819 writer->get_write_value(),
// Expiration: how long the speculation may remain unresolved
820 writer->get_seq_number() + params->maxfuturedelay,
821 write_thread->get_id(),
// A genuinely new future value makes the reader a backtracking point
823 if (node->add_future_value(fv))
824 set_latest_backtrack(reader);
828 * Process a write ModelAction
829 * @param curr The ModelAction to process
830 * @param work The work queue, for adding fixup work
831 * @return True if the mo_graph was updated or promises were resolved
833 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
835 /* Readers to which we may send our future value */
836 ModelVector<ModelAction *> send_fv;
838 const ModelAction *earliest_promise_reader;
839 bool updated_promises = false;
// Update modification order; w_modification_order also collects candidate readers
841 bool updated_mod_order = w_modification_order(curr, &send_fv);
842 Promise *promise = pop_promise_to_resolve(curr);
845 earliest_promise_reader = promise->get_reader(0);
846 updated_promises = resolve_promise(curr, promise, work);
848 earliest_promise_reader = NULL;
850 for (unsigned int i = 0; i < send_fv.size(); i++) {
851 ModelAction *read = send_fv[i];
853 /* Don't send future values to reads after the Promise we resolve */
854 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
855 /* Check if future value can be sent immediately */
856 if (promises_may_allow(curr, read)) {
857 add_future_value(curr, read);
// Otherwise defer it until the blocking promise is resolved
859 futurevalues.push_back(PendingFutureValue(curr, read));
864 /* Check the pending future values */
// Iterate backwards so erase() does not disturb unvisited indices
865 for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
866 struct PendingFutureValue pfv = futurevalues[i];
867 if (promises_may_allow(pfv.writer, pfv.reader)) {
868 add_future_value(pfv.writer, pfv.reader);
869 futurevalues.erase(futurevalues.begin() + i);
873 mo_graph->commitChanges();
874 mo_check_promises(curr, false);
876 get_thread(curr)->set_return_value(VALUE_NONE);
877 return updated_mod_order || updated_promises;
881 * Process a fence ModelAction
882 * @param curr The ModelAction to process
883 * @return True if synchronization was updated
885 bool ModelExecution::process_fence(ModelAction *curr)
888 * fence-relaxed: no-op
889 * fence-release: only log the occurence (not in this function), for
890 * use in later synchronization
891 * fence-acquire (this function): search for hypothetical release
893 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
895 bool updated = false;
896 if (curr->is_acquire()) {
897 action_list_t *list = &action_trace;
898 action_list_t::reverse_iterator rit;
899 /* Find X : is_read(X) && X --sb-> curr */
// Scan backwards over the whole trace, considering only our own thread
900 for (rit = list->rbegin(); rit != list->rend(); rit++) {
901 ModelAction *act = *rit;
904 if (act->get_tid() != curr->get_tid())
906 /* Stop at the beginning of the thread */
907 if (act->is_thread_start())
909 /* Stop once we reach a prior fence-acquire */
910 if (act->is_fence() && act->is_acquire())
914 /* read-acquire will find its own release sequences */
915 if (act->is_acquire())
918 /* Establish hypothetical release sequences */
919 rel_heads_list_t release_heads;
920 get_release_seq_heads(curr, act, &release_heads);
// The fence synchronizes with every release head found for this read
921 for (unsigned int i = 0; i < release_heads.size(); i++)
922 synchronize(release_heads[i], curr);
923 if (release_heads.size() != 0)
931 * @brief Process the current action for thread-related activity
933 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
934 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
935 * synchronization, etc. This function is a no-op for non-THREAD actions
936 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
938 * @param curr The current action
939 * @return True if synchronization was updated or a thread completed
941 bool ModelExecution::process_thread_action(ModelAction *curr)
943 bool updated = false;
945 switch (curr->get_type()) {
946 case THREAD_CREATE: {
// The action's value carries the user's thread start routine and argument
947 thrd_t *thrd = (thrd_t *)curr->get_location();
948 struct thread_params *params = (struct thread_params *)curr->get_value();
949 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
951 th->set_creation(curr);
952 /* Promises can be satisfied by children */
953 for (unsigned int i = 0; i < promises.size(); i++) {
954 Promise *promise = promises[i];
955 if (promise->thread_is_available(curr->get_tid()))
956 promise->add_thread(th->get_id());
// THREAD_JOIN: synchronize with the last action of the joined thread
961 Thread *blocking = curr->get_thread_operand();
962 ModelAction *act = get_last_action(blocking->get_id());
963 synchronize(act, curr);
964 updated = true; /* trigger rel-seq checks */
967 case THREAD_FINISH: {
968 Thread *th = get_thread(curr);
969 /* Wake up any joining threads */
970 for (unsigned int i = 0; i < get_num_threads(); i++) {
971 Thread *waiting = get_thread(int_to_id(i));
972 if (waiting->waiting_on() == th &&
973 waiting->get_pending()->is_thread_join())
974 scheduler->wake(waiting);
977 /* Completed thread can't satisfy promises */
978 for (unsigned int i = 0; i < promises.size(); i++) {
979 Promise *promise = promises[i];
980 if (promise->thread_is_available(th->get_id()))
981 if (promise->eliminate_thread(th->get_id()))
// Eliminating the last candidate thread makes the promise unsatisfiable
982 priv->failed_promise = true;
984 updated = true; /* trigger rel-seq checks */
988 check_promises(curr->get_tid(), NULL, curr->get_cv());
999 * @brief Process the current action for release sequence fixup activity
1001 * Performs model-checker release sequence fixups for the current action,
1002 * forcing a single pending release sequence to break (with a given, potential
1003 * "loose" write) or to complete (i.e., synchronize). If a pending release
1004 * sequence forms a complete release sequence, then we must perform the fixup
1005 * synchronization, mo_graph additions, etc.
1007 * @param curr The current action; must be a release sequence fixup action
1008 * @param work_queue The work queue to which to add work items as they are
1011 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// A NULL 'write' means "complete the sequence"; otherwise 'write' breaks it
1013 const ModelAction *write = curr->get_node()->get_relseq_break();
1014 struct release_seq *sequence = pending_rel_seqs.back();
1015 pending_rel_seqs.pop_back();
1017 ModelAction *acquire = sequence->acquire;
1018 const ModelAction *rf = sequence->rf;
1019 const ModelAction *release = sequence->release;
1023 ASSERT(release->same_thread(rf));
1025 if (write == NULL) {
1027 * @todo Forcing a synchronization requires that we set
1028 * modification order constraints. For instance, we can't allow
1029 * a fixup sequence in which two separate read-acquire
1030 * operations read from the same sequence, where the first one
1031 * synchronizes and the other doesn't. Essentially, we can't
1032 * allow any writes to insert themselves between 'release' and
1036 /* Must synchronize */
1037 if (!synchronize(release, acquire))
1040 /* Propagate the changed clock vector */
1041 propagate_clockvector(acquire, work_queue)
1043 /* Break release sequence with new edges:
1044 * release --mo--> write --mo--> rf */
1045 mo_graph->addEdge(release, write);
1046 mo_graph->addEdge(write, rf);
1049 /* See if we have realized a data race */
1054 * Initialize the current action by performing one or more of the following
1055 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1056 * in the NodeStack, manipulating backtracking sets, allocating and
1057 * initializing clock vectors, and computing the promises to fulfill.
1059 * @param curr The current action, as passed from the user context; may be
1060 * freed/invalidated after the execution of this function, with a different
1061 * action "returned" its place (pass-by-reference)
1062 * @return True if curr is a newly-explored action; false otherwise
1064 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1066 ModelAction *newcurr;
// The second half of an RMW is merged into the existing RMWR action
1068 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1069 newcurr = process_rmw(*curr);
1072 if (newcurr->is_rmw())
1073 compute_promises(newcurr);
// Otherwise this is a fresh step: number it and explore it in the NodeStack
1079 (*curr)->set_seq_number(get_next_seq_num());
1081 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1083 /* First restore type and order in case of RMW operation */
1084 if ((*curr)->is_rmwr())
1085 newcurr->copy_typeandorder(*curr);
1087 ASSERT((*curr)->get_location() == newcurr->get_location());
1088 newcurr->copy_from_new(*curr);
1090 /* Discard duplicate ModelAction; use action from NodeStack */
1093 /* Always compute new clock vector */
1094 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1097 return false; /* Action was explored previously */
1101 /* Always compute new clock vector */
1102 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1104 /* Assign most recent release fence */
1105 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1108 * Perform one-time actions when pushing new ModelAction onto
1111 if (newcurr->is_write())
1112 compute_promises(newcurr);
1113 else if (newcurr->is_relseq_fixup())
1114 compute_relseq_breakwrites(newcurr);
1115 else if (newcurr->is_wait())
// Two 'misc' choices for a wait: sleep or spurious wakeup
1116 newcurr->get_node()->set_misc_max(2);
1117 else if (newcurr->is_notify_one()) {
// One 'misc' choice per thread currently waiting on this condvar
1118 newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1120 return true; /* This was a new ModelAction */
1125 * @brief Establish reads-from relation between two actions
1127 * Perform basic operations involved with establishing a concrete rf relation,
1128 * including setting the ModelAction data and checking for release sequences.
1130 * @param act The action that is reading (must be a read)
1131 * @param rf The action from which we are reading (must be a write)
1133 * @return True if this read established synchronization
1135 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1138 ASSERT(rf->is_write());
1140 act->set_read_from(rf);
// Acquire reads additionally synchronize with the heads of any release
// sequences ending at rf
1141 if (act->is_acquire()) {
1142 rel_heads_list_t release_heads;
1143 get_release_seq_heads(act, act, &release_heads);
1144 int num_heads = release_heads.size();
1145 for (unsigned int i = 0; i < release_heads.size(); i++)
1146 if (!synchronize(release_heads[i], act))
1148 return num_heads > 0;
1154 * @brief Synchronizes two actions
1156 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1157 * This function performs the synchronization as well as providing other hooks
1158 * for other checks along with synchronization.
1160 * @param first The left-hand side of the synchronizes-with relation
1161 * @param second The right-hand side of the synchronizes-with relation
1162 * @return True if the synchronization was successful (i.e., was consistent
1163 * with the execution order); false otherwise
1165 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1167 if (*second < *first) {
1168 set_bad_synchronization();
1171 check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1172 return second->synchronize_with(first);
1176 * Check promises and eliminate potentially-satisfying threads when a thread is
1177 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1178 * no longer satisfy a promise generated from that thread.
1180 * @param blocker The thread on which a thread is waiting
1181 * @param waiting The waiting thread
1183 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1185 for (unsigned int i = 0; i < promises.size(); i++) {
1186 Promise *promise = promises[i];
1187 if (!promise->thread_is_available(waiting->get_id()))
// Only promises whose readers live in the blocker's thread matter here
1189 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1190 ModelAction *reader = promise->get_reader(j);
1191 if (reader->get_tid() != blocker->get_id())
1193 if (promise->eliminate_thread(waiting->get_id())) {
1194 /* Promise has failed */
1195 priv->failed_promise = true;
1197 /* Only eliminate the 'waiting' thread once */
1205 * @brief Check whether a model action is enabled.
1207 * Checks whether an operation would be successful (i.e., is a lock already
1208 * locked, or is the joined thread already complete).
1210 * For yield-blocking, yields are never enabled.
1212 * @param curr is the ModelAction to check whether it is enabled.
1213 * @return a bool that indicates whether the action is enabled.
1215 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1216 if (curr->is_lock()) {
// A lock is disabled while another thread holds the mutex
1217 std::mutex *lock = curr->get_mutex();
1218 struct std::mutex_state *state = lock->get_state();
1221 } else if (curr->is_thread_join()) {
1222 Thread *blocking = curr->get_thread_operand();
1223 if (!blocking->is_complete()) {
// Joining a live thread blocks us -- drop it from promise candidates
1224 thread_blocking_check_promises(blocking, get_thread(curr));
1227 } else if (params->yieldblock && curr->is_yield()) {
1235 * This is the heart of the model checker routine. It performs model-checking
1236 * actions corresponding to a given "current action." Among other processes, it
1237 * calculates reads-from relationships, updates synchronization clock vectors,
1238 * forms a memory_order constraints graph, and handles replay/backtrack
1239 * execution when running permutations of previously-observed executions.
1241 * @param curr The current action to process
1242 * @return The ModelAction that is actually executed; may be different than
1245 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* An RMW/RMWC is the second half of a previously-seen RMWR; it was already
 * added to the lists when the read part executed */
1248 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
/* May swap 'curr' for an existing Node's action (note the pointer-to-pointer) */
1249 bool newly_explored = initialize_curr_action(&curr);
1253 wake_up_sleeping_actions(curr);
1255 /* Compute fairness information for CHESS yield algorithm */
1256 if (params->yieldon) {
1257 curr->get_node()->update_yield(scheduler);
1260 /* Add the action to lists before any other model-checking tasks */
1261 if (!second_part_of_rmw)
1262 add_action_to_lists(curr);
1264 /* Build may_read_from set for newly-created actions */
1265 if (newly_explored && curr->is_read())
1266 build_may_read_from(curr);
1268 /* Initialize work_queue with the "current action" work */
1269 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Drain the work queue; processing one entry may enqueue more (fixpoint),
 * and we bail out early if a bug assertion has fired */
1270 while (!work_queue.empty() && !has_asserted()) {
1271 WorkQueueEntry work = work_queue.front();
1272 work_queue.pop_front();
1274 switch (work.type) {
1275 case WORK_CHECK_CURR_ACTION: {
1276 ModelAction *act = work.action;
1277 bool update = false; /* update this location's release seq's */
1278 bool update_all = false; /* update all release seq's */
/* Each process_* helper returns whether release sequences need re-checking;
 * the flag updates are in elided lines */
1280 if (process_thread_action(curr))
1283 if (act->is_read() && !second_part_of_rmw && process_read(act))
1286 if (act->is_write() && process_write(act, &work_queue))
1289 if (act->is_fence() && process_fence(act))
1292 if (act->is_mutex_op() && process_mutex(act))
1295 if (act->is_relseq_fixup())
1296 process_relseq_fixup(curr, &work_queue);
/* NULL location means "re-check release sequences at all locations" */
1299 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1301 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1304 case WORK_CHECK_RELEASE_SEQ:
1305 resolve_release_sequences(work.location, &work_queue);
1307 case WORK_CHECK_MO_EDGES: {
1308 /** @todo Complete verification of work_queue */
1309 ModelAction *act = work.action;
1310 bool updated = false;
1312 if (act->is_read()) {
1313 const ModelAction *rf = act->get_reads_from();
1314 const Promise *promise = act->get_reads_from_promise();
1316 if (r_modification_order(act, rf))
/* An SC read must not read from a write that happens-before the last SC
 * write to the same location (handling is in elided lines) */
1318 if (act->is_seqcst()) {
1319 ModelAction *last_sc_write = get_last_seq_cst_write(act);
1320 if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
1324 } else if (promise) {
1325 if (r_modification_order(act, promise))
1329 if (act->is_write()) {
1330 if (w_modification_order(act, NULL))
1333 mo_graph->commitChanges();
1336 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* All processing done: record backtracking points for future explorations */
1345 check_curr_backtracking(curr);
1346 set_backtracking(curr);
/**
 * @brief Record 'curr' as the latest backtracking point when unexplored
 * alternatives remain.
 *
 * If the parent node still has backtracking choices, or curr's own node has
 * unexplored misc/reads-from/promise/relseq-break options, mark curr as the
 * latest backtrack so a future execution will revisit it.
 */
1350 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1352 Node *currnode = curr->get_node();
1353 Node *parnode = currnode->get_parent();
1355 if ((parnode && !parnode->backtrack_empty()) ||
1356 !currnode->misc_empty() ||
1357 !currnode->read_from_empty() ||
1358 !currnode->promise_empty() ||
1359 !currnode->relseq_break_empty()) {
1360 set_latest_backtrack(curr);
/**
 * @brief Check whether any outstanding promise has expired.
 *
 * A promise is expired when its expiration sequence number is earlier than
 * the number of sequence numbers already used in this execution.
 * @return presumably true iff at least one promise has expired (the return
 * statements are in elided lines)
 */
1364 bool ModelExecution::promises_expired() const
1366 for (unsigned int i = 0; i < promises.size(); i++) {
1367 Promise *promise = promises[i];
1368 if (promise->get_expiration() < priv->used_sequence_numbers)
1375 * This is the strongest feasibility check available.
1376 * @return whether the current trace (partial or complete) must be a prefix of
1377 * a feasible trace
1379 bool ModelExecution::isfeasibleprefix() const
/* Feasible iff no release sequences are still pending lazy resolution AND
 * the relseq-ignoring feasibility check passes */
1381 return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1385 * Print diagnostic information about an infeasible execution
1386 * @param prefix A string to prefix the output with; if NULL, then a default
1387 * message prefix will be provided
1389 void ModelExecution::print_infeasibility(const char *prefix) const
/* 'buf'/'ptr' are declared in elided lines; each check below appends a
 * bracketed tag describing one infeasibility cause to the buffer */
1393 if (mo_graph->checkForCycles())
1394 ptr += sprintf(ptr, "[mo cycle]");
1395 if (priv->failed_promise || priv->hard_failed_promise)
1396 ptr += sprintf(ptr, "[failed promise]");
1397 if (priv->too_many_reads)
1398 ptr += sprintf(ptr, "[too many reads]");
1399 if (priv->no_valid_reads)
1400 ptr += sprintf(ptr, "[no valid reads-from]");
1401 if (priv->bad_synchronization)
1402 ptr += sprintf(ptr, "[bad sw ordering]");
1403 if (priv->bad_sc_read)
1404 ptr += sprintf(ptr, "[bad sc read]");
1405 if (promises_expired())
1406 ptr += sprintf(ptr, "[promise expired]");
1407 if (promises.size() != 0)
1408 ptr += sprintf(ptr, "[unresolved promise]");
1410 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1414 * Returns whether the current completed trace is feasible, except for pending
1415 * release sequences.
1417 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
/* Also requires that no promises remain unresolved and none have failed */
1419 return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1424 * Check if the current partial trace is infeasible. Does not check any
1425 * end-of-execution flags, which might rule out the execution. Thus, this is
1426 * useful only for ruling an execution as infeasible.
1427 * @return whether the current partial trace is infeasible.
1429 bool ModelExecution::is_infeasible() const
/* Any single infeasibility condition suffices: a modification-order cycle or
 * any of the snapshotted failure flags (further clauses are in elided lines) */
1431 return mo_graph->checkForCycles() ||
1432 priv->no_valid_reads ||
1433 priv->too_many_reads ||
1434 priv->bad_synchronization ||
1435 priv->bad_sc_read ||
1436 priv->hard_failed_promise ||
1440 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1441 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
/* The last action of this thread is the pending RMWR being closed out */
1442 ModelAction *lastread = get_last_action(act->get_tid());
1443 lastread->process_rmw(act);
/* A successful RMW is mo-ordered immediately after what it read from
 * (whether that was a concrete write or a promise) */
1444 if (act->is_rmw()) {
1445 if (lastread->get_reads_from())
1446 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1448 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1449 mo_graph->commitChanges();
1455 * A helper function for ModelExecution::check_recency, to check if the current
1456 * thread is able to read from a different write/promise for 'params.maxreads'
1457 * number of steps and if that write/promise should become visible (i.e., is
1458 * ordered later in the modification order). This helps model memory liveness.
1460 * @param curr The current action. Must be a read.
1461 * @param rf The write/promise from which we plan to read
1462 * @param other_rf The write/promise from which we may read
1463 * @return True if we were able to read from other_rf for params.maxreads steps
1465 template <typename T, typename U>
1466 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1468 /* Need a different write/promise */
1469 if (other_rf->equals(rf))
1472 /* Only look for "newer" writes/promises */
1473 if (!mo_graph->checkReachable(rf, other_rf))
/* Walk this thread's action list backwards, starting at curr */
1476 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1477 action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1478 action_list_t::reverse_iterator rit = list->rbegin();
1479 ASSERT((*rit) == curr);
1480 /* Skip past curr */
1483 /* Does this write/promise work for everyone? */
/* The preceding params->maxreads reads must all be able to read other_rf */
1484 for (int i = 0; i < params->maxreads; i++, rit++) {
1485 ModelAction *act = *rit;
1486 if (!act->may_read_from(other_rf))
1493 * Checks whether a thread has read from the same write or Promise for too many
1494 * times without seeing the effects of a later write/Promise.
1497 * 1) there must be a different write/promise that we could read from,
1498 * 2) we must have read from the same write/promise in excess of maxreads times,
1499 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1500 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1502 * If so, we decide that the execution is no longer feasible.
1504 * @param curr The current action. Must be a read.
1505 * @param rf The ModelAction/Promise from which we might read.
1506 * @return True if the read should succeed; false otherwise
1508 template <typename T>
1509 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
/* maxreads == 0 disables the liveness check entirely */
1511 if (!params->maxreads)
1514 //NOTE: Next check is just optimization, not really necessary....
1515 if (curr->get_node()->get_read_from_past_size() +
1516 curr->get_node()->get_read_from_promise_size() <= 1)
1519 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1520 int tid = id_to_int(curr->get_tid());
1521 ASSERT(tid < (int)thrd_lists->size());
1522 action_list_t *list = &(*thrd_lists)[tid];
1523 action_list_t::reverse_iterator rit = list->rbegin();
1524 ASSERT((*rit) == curr);
1525 /* Skip past curr */
1528 action_list_t::reverse_iterator ritcopy = rit;
1529 /* See if we have enough reads from the same value */
/* Bail out (elided early returns) unless the last maxreads actions of this
 * thread are all reads of the same write/promise 'rf' with alternatives */
1530 for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1531 if (ritcopy == list->rend())
1533 ModelAction *act = *ritcopy;
1534 if (!act->is_read())
1536 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1538 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1540 if (act->get_node()->get_read_from_past_size() +
1541 act->get_node()->get_read_from_promise_size() <= 1)
/* Check every alternative write and promise; if one could have been read for
 * maxreads steps, this read is a liveness failure */
1544 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1545 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1546 if (should_read_instead(curr, rf, write))
1547 return false; /* liveness failure */
1549 for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1550 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1551 if (should_read_instead(curr, rf, promise))
1552 return false; /* liveness failure */
1558 * @brief Updates the mo_graph with the constraints imposed from the current
1561 * Basic idea is the following: Go through each other thread and find
1562 * the last action that happened before our read. Two cases:
1564 * -# The action is a write: that write must either occur before
1565 * the write we read from or be the write we read from.
1566 * -# The action is a read: the write that that action read from
1567 * must occur before the write we read from or be the same write.
1569 * @param curr The current action. Must be a read.
1570 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1571 * @return True if modification order edges were added; false otherwise
1573 template <typename rf_type>
1574 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1576 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1579 ASSERT(curr->is_read());
1581 /* Last SC fence in the current thread */
1582 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1583 ModelAction *last_sc_write = NULL;
1584 if (curr->is_seqcst())
1585 last_sc_write = get_last_seq_cst_write(curr);
1587 /* Iterate over all threads */
1588 for (i = 0; i < thrd_lists->size(); i++) {
1589 /* Last SC fence in thread i */
1590 ModelAction *last_sc_fence_thread_local = NULL;
1591 if (int_to_id((int)i) != curr->get_tid())
1592 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1594 /* Last SC fence in thread i, before last SC fence in current thread */
1595 ModelAction *last_sc_fence_thread_before = NULL;
1596 if (last_sc_fence_local)
1597 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1599 /* Iterate over actions in thread, starting from most recent */
1600 action_list_t *list = &(*thrd_lists)[i];
1601 action_list_t::reverse_iterator rit;
1602 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1603 ModelAction *act = *rit;
1608 /* Don't want to add reflexive edges on 'rf' */
1609 if (act->equals(rf)) {
1610 if (act->happens_before(curr))
/* SC fence rules: each matching case mo-orders 'act' before 'rf'
 * (section references are to the C++ standard's atomics clause) */
1616 if (act->is_write()) {
1617 /* C++, Section 29.3 statement 5 */
1618 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1619 *act < *last_sc_fence_thread_local) {
1620 added = mo_graph->addEdge(act, rf) || added;
1623 /* C++, Section 29.3 statement 4 */
1624 else if (act->is_seqcst() && last_sc_fence_local &&
1625 *act < *last_sc_fence_local) {
1626 added = mo_graph->addEdge(act, rf) || added;
1629 /* C++, Section 29.3 statement 6 */
1630 else if (last_sc_fence_thread_before &&
1631 *act < *last_sc_fence_thread_before) {
1632 added = mo_graph->addEdge(act, rf) || added;
1638 * Include at most one act per-thread that "happens
/* happens-before case: a prior write is mo-before rf; a prior read's
 * source (write or promise) is mo-before rf, unless it IS rf */
1641 if (act->happens_before(curr)) {
1642 if (act->is_write()) {
1643 added = mo_graph->addEdge(act, rf) || added;
1645 const ModelAction *prevrf = act->get_reads_from();
1646 const Promise *prevrf_promise = act->get_reads_from_promise();
1648 if (!prevrf->equals(rf))
1649 added = mo_graph->addEdge(prevrf, rf) || added;
1650 } else if (!prevrf_promise->equals(rf)) {
1651 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1660 * All compatible, thread-exclusive promises must be ordered after any
1661 * concrete loads from the same thread
1663 for (unsigned int i = 0; i < promises.size(); i++)
1664 if (promises[i]->is_compatible_exclusive(curr))
1665 added = mo_graph->addEdge(rf, promises[i]) || added;
1671 * Updates the mo_graph with the constraints imposed from the current write.
1673 * Basic idea is the following: Go through each other thread and find
1674 * the latest action that happened before our write. Two cases:
1676 * (1) The action is a write => that write must occur before
1679 * (2) The action is a read => the write that that action read from
1680 * must occur before the current write.
1682 * This method also handles two other issues:
1684 * (I) Sequential Consistency: Making sure that if the current write is
1685 * seq_cst, that it occurs after the previous seq_cst write.
1687 * (II) Sending the write back to non-synchronizing reads.
1689 * @param curr The current action. Must be a write.
1690 * @param send_fv A vector for stashing reads to which we may pass our future
1691 * value. If NULL, then don't record any future values.
1692 * @return True if modification order edges were added; false otherwise
1694 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1696 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1699 ASSERT(curr->is_write());
1701 if (curr->is_seqcst()) {
1702 /* We have to at least see the last sequentially consistent write,
1703 so we are initialized. */
1704 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1705 if (last_seq_cst != NULL) {
1706 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1710 /* Last SC fence in the current thread */
1711 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1713 /* Iterate over all threads */
1714 for (i = 0; i < thrd_lists->size(); i++) {
1715 /* Last SC fence in thread i, before last SC fence in current thread */
1716 ModelAction *last_sc_fence_thread_before = NULL;
1717 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1718 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1720 /* Iterate over actions in thread, starting from most recent */
1721 action_list_t *list = &(*thrd_lists)[i];
1722 action_list_t::reverse_iterator rit;
1723 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1724 ModelAction *act = *rit;
1727 * 1) If RMW and it actually read from something, then we
1728 * already have all relevant edges, so just skip to next
1731 * 2) If RMW and it didn't read from anything, we should add
1732 * whatever edge we can get to speed up convergence.
1734 * 3) If normal write, we need to look at earlier actions, so
1735 * continue processing list.
1737 if (curr->is_rmw()) {
1738 if (curr->get_reads_from() != NULL)
1746 /* C++, Section 29.3 statement 7 */
1747 if (last_sc_fence_thread_before && act->is_write() &&
1748 *act < *last_sc_fence_thread_before) {
1749 added = mo_graph->addEdge(act, curr) || added;
1754 * Include at most one act per-thread that "happens
1757 if (act->happens_before(curr)) {
1759 * Note: if act is RMW, just add edge:
1761 * The following edge should be handled elsewhere:
1762 * readfrom(act) --mo--> act
1764 if (act->is_write())
1765 added = mo_graph->addEdge(act, curr) || added;
1766 else if (act->is_read()) {
1767 //if previous read accessed a null, just keep going
/* reads_from == NULL means the read was from a promise instead */
1768 if (act->get_reads_from() == NULL) {
1769 added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
1771 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1774 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1775 !act->same_thread(curr)) {
1776 /* We have an action that:
1777 (1) did not happen before us
1778 (2) is a read and we are a write
1779 (3) cannot synchronize with us
1780 (4) is in a different thread
1782 that read could potentially read from our write. Note that
1783 these checks are overly conservative at this point, we'll
1784 do more checks before actually removing the
/* Stash the read as a future-value candidate (pruned first by thin-air
 * and coherence-promise checks) */
1789 if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
1790 if (!is_infeasible())
1791 send_fv->push_back(act);
1792 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1793 add_future_value(curr, act);
1800 * All compatible, thread-exclusive promises must be ordered after any
1801 * concrete stores to the same thread, or else they can be merged with
1804 for (unsigned int i = 0; i < promises.size(); i++)
1805 if (promises[i]->is_compatible_exclusive(curr))
1806 added = mo_graph->addEdge(curr, promises[i]) || added;
1811 //This procedure uses cohere to prune future values that are
1812 //guaranteed to generate a coherence violation.
1814 //need to see if there is (1) a promise for thread write, (2)
1815 //the promise is sb before write, (3) the promise can only be
1816 //resolved by the thread read, and (4) the promise has same
1817 //location as read/write
1819 bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
1820 thread_id_t write_tid=write->get_tid();
// Walk promises newest-to-oldest (note the i-1 indexing to avoid unsigned underflow)
1821 for(unsigned int i = promises.size(); i>0; i--) {
1822 Promise *pr=promises[i-1];
// Condition (4): promise must be at the same location as the read/write
1823 if (!pr->same_location(write))
1825 //the reading thread is the only thread that can resolve the promise
1826 if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
1827 for(unsigned int j=0;j<pr->get_num_readers();j++) {
1828 ModelAction *prreader=pr->get_reader(j);
1829 //the writing thread reads from the promise before the write
1830 if (prreader->get_tid()==write_tid &&
1831 (*prreader)<(*write)) {
1832 if ((*read)>(*prreader)) {
1833 //check that we don't have a read between the read and promise
1834 //from the same thread as read
// Walk backwards up the exploration tree from 'read' toward 'prreader'
1836 for(const ModelAction *tmp=read;tmp!=prreader;) {
1837 tmp=tmp->get_node()->get_parent()->get_action();
1838 if (tmp->is_read() && tmp->same_thread(read)) {
1855 /** Arbitrary reads from the future are not allowed. Section 29.3
1856 * part 9 places some constraints. This method checks one result of that
1857 * constraint. Others require compiler support. */
1858 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
/* Constraint only applies when both sides are RMWs */
1860 if (!writer->is_rmw())
1863 if (!reader->is_rmw())
/* Walk the writer's reads-from chain; if it passes through 'reader' (or an
 * action in reader's thread that happens-before it), the future read would
 * create a thin-air cycle */
1866 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1867 if (search == reader)
1869 if (search->get_tid() == reader->get_tid() &&
1870 search->happens_before(reader))
1878 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1879 * some constraints. This method checks one of the following constraints
1880 * (others require compiler support):
1882 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1883 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1885 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1887 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1889 /* Iterate over all threads */
1890 for (i = 0; i < thrd_lists->size(); i++) {
/* The earliest write (or read's source) in thread i that the reader
 * happens-before; this plays the role of Y (or A) in the rules above */
1891 const ModelAction *write_after_read = NULL;
1893 /* Iterate over actions in thread, starting from most recent */
1894 action_list_t *list = &(*thrd_lists)[i];
1895 action_list_t::reverse_iterator rit;
1896 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1897 ModelAction *act = *rit;
1899 /* Don't disallow due to act == reader */
1900 if (!reader->happens_before(act) || reader == act)
1902 else if (act->is_write())
1903 write_after_read = act;
1904 else if (act->is_read() && act->get_reads_from() != NULL)
1905 write_after_read = act->get_reads_from();
/* Reading from a write mo-after write_after_read would violate the rule */
1908 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1915 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1916 * The ModelAction under consideration is expected to be taking part in
1917 * release/acquire synchronization as an object of the "reads from" relation.
1918 * Note that this can only provide release sequence support for RMW chains
1919 * which do not read from the future, as those actions cannot be traced until
1920 * their "promise" is fulfilled. Similarly, we may not even establish the
1921 * presence of a release sequence with certainty, as some modification order
1922 * constraints may be decided further in the future. Thus, this function
1923 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1924 * and a boolean representing certainty.
1926 * @param rf The action that might be part of a release sequence. Must be a
1928 * @param release_heads A pass-by-reference style return parameter. After
1929 * execution of this function, release_heads will contain the heads of all the
1930 * relevant release sequences, if any exist with certainty
1931 * @param pending A pass-by-reference style return parameter which is only used
1932 * when returning false (i.e., uncertain). Returns most information regarding
1933 * an uncertain release sequence, including any write operations that might
1934 * break the sequence.
1935 * @return true, if the ModelExecution is certain that release_heads is complete;
1938 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1939 rel_heads_list_t *release_heads,
1940 struct release_seq *pending) const
1942 /* Only check for release sequences if there are no cycles */
1943 if (mo_graph->checkForCycles())
/* Phase 1: walk the RMW chain backwards through reads-from, collecting any
 * release (or preceding fence-release) found along the way */
1946 for ( ; rf != NULL; rf = rf->get_reads_from()) {
1947 ASSERT(rf->is_write());
1949 if (rf->is_release())
1950 release_heads->push_back(rf);
1951 else if (rf->get_last_fence_release())
1952 release_heads->push_back(rf->get_last_fence_release());
1954 break; /* End of RMW chain */
1956 /** @todo Need to be smarter here... In the linux lock
1957 * example, this will run to the beginning of the program for
1959 /** @todo The way to be smarter here is to keep going until 1
1960 * thread has a release preceded by an acquire and you've seen
1963 /* acq_rel RMW is a sufficient stopping condition */
1964 if (rf->is_acquire() && rf->is_release())
1965 return true; /* complete */
1968 /* read from future: need to settle this later */
1970 return false; /* incomplete */
1973 if (rf->is_release())
1974 return true; /* complete */
1976 /* else relaxed write
1977 * - check for fence-release in the same thread (29.8, stmt. 3)
1978 * - check modification order for contiguous subsequence
1979 * -> rf must be same thread as release */
1981 const ModelAction *fence_release = rf->get_last_fence_release();
1982 /* Synchronize with a fence-release unconditionally; we don't need to
1983 * find any more "contiguous subsequence..." for it */
1985 release_heads->push_back(fence_release);
/* Phase 2: search rf's own thread for the nearest write-release before rf */
1987 int tid = id_to_int(rf->get_tid());
1988 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1989 action_list_t *list = &(*thrd_lists)[tid];
1990 action_list_t::const_reverse_iterator rit;
1992 /* Find rf in the thread list */
1993 rit = std::find(list->rbegin(), list->rend(), rf);
1994 ASSERT(rit != list->rend());
1996 /* Find the last {write,fence}-release */
1997 for (; rit != list->rend(); rit++) {
1998 if (fence_release && *(*rit) < *fence_release)
2000 if ((*rit)->is_release())
2003 if (rit == list->rend()) {
2004 /* No write-release in this thread */
2005 return true; /* complete */
2006 } else if (fence_release && *(*rit) < *fence_release) {
2007 /* The fence-release is more recent (and so, "stronger") than
2008 * the most recent write-release */
2009 return true; /* complete */
2010 } /* else, need to establish contiguous release sequence */
2011 ModelAction *release = *rit;
2013 ASSERT(rf->same_thread(release));
2015 pending->writes.clear();
/* Phase 3: for every OTHER thread, check whether any of its writes could
 * break the [release ... rf] subsequence in modification order */
2017 bool certain = true;
2018 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2019 if (id_to_int(rf->get_tid()) == (int)i)
2021 list = &(*thrd_lists)[i];
2023 /* Can we ensure no future writes from this thread may break
2024 * the release seq? */
2025 bool future_ordered = false;
2027 ModelAction *last = get_last_action(int_to_id(i));
2028 Thread *th = get_thread(int_to_id(i));
2029 if ((last && rf->happens_before(last)) ||
2032 future_ordered = true;
2034 ASSERT(!th->is_model_thread() || future_ordered);
2036 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2037 const ModelAction *act = *rit;
2038 /* Reach synchronization -> this thread is complete */
2039 if (act->happens_before(release))
2041 if (rf->happens_before(act)) {
2042 future_ordered = true;
2046 /* Only non-RMW writes can break release sequences */
2047 if (!act->is_write() || act->is_rmw())
2050 /* Check modification order */
2051 if (mo_graph->checkReachable(rf, act)) {
2052 /* rf --mo--> act */
2053 future_ordered = true;
2056 if (mo_graph->checkReachable(act, release))
2057 /* act --mo--> release */
2059 if (mo_graph->checkReachable(release, act) &&
2060 mo_graph->checkReachable(act, rf)) {
2061 /* release --mo-> act --mo--> rf */
2062 return true; /* complete */
2064 /* act may break release sequence */
2065 pending->writes.push_back(act);
2068 if (!future_ordered)
2069 certain = false; /* This thread is uncertain */
2073 release_heads->push_back(release);
2074 pending->writes.clear();
2076 pending->release = release;
2083 * An interface for getting the release sequence head(s) with which a
2084 * given ModelAction must synchronize. This function only returns a non-empty
2085 * result when it can locate a release sequence head with certainty. Otherwise,
2086 * it may mark the internal state of the ModelExecution so that it will handle
2087 * the release sequence at a later time, causing @a acquire to update its
2088 * synchronization at some later point in execution.
2090 * @param acquire The 'acquire' action that may synchronize with a release
2092 * @param read The read action that may read from a release sequence; this may
2093 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2094 * when 'acquire' is a fence-acquire)
2095 * @param release_heads A pass-by-reference return parameter. Will be filled
2096 * with the head(s) of the release sequence(s), if they exist with certainty.
2097 * @see ModelExecution::release_seq_heads
2099 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2100 ModelAction *read, rel_heads_list_t *release_heads)
2102 const ModelAction *rf = read->get_reads_from();
/* snapshot_calloc: allocated in snapshotting memory so it rolls back with
 * the execution state */
2103 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2104 sequence->acquire = acquire;
2105 sequence->read = read;
/* Uncertain result: keep the record for lazy resolution; otherwise free it */
2107 if (!release_seq_heads(rf, release_heads, sequence)) {
2108 /* add act to 'lazy checking' list */
2109 pending_rel_seqs.push_back(sequence);
2111 snapshot_free(sequence);
2116 * @brief Propagate a modified clock vector to actions later in the execution
2119 * After an acquire operation lazily completes a release-sequence
2120 * synchronization, we must update all clock vectors for operations later than
2121 * the acquire in the execution order.
2123 * @param acquire The ModelAction whose clock vector must be propagated
2124 * @param work The work queue to which we can add work items, if this
2125 * propagation triggers more updates (e.g., to the modification order)
2127 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2129 /* Re-check all pending release sequences */
2130 work->push_back(CheckRelSeqWorkEntry(NULL))
2131 /* Re-check read-acquire for mo_graph edges */
2132 work->push_back(MOEdgeWorkEntry(acquire));
2134 /* propagate synchronization to later actions */
/* Walk the trace backwards from the end until we reach 'acquire' itself */
2135 action_list_t::reverse_iterator rit = action_trace.rbegin();
2136 for (; (*rit) != acquire; rit++) {
2137 ModelAction *propagate = *rit;
2138 if (acquire->happens_before(propagate)) {
/* Pull acquire's (updated) clock vector into the later action */
2139 synchronize(acquire, propagate);
2140 /* Re-check 'propagate' for mo_graph edges */
2141 work->push_back(MOEdgeWorkEntry(propagate));
2147 * Attempt to resolve all stashed operations that might synchronize with a
2148 * release sequence for a given location. This implements the "lazy" portion of
2149 * determining whether or not a release sequence was contiguous, since not all
2150 * modification order information is present at the time an action occurs.
2152 * @param location The location/object that should be checked for release
2153 * sequence resolutions. A NULL value means to check all locations.
2154 * @param work_queue The work queue to which to add work items as they are
2156 * @return True if any updates occurred (new synchronization, new mo_graph
2159 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2161 bool updated = false;
2162 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2163 while (it != pending_rel_seqs.end()) {
2164 struct release_seq *pending = *it;
2165 ModelAction *acquire = pending->acquire;
2166 const ModelAction *read = pending->read;
2168 /* Only resolve sequences on the given location, if provided */
2169 if (location && read->get_location() != location) {
/* Re-run the certainty check with the newest mo_graph information */
2174 const ModelAction *rf = read->get_reads_from();
2175 rel_heads_list_t release_heads;
2177 complete = release_seq_heads(rf, &release_heads, pending);
/* Synchronize the acquire with every newly-certain release head */
2178 for (unsigned int i = 0; i < release_heads.size(); i++)
2179 if (!acquire->has_synchronized_with(release_heads[i]))
2180 if (synchronize(release_heads[i], acquire))
2184 /* Propagate the changed clock vector */
2185 propagate_clockvector(acquire, work_queue);
/* Resolved with certainty: drop the pending record and reclaim it */
2188 it = pending_rel_seqs.erase(it);
2189 snapshot_free(pending);
2195 // If we resolved promises or data races, see if we have realized a data race.
2202 * Performs various bookkeeping operations for the current ModelAction. For
2203 * instance, adds action to the per-object, per-thread action vector and to the
2204 * action trace list of all thread actions.
2206 * @param act is the ModelAction to add.
2208 void ModelExecution::add_action_to_lists(ModelAction *act)
2210 int tid = id_to_int(act->get_tid());
2211 ModelAction *uninit = NULL;
/* First atomic access to this location: seed the lists with a synthetic
 * "uninitialized" write so reads have something to read from */
2213 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2214 if (list->empty() && act->is_atomic_var()) {
2215 uninit = get_uninitialized_action(act);
2216 uninit_id = id_to_int(uninit->get_tid());
2217 list->push_front(uninit);
2219 list->push_back(act);
2221 action_trace.push_back(act);
2223 action_trace.push_front(uninit);
/* Per-location, per-thread vector; grow it to cover all thread ids */
2225 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2226 if (tid >= (int)vec->size())
2227 vec->resize(priv->next_thread_id);
2228 (*vec)[tid].push_back(act);
2230 (*vec)[uninit_id].push_front(uninit);
/* Track the most recent action (and fence-release) for each thread */
2232 if ((int)thrd_last_action.size() <= tid)
2233 thrd_last_action.resize(get_num_threads());
2234 thrd_last_action[tid] = act;
2236 thrd_last_action[uninit_id] = uninit;
2238 if (act->is_fence() && act->is_release()) {
2239 if ((int)thrd_last_fence_release.size() <= tid)
2240 thrd_last_fence_release.resize(get_num_threads());
2241 thrd_last_fence_release[tid] = act;
/* A wait also acts on its mutex: mirror it into the mutex location's lists
 * (the wait's value holds the mutex address) */
2244 if (act->is_wait()) {
2245 void *mutex_loc = (void *) act->get_value();
2246 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2248 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2249 if (tid >= (int)vec->size())
2250 vec->resize(priv->next_thread_id);
2251 (*vec)[tid].push_back(act);
2256 * @brief Get the last action performed by a particular Thread
2257 * @param tid The thread ID of the Thread in question
2258 * @return The last action in the thread
2260 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
// Look up the cached last action for 'tid', guarding against threads that
// have not yet recorded one. NOTE(review): the out-of-range fallback
// (presumably returning NULL) is not visible in this excerpt.
2262 int threadid = id_to_int(tid);
2263 if (threadid < (int)thrd_last_action.size())
2264 return thrd_last_action[id_to_int(tid)];
2270 * @brief Get the last fence release performed by a particular Thread
2271 * @param tid The thread ID of the Thread in question
2272 * @return The last fence release in the thread, if one exists; NULL otherwise
2274 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
// Same pattern as get_last_action(), but for the per-thread cache of the
// most recent release fence. NOTE(review): the out-of-range fallback is
// not visible in this excerpt.
2276 int threadid = id_to_int(tid);
2277 if (threadid < (int)thrd_last_fence_release.size())
2278 return thrd_last_fence_release[id_to_int(tid)];
2284 * Gets the last memory_order_seq_cst write (in the total global sequence)
2285 * performed on a particular object (i.e., memory location), not including the
2287 * @param curr The current ModelAction; also denotes the object location to
2289 * @return The last seq_cst write
2291 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2293 void *location = curr->get_location();
2294 action_list_t *list = obj_map.get(location);
2295 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
// Scan backwards from the end of the per-location list until 'curr' is
// found (the empty-body line of this first loop is elided here) ...
2296 action_list_t::reverse_iterator rit;
2297 for (rit = list->rbegin(); (*rit) != curr; rit++)
2299 rit++; /* Skip past curr */
// ... then keep scanning backwards for the first seq_cst write.
// NOTE(review): the matching-case return and the NULL fallback are not
// visible in this excerpt.
2300 for ( ; rit != list->rend(); rit++)
2301 if ((*rit)->is_write() && (*rit)->is_seqcst())
2307 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2308 * performed in a particular thread, prior to a particular fence.
2309 * @param tid The ID of the thread to check
2310 * @param before_fence The fence from which to begin the search; if NULL, then
2311 * search for the most recent fence in the thread.
2312 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2314 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2316 /* All fences should have location FENCE_LOCATION */
2317 action_list_t *list = obj_map.get(FENCE_LOCATION);
2322 action_list_t::reverse_iterator rit = list->rbegin();
// When a starting fence is given, first rewind the reverse iterator to
// it (the loop-body 'break' is elided here), and assert we found it.
2325 for (; rit != list->rend(); rit++)
2326 if (*rit == before_fence)
2329 ASSERT(*rit == before_fence);
// Continue backwards for the most recent prior seq_cst fence in 'tid'.
// NOTE(review): the matching-case return and NULL fallback are not
// visible in this excerpt.
2333 for (; rit != list->rend(); rit++)
2334 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2340 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2341 * location). This function identifies the mutex according to the current
2342 * action, which is presumed to perform on the same mutex.
2343 * @param curr The current ModelAction; also denotes the object location to
2345 * @return The last unlock operation
2347 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2349 void *location = curr->get_location();
2350 action_list_t *list = obj_map.get(location);
2351 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Reverse scan: the most recent unlock OR wait on this mutex counts
// (a wait releases the mutex too). NOTE(review): the matching-case
// return and NULL fallback are not visible in this excerpt.
2352 action_list_t::reverse_iterator rit;
2353 for (rit = list->rbegin(); rit != list->rend(); rit++)
2354 if ((*rit)->is_unlock() || (*rit)->is_wait())
2359 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
// Yields the action that "precedes" the thread's next step: its last
// action, falling back to the thread's THREAD_CREATE action.
// NOTE(review): the guard between the two (likely 'if (!parent)') and
// the return are elided in this excerpt.
2361 ModelAction *parent = get_last_action(tid);
2363 parent = get_thread(tid)->get_creation();
2368 * Returns the clock vector for a given thread.
2369 * @param tid The thread whose clock vector we want
2370 * @return Desired clock vector
2372 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
// Delegate: the thread's clock vector is that of its parent action.
2374 return get_parent_action(tid)->get_cv();
2378 * @brief Find the promise (if any) to resolve for the current action and
2379 * remove it from the pending promise vector
2380 * @param curr The current ModelAction. Should be a write.
2381 * @return The Promise to resolve, if any; otherwise NULL
2383 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
// The Node records (by index) which promise this write was chosen to
// resolve; remove and return that Promise. NOTE(review): the 'return
// ret;' and the no-match NULL return are elided in this excerpt.
2385 for (unsigned int i = 0; i < promises.size(); i++)
2386 if (curr->get_node()->get_promise(i)) {
2387 Promise *ret = promises[i];
2388 promises.erase(promises.begin() + i);
2395 * Resolve a Promise with a current write.
2396 * @param write The ModelAction that is fulfilling Promises
2397 * @param promise The Promise to resolve
2398 * @param work The work queue, for adding new fixup work
2399 * @return True if the Promise was successfully resolved; false otherwise
2401 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
// Bind every reader of 'promise' to 'write' via read_from(), propagate
// the resulting clock-vector changes, then commit the resolution in the
// mo_graph. NOTE(review): the signature's final parameter ('work') and
// the function's return are elided in this excerpt.
2404 ModelVector<ModelAction *> actions_to_check;
2406 for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2407 ModelAction *read = promise->get_reader(i);
2408 if (read_from(read, write)) {
2409 /* Propagate the changed clock vector */
2410 propagate_clockvector(read, work);
2412 actions_to_check.push_back(read);
2414 /* Make sure the promise's value matches the write's value */
2415 ASSERT(promise->is_compatible(write) && promise->same_value(write));
// A failed CycleGraph resolution is a hard (unrecoverable) promise
// failure for this execution.
2416 if (!mo_graph->resolvePromise(promise, write))
2417 priv->hard_failed_promise = true;
2420 * @todo It is possible to end up in an inconsistent state, where a
2421 * "resolved" promise may still be referenced if
2422 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2424 * Note that the inconsistency only matters when dumping mo_graph to
2430 //Check whether reading these writes has made threads unable to
// Re-check promise feasibility for each reader whose reads-from just
// changed (is_read_check = true).
2432 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2433 ModelAction *read = actions_to_check[i];
2434 mo_check_promises(read, true);
2441 * Compute the set of promises that could potentially be satisfied by this
2442 * action. Note that the set computation actually appears in the Node, not in
2444 * @param curr The ModelAction that may satisfy promises
2446 void ModelExecution::compute_promises(ModelAction *curr)
// For each outstanding promise, mark (on curr's Node) whether 'curr'
// could satisfy it: it must be location/value compatible, and no reader
// may already happen-before or be able to synchronize with 'curr'.
2448 for (unsigned int i = 0; i < promises.size(); i++) {
2449 Promise *promise = promises[i];
2450 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2453 bool satisfy = true;
2454 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2455 const ModelAction *act = promise->get_reader(j);
2456 if (act->happens_before(curr) ||
2457 act->could_synchronize_with(curr)) {
// NOTE(review): the 'satisfy = false; break;' body and the 'if (satisfy)'
// guard before set_promise() are elided in this excerpt.
2463 curr->get_node()->set_promise(i);
2467 /** Checks promises in response to change in ClockVector Threads. */
2468 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
// After 'tid's clock vector is merged, a reader that has newly become
// synchronized-with (it was not under old_cv, but is under merge_cv) can
// no longer be satisfied by 'tid'; eliminate the thread from the
// promise, and flag failure if no candidate threads remain.
2470 for (unsigned int i = 0; i < promises.size(); i++) {
2471 Promise *promise = promises[i];
2472 if (!promise->thread_is_available(tid))
2474 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2475 const ModelAction *act = promise->get_reader(j);
2476 if ((!old_cv || !old_cv->synchronized_since(act)) &&
2477 merge_cv->synchronized_since(act)) {
2478 if (promise->eliminate_thread(tid)) {
2479 /* Promise has failed */
2480 priv->failed_promise = true;
2488 void ModelExecution::check_promises_thread_disabled()
// Sweep all promises and flag the (soft) failed_promise bit for any that
// report has_failed() after a thread became disabled.
2490 for (unsigned int i = 0; i < promises.size(); i++) {
2491 Promise *promise = promises[i];
2492 if (promise->has_failed()) {
2493 priv->failed_promise = true;
2500 * @brief Checks promises in response to addition to modification order for
2503 * We test whether threads are still available for satisfying promises after an
2504 * addition to our modification order constraints. Those that are unavailable
2505 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2506 * that promise has failed.
2508 * @param act The ModelAction which updated the modification order
2509 * @param is_read_check Should be true if act is a read and we must check for
2510 * updates to the store from which it read (there is a distinction here for
2511 * RMW's, which are both a load and a store)
2513 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
// Re-check promise feasibility after a modification-order update.
// For a read-check, the relevant write is the store 'act' read from;
// otherwise 'act' itself is the write.
2515 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2517 for (unsigned int i = 0; i < promises.size(); i++) {
2518 Promise *promise = promises[i];
2520 // Is this promise on the same location?
2521 if (!promise->same_location(write))
// Case 1: 'write' is mo-ordered after a promised reader that
// happens-before 'act' — the promise can never be satisfied by it.
2524 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2525 const ModelAction *pread = promise->get_reader(j);
2526 if (!pread->happens_before(act))
2528 if (mo_graph->checkPromise(write, promise)) {
2529 priv->hard_failed_promise = true;
2535 // Don't do any lookups twice for the same thread
2536 if (!promise->thread_is_available(act->get_tid()))
// Case 2: the promise is mo-reachable from 'write'; run the same
// hard-failure check. NOTE(review): loop-exit/continue lines between
// the cases are elided in this excerpt.
2539 if (mo_graph->checkReachable(promise, write)) {
2540 if (mo_graph->checkPromise(write, promise)) {
2541 priv->hard_failed_promise = true;
2549 * Compute the set of writes that may break the current pending release
2550 * sequence. This information is extracted from previous release sequence
2553 * @param curr The current ModelAction. Must be a release sequence fixup
2556 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
// Populate curr's Node with the candidate writes that could break the
// most recent pending release sequence; nothing to do when no sequences
// are pending.
2558 if (pending_rel_seqs.empty())
2561 struct release_seq *pending = pending_rel_seqs.back();
2562 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2563 const ModelAction *write = pending->writes[i];
2564 curr->get_node()->add_relseq_break(write);
2567 /* NULL means don't break the sequence; just synchronize */
2568 curr->get_node()->add_relseq_break(NULL);
2572 * Build up an initial set of all past writes that this 'read' action may read
2573 * from, as well as any previously-observed future values that must still be valid.
2575 * @param curr is the current ModelAction that we are exploring; it must be a
2578 void ModelExecution::build_may_read_from(ModelAction *curr)
// Build the may-read-from set for the read 'curr': all feasible past
// writes (at most one happens-before write per thread) plus compatible
// promised future values. Feasibility of each candidate is trial-tested
// against the mo_graph and rolled back.
2580 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2582 ASSERT(curr->is_read());
// For a seq_cst read, remember the last seq_cst write so we can exclude
// SC-ordered-earlier writes below. NOTE(review): the declaration of the
// loop index 'i' is elided in this excerpt.
2584 ModelAction *last_sc_write = NULL;
2586 if (curr->is_seqcst())
2587 last_sc_write = get_last_seq_cst_write(curr);
2589 /* Iterate over all threads */
2590 for (i = 0; i < thrd_lists->size(); i++) {
2591 /* Iterate over actions in thread, starting from most recent */
2592 action_list_t *list = &(*thrd_lists)[i];
2593 action_list_t::reverse_iterator rit;
2594 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2595 ModelAction *act = *rit;
2597 /* Only consider 'write' actions */
2598 if (!act->is_write() || act == curr)
2601 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2602 bool allow_read = true;
// Sleep-set reads are restricted further by sleep_can_read_from().
// NOTE(review): the 'allow_read = false' branch bodies and the
// 'if (allow_read)' guard are elided in this excerpt.
2604 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2606 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2610 /* Only add feasible reads */
// Trial-apply the read's mo constraints; keep the candidate only if
// the graph stays feasible, then roll the trial back.
2611 mo_graph->startChanges();
2612 r_modification_order(curr, act);
2613 if (!is_infeasible())
2614 curr->get_node()->add_read_from_past(act);
2615 mo_graph->rollbackChanges();
2618 /* Include at most one act per-thread that "happens before" curr */
2619 if (act->happens_before(curr))
2624 /* Inherit existing, promised future values */
2625 for (i = 0; i < promises.size(); i++) {
2626 const Promise *promise = promises[i];
2627 const ModelAction *promise_read = promise->get_reader(0);
2628 if (promise_read->same_var(curr)) {
2629 /* Only add feasible future-values */
2630 mo_graph->startChanges();
2631 r_modification_order(curr, promise);
2632 if (!is_infeasible())
2633 curr->get_node()->add_read_from_promise(promise_read);
2634 mo_graph->rollbackChanges();
2638 /* We may find no valid may-read-from only if the execution is doomed */
2639 if (!curr->get_node()->read_from_size()) {
2640 priv->no_valid_reads = true;
// Debug dump of the computed set.
2644 if (DBG_ENABLED()) {
2645 model_print("Reached read action:\n");
2647 model_print("Printing read_from_past\n");
2648 curr->get_node()->print_read_from_past();
2649 model_print("End printing read_from_past\n");
2653 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
// Walk the RMW reads-from chain starting at 'write', deciding whether a
// sleeping reader may observe it. NOTE(review): the per-iteration return
// statements and the final fallback are elided in this excerpt.
2655 for ( ; write != NULL; write = write->get_reads_from()) {
2656 /* UNINIT actions don't have a Node, and they never sleep */
2657 if (write->is_uninitialized())
// Check whether curr's thread was on the sleep set at the point the
// write was explored (recorded in the write's parent Node).
2659 Node *prevnode = write->get_node()->get_parent();
2661 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2662 if (write->is_release() && thread_sleep)
// Only RMWs continue the chain to the store they read from.
2664 if (!write->is_rmw())
2671 * @brief Get an action representing an uninitialized atomic
2673 * This function may create a new one or try to retrieve one from the NodeStack
2675 * @param curr The current action, which prompts the creation of an UNINIT action
2676 * @return A pointer to the UNINIT ModelAction
2678 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
// Reuse the UNINIT action cached on curr's Node if one exists; otherwise
// create one (attributed to the model thread, with the configured
// uninitvalue) and cache it. NOTE(review): the '!act' guard and the
// return are elided in this excerpt.
2680 Node *node = curr->get_node();
2681 ModelAction *act = node->get_uninit_action();
2683 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2684 node->set_uninit_action(act);
2686 act->create_cv(NULL);
2690 static void print_list(const action_list_t *list)
// Print a formatted table of every action in 'list', then a hash of the
// printed actions (useful for quickly comparing two traces).
2692 action_list_t::const_iterator it;
2694 model_print("------------------------------------------------------------------------------------\n");
2695 model_print("# t Action type MO Location Value Rf CV\n");
2696 model_print("------------------------------------------------------------------------------------\n");
2698 unsigned int hash = 0;
// Only actions with a real sequence number are printed/hashed; the
// per-action print call is elided in this excerpt.
2700 for (it = list->begin(); it != list->end(); it++) {
2701 const ModelAction *act = *it;
2702 if (act->get_seq_number() > 0)
2704 hash = hash^(hash<<3)^((*it)->hash());
2706 model_print("HASH %u\n", hash);
2707 model_print("------------------------------------------------------------------------------------\n");
2710 #if SUPPORT_MOD_ORDER_DUMP
2711 void ModelExecution::dumpGraph(char *filename) const
// Emit the execution as a Graphviz .dot file: mo_graph nodes, reads-from
// edges (red) and sequenced-before edges (blue) reconstructed from the
// action trace. NOTE(review): 'buffer's declaration and the fclose of
// 'file' are not visible in this excerpt — confirm the stream is closed.
2714 sprintf(buffer, "%s.dot", filename);
2715 FILE *file = fopen(buffer, "w");
2716 fprintf(file, "digraph %s {\n", filename);
2717 mo_graph->dumpNodes(file);
// thread_array[tid] tracks the previous action per thread, to draw the
// sequenced-before edge from it to the current action.
2718 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2720 for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2721 ModelAction *act = *it;
2722 if (act->is_read()) {
2723 mo_graph->dot_print_node(file, act);
// rf edge from either a concrete store or a promise.
2724 if (act->get_reads_from())
2725 mo_graph->dot_print_edge(file,
2726 act->get_reads_from(),
2728 "label=\"rf\", color=red, weight=2");
2730 mo_graph->dot_print_edge(file,
2731 act->get_reads_from_promise(),
2733 "label=\"rf\", color=red");
2735 if (thread_array[act->get_tid()]) {
2736 mo_graph->dot_print_edge(file,
2737 thread_array[id_to_int(act->get_tid())],
2739 "label=\"sb\", color=blue, weight=400");
2742 thread_array[act->get_tid()] = act;
2744 fprintf(file, "}\n");
2745 model_free(thread_array);
2750 /** @brief Prints an execution trace summary. */
2751 void ModelExecution::print_summary() const
// Optionally dump the mo_graph and the full execution graph to files
// named by execution number, then print the trace header, the action
// table, and any still-pending promises.
2753 #if SUPPORT_MOD_ORDER_DUMP
2754 char buffername[100];
2755 sprintf(buffername, "exec%04u", get_execution_number());
2756 mo_graph->dumpGraphToFile(buffername);
2757 sprintf(buffername, "graph%04u", get_execution_number());
2758 dumpGraph(buffername);
// Header: feasible prefixes get status flags; infeasible ones get the
// infeasibility reason instead.
2761 model_print("Execution trace %d:", get_execution_number());
2762 if (isfeasibleprefix()) {
2763 if (is_yieldblocked())
2764 model_print(" YIELD BLOCKED");
2765 if (scheduler->all_threads_sleeping())
2766 model_print(" SLEEP-SET REDUNDANT");
2767 if (have_bug_reports())
2768 model_print(" DETECTED BUG(S)");
2770 print_infeasibility(" INFEASIBLE");
2773 print_list(&action_trace);
2776 if (!promises.empty()) {
2777 model_print("Pending promises:\n");
2778 for (unsigned int i = 0; i < promises.size(); i++) {
2779 model_print(" [P%u] ", i);
2780 promises[i]->print();
2787 * Add a Thread to the system for the first time. Should only be called once
2789 * @param t The Thread to add
2791 void ModelExecution::add_thread(Thread *t)
// Register 't' in the thread map (grown as needed); the special model
// thread is excluded from scheduling.
2793 unsigned int i = id_to_int(t->get_id());
2794 if (i >= thread_map.size())
2795 thread_map.resize(i + 1);
// NOTE(review): the line storing 't' into thread_map is elided in this
// excerpt.
2797 if (!t->is_model_thread())
2798 scheduler->add_thread(t);
2802 * @brief Get a Thread reference by its ID
2803 * @param tid The Thread's ID
2804 * @return A Thread reference
2806 Thread * ModelExecution::get_thread(thread_id_t tid) const
// Bounds-checked lookup in the thread map. NOTE(review): the
// out-of-range fallback (presumably NULL) is elided in this excerpt.
2808 unsigned int i = id_to_int(tid);
2809 if (i < thread_map.size())
2810 return thread_map[i];
2815 * @brief Get a reference to the Thread in which a ModelAction was executed
2816 * @param act The ModelAction
2817 * @return A Thread reference
2819 Thread * ModelExecution::get_thread(const ModelAction *act) const
// Convenience overload: resolve the thread via the action's tid.
2821 return get_thread(act->get_tid());
2825 * @brief Get a Promise's "promise number"
2827 * A "promise number" is an index number that is unique to a promise, valid
2828 * only for a specific snapshot of an execution trace. Promises may come and go
2829 * as they are generated and resolved, so an index only retains meaning for the
2832 * @param promise The Promise to check
2833 * @return The promise index, if the promise still is valid; otherwise -1
2835 int ModelExecution::get_promise_number(const Promise *promise) const
// Linear search by pointer identity. NOTE(review): the 'return i;' on
// match and the -1 fallback are elided in this excerpt.
2837 for (unsigned int i = 0; i < promises.size(); i++)
2838 if (promises[i] == promise)
2845 * @brief Check if a Thread is currently enabled
2846 * @param t The Thread to check
2847 * @return True if the Thread is currently enabled
2849 bool ModelExecution::is_enabled(Thread *t) const
// Delegate to the scheduler's enabled-set.
2851 return scheduler->is_enabled(t);
2855 * @brief Check if a Thread is currently enabled
2856 * @param tid The ID of the Thread to check
2857 * @return True if the Thread is currently enabled
2859 bool ModelExecution::is_enabled(thread_id_t tid) const
// Delegate to the scheduler's enabled-set (by thread ID).
2861 return scheduler->is_enabled(tid);
2865 * @brief Select the next thread to execute based on the current action
2867 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2868 * actions should be followed by the execution of their child thread. In either
2869 * case, the current action should determine the next thread schedule.
2871 * @param curr The current action
2872 * @return The next thread to run, if the current action will determine this
2873 * selection; otherwise NULL
2875 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
// Forced-schedule cases only; NOTE(review): the default NULL return for
// the "no forced choice" case is elided in this excerpt.
2877 /* Do not split atomic RMW */
2878 if (curr->is_rmwr())
2879 return get_thread(curr);
2880 /* Follow CREATE with the created thread */
2881 if (curr->get_type() == THREAD_CREATE)
2882 return curr->get_thread_operand();
2886 /** @return True if the execution has taken too many steps */
2887 bool ModelExecution::too_many_steps() const
// A bound of 0 means "unbounded"; otherwise compare the consumed
// sequence numbers against the configured step bound.
2889 return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2893 * Takes the next step in the execution, if possible.
2894 * @param curr The current step to take
2895 * @return Returns the next Thread to run, if any; NULL if this execution
2898 Thread * ModelExecution::take_step(ModelAction *curr)
// Execute one step: process 'curr' through the model checker, retire the
// thread if it blocked or finished, and report any forced next thread.
2900 Thread *curr_thrd = get_thread(curr);
2901 ASSERT(curr_thrd->get_state() == THREAD_READY);
2903 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action() may substitute a different (e.g. recycled)
// ModelAction; use its result from here on.
2904 curr = check_current_action(curr);
2907 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2908 scheduler->remove_thread(curr_thrd);
2910 return action_select_next_thread(curr);
2914 * Launch end-of-execution release sequence fixups only when
2915 * the execution is otherwise feasible AND there are:
2917 * (1) pending release sequences
2918 * (2) pending assertions that could be invalidated by a change
2919 * in clock vectors (i.e., data races)
2920 * (3) no pending promises
2922 void ModelExecution::fixup_release_sequences()
2924 while (!pending_rel_seqs.empty() &&
2925 is_feasible_prefix_ignore_relseq() &&
2926 haveUnrealizedRaces()) {
2927 model_print("*** WARNING: release sequence fixup action "
2928 "(%zu pending release seuqence(s)) ***\n",
2929 pending_rel_seqs.size());
2930 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2931 std::memory_order_seq_cst, NULL, VALUE_NONE,