10 #include "nodestack.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
22 #define INITIAL_THREAD_ID 0
25 * Structure for holding small ModelChecker members that should be snapshotted
/* All fields live in snapshotted memory so that rolling back to an earlier
 * snapshot automatically restores the per-execution bookkeeping below. */
27 struct model_snapshot_members {
28 model_snapshot_members() :
29 /* First thread created will have id INITIAL_THREAD_ID */
30 next_thread_id(INITIAL_THREAD_ID),
31 used_sequence_numbers(0),
34 failed_promise(false),
35 hard_failed_promise(false),
36 too_many_reads(false),
37 no_valid_reads(false),
38 bad_synchronization(false),
/* Destructor: releases the heap-allocated bug_message objects held in 'bugs' */
43 ~model_snapshot_members() {
44 for (unsigned int i = 0; i < bugs.size(); i++)
/* ID that will be handed to the next Thread created; doubles as the count
 * of threads created so far (see get_num_threads()) */
49 unsigned int next_thread_id;
/* Last sequence number assigned to a ModelAction (see get_next_seq_num()) */
50 modelclock_t used_sequence_numbers;
/* Cached most-recent backtracking point (see set_latest_backtrack()) */
51 ModelAction *next_backtrack;
/* Bug reports accumulated via ModelExecution::assert_bug() */
52 SnapVector<bug_message *> bugs;
/* A promise failure that cannot be recovered from in this execution */
54 bool hard_failed_promise;
57 /** @brief Incorrectly-ordered synchronization was made */
58 bool bad_synchronization;
65 /** @brief Constructor */
/* Wires this execution engine into the owning ModelChecker's scheduler and
 * NodeStack, allocates the snapshotted member struct and the modification
 * order graph, and creates the special "model thread" used as the subject of
 * model-checker-internal ModelActions. The model thread consumes the first
 * thread ID from get_next_id(). */
66 ModelExecution::ModelExecution(ModelChecker *m,
67 const struct model_params *params,
69 NodeStack *node_stack) :
74 thread_map(2), /* We'll always need at least 2 threads */
76 condvar_waiters_map(),
82 thrd_last_fence_release(),
83 node_stack(node_stack),
84 priv(new struct model_snapshot_members()),
85 mo_graph(new CycleGraph())
87 /* Initialize a model-checker thread, for special ModelActions */
88 model_thread = new Thread(get_next_id()); // L: Create model thread
89 add_thread(model_thread); // L: Add model thread to scheduler
90 scheduler->register_engine(this);
91 node_stack->register_engine(this);
94 /** @brief Destructor */
/* Deletes every Thread created during this execution (including the model
 * thread, which occupies one of the first get_num_threads() slots), then
 * walks the outstanding promises — presumably to free them; the loop body is
 * not visible here (TODO confirm). */
95 ModelExecution::~ModelExecution()
97 for (unsigned int i = 0; i < get_num_threads(); i++)
98 delete get_thread(int_to_id(i));
100 for (unsigned int i = 0; i < promises.size(); i++)
/** @return this execution's number, delegated to the owning ModelChecker */
107 int ModelExecution::get_execution_number() const
109 return model->get_execution_number();
/** Look up the action list for address 'ptr' in 'hash', lazily allocating
 *  and caching a fresh list — presumably only on a miss (guard condition not
 *  visible in this view; TODO confirm). Never returns NULL. */
112 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
114 action_list_t *tmp = hash->get(ptr);
116 tmp = new action_list_t();
/** Per-thread variant of get_safe_ptr_action(): look up the vector of
 *  per-thread action lists for 'ptr', lazily allocating an empty vector —
 *  presumably only on a miss (guard not visible here; TODO confirm). */
122 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
124 SnapVector<action_list_t> *tmp = hash->get(ptr);
126 tmp = new SnapVector<action_list_t>();
/** @return the list of actions performed on object 'obj' by thread 'tid',
 *  or (presumably) NULL when the object/thread has no recorded actions —
 *  the fallthrough paths are not visible in this view. */
132 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
134 SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
/* The vector is indexed by integer thread ID; bounds-check before access */
137 unsigned int thread=id_to_int(tid);
138 if (thread < wrv->size())
139 return &(*wrv)[thread];
144 /** @return a thread ID for a new Thread */
/* Post-increment: returns the current ID and reserves the next one. Stored
 * in snapshotted memory, so backtracking reuses IDs consistently. */
145 thread_id_t ModelExecution::get_next_id()
147 return priv->next_thread_id++;
150 /** @return the number of user threads created during this execution */
/* next_thread_id is both "next ID to assign" and "number assigned so far" */
151 unsigned int ModelExecution::get_num_threads() const
153 return priv->next_thread_id;
156 /** @return a sequence number for a new ModelAction */
/* Pre-increment: sequence numbers start at 1; 0 is never handed out */
157 modelclock_t ModelExecution::get_next_seq_num()
159 return ++priv->used_sequence_numbers;
163 * @brief Should the current action wake up a given thread?
165 * @param curr The current action
166 * @param thread The thread that we might wake up
167 * @return True, if we should wake up the sleeping thread; false otherwise
169 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
/* 'asleep' is the action the sleeping thread is blocked trying to execute */
171 const ModelAction *asleep = thread->get_pending();
172 /* Don't allow partial RMW to wake anyone up */
175 /* Synchronizing actions may have been backtracked */
176 if (asleep->could_synchronize_with(curr))
178 /* All acquire/release fences and fence-acquire/store-release */
179 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
181 /* Fence-release + store can awake load-acquire on the same location */
182 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
/* Only relevant if the writer's last fence-release comes after the
 * sleeping thread's last action (compared by execution order) */
183 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
184 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/** @brief Remove from the sleep set every thread that the current action
 *  may plausibly wake (as judged by should_wake_up()).
 *  @param curr The action that was just selected for execution */
190 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
192 for (unsigned int i = 0; i < get_num_threads(); i++) {
193 Thread *thr = get_thread(int_to_id(i));
194 if (scheduler->is_sleep_set(thr)) {
195 if (should_wake_up(curr, thr))
196 /* Remove this thread from sleep set */
197 scheduler->remove_sleep(thr);
202 /** @brief Alert the model-checker that an incorrectly-ordered
203 * synchronization was made */
/* Sets a snapshotted flag; consumers of priv->bad_synchronization decide
 * how the execution is treated (not visible in this view). */
204 void ModelExecution::set_bad_synchronization()
206 priv->bad_synchronization = true;
209 /** @brief Alert the model-checker that a bad seq-cst read (one that is
210 * inconsistent with the SC order) was made */
211 void ModelExecution::set_bad_sc_read()
213 priv->bad_sc_read = true;
/** @brief Record a bug report for this execution.
 *  @param msg Human-readable description of the bug
 *  @return not visible in this view — presumably whether the execution
 *  should be immediately halted (TODO confirm against full source) */
216 bool ModelExecution::assert_bug(const char *msg)
218 priv->bugs.push_back(new bug_message(msg));
/* Only a feasible prefix makes the bug report meaningful */
220 if (isfeasibleprefix()) {
227 /** @return True, if any bugs have been reported for this execution */
228 bool ModelExecution::have_bug_reports() const
230 return priv->bugs.size() != 0;
/** @return the (snapshotted) list of bug reports accumulated so far */
233 SnapVector<bug_message *> * ModelExecution::get_bugs() const
239 * Check whether the current trace has triggered an assertion which should halt
242 * @return True, if the execution should be aborted; false otherwise
244 bool ModelExecution::has_asserted() const
246 return priv->asserted;
250 * Trigger a trace assertion which should cause this execution to be halted.
251 * This can be due to a detected bug or due to an infeasibility that should
/* Latches the snapshotted 'asserted' flag read back by has_asserted() */
254 void ModelExecution::set_assert()
256 priv->asserted = true;
260 * Check if we are in a deadlock. Should only be called at the end of an
261 * execution, although it should not give false positives in the middle of an
262 * execution (there should be some ENABLED thread).
264 * @return True if program is in a deadlock; false otherwise
266 bool ModelExecution::is_deadlocked() const
268 bool blocking_threads = false;
/* Scan every user thread; the model thread is explicitly excluded below */
269 for (unsigned int i = 0; i < get_num_threads(); i++) {
270 thread_id_t tid = int_to_id(i);
273 Thread *t = get_thread(tid);
/* A pending action on a live user thread means it is blocked waiting */
274 if (!t->is_model_thread() && t->get_pending())
275 blocking_threads = true;
277 return blocking_threads;
281 * @brief Check if we are yield-blocked
283 * A program can be "yield-blocked" if all threads are ready to execute a
286 * @return True if the program is yield-blocked; false otherwise
288 bool ModelExecution::is_yieldblocked() const
/* Yield-blocking is only meaningful when the 'yieldblock' parameter is on */
290 if (!params->yieldblock)
293 for (unsigned int i = 0; i < get_num_threads(); i++) {
294 thread_id_t tid = int_to_id(i);
295 Thread *t = get_thread(tid);
/* Thread is poised to execute a yield */
296 if (t->get_pending() && t->get_pending()->is_yield())
303 * Check if this is a complete execution. That is, have all thread completed
304 * execution (rather than exiting because sleep sets have forced a redundant
307 * @return True if the execution is complete.
309 bool ModelExecution::is_complete_execution() const
/* Yield-blocked executions are not complete by definition */
311 if (is_yieldblocked())
/* Any still-enabled thread means the execution has not run to completion */
313 for (unsigned int i = 0; i < get_num_threads(); i++)
314 if (is_enabled(int_to_id(i)))
320 * @brief Find the last fence-related backtracking conflict for a ModelAction
322 * This function performs the search for the most recent conflicting action
323 * against which we should perform backtracking, as affected by fence
324 * operations. This includes pairs of potentially-synchronizing actions which
325 * occur due to fence-acquire or fence-release, and hence should be explored in
326 * the opposite execution order.
328 * @param act The current action
329 * @return The most recent action which conflicts with act due to fences
331 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
333 /* Only perform release/acquire fence backtracking for stores */
334 if (!act->is_write())
337 /* Find a fence-release (or, act is a release) */
338 ModelAction *last_release;
339 if (act->is_release())
342 last_release = get_last_fence_release(act->get_tid());
346 /* Skip past the release */
/* Walk the global trace backwards until we stand on 'last_release' */
347 const action_list_t *list = &action_trace;
348 action_list_t::const_reverse_iterator rit;
349 for (rit = list->rbegin(); rit != list->rend(); rit++)
350 if (*rit == last_release)
/* last_release must appear in the trace */
352 ASSERT(rit != list->rend());
357 * load --sb-> fence-acquire */
/* Per-thread slots: the most recent acquire-fence and the most recent
 * prior load (same variable) seen while scanning backwards */
358 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
359 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
360 bool found_acquire_fences = false;
361 for ( ; rit != list->rend(); rit++) {
362 ModelAction *prev = *rit;
/* Actions in our own thread cannot be backtracking partners */
363 if (act->same_thread(prev))
366 int tid = id_to_int(prev->get_tid());
368 if (prev->is_read() && act->same_var(prev)) {
369 if (prev->is_acquire()) {
370 /* Found most recent load-acquire, don't need
371 * to search for more fences */
372 if (!found_acquire_fences)
375 prior_loads[tid] = prev;
/* Record only the first (most recent) acquire fence per thread */
378 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
379 found_acquire_fences = true;
380 acquire_fences[tid] = prev;
/* A conflict requires BOTH an acquire fence and a prior same-variable load
 * in the same thread; among candidates pick the latest in execution order */
384 ModelAction *latest_backtrack = NULL;
385 for (unsigned int i = 0; i < acquire_fences.size(); i++)
386 if (acquire_fences[i] && prior_loads[i])
387 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
388 latest_backtrack = acquire_fences[i];
389 return latest_backtrack;
393 * @brief Find the last backtracking conflict for a ModelAction
395 * This function performs the search for the most recent conflicting action
396 * against which we should perform backtracking. This primary includes pairs of
397 * synchronizing actions which should be explored in the opposite execution
400 * @param act The current action
401 * @return The most recent action which conflicts with act
403 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
/* Dispatch on the kind of the current action; each case scans the
 * per-location history (obj_map) from most recent to oldest */
405 switch (act->get_type()) {
407 /* Only seq-cst fences can (directly) cause backtracking */
408 if (!act->is_seqcst())
413 ModelAction *ret = NULL;
415 /* linear search: from most recent to oldest */
416 action_list_t *list = obj_map.get(act->get_location());
417 action_list_t::reverse_iterator rit;
418 for (rit = list->rbegin(); rit != list->rend(); rit++) {
419 ModelAction *prev = *rit;
422 if (prev->could_synchronize_with(act)) {
/* Also consider fence-induced conflicts; the combination of 'ret'
 * and 'ret2' is resolved in lines elided from this view */
428 ModelAction *ret2 = get_last_fence_conflict(act);
438 case ATOMIC_TRYLOCK: {
439 /* linear search: from most recent to oldest */
440 action_list_t *list = obj_map.get(act->get_location());
441 action_list_t::reverse_iterator rit;
442 for (rit = list->rbegin(); rit != list->rend(); rit++) {
443 ModelAction *prev = *rit;
/* Lock/trylock on the same mutex conflicts */
444 if (act->is_conflicting_lock(prev))
449 case ATOMIC_UNLOCK: {
450 /* linear search: from most recent to oldest */
451 action_list_t *list = obj_map.get(act->get_location());
452 action_list_t::reverse_iterator rit;
453 for (rit = list->rbegin(); rit != list->rend(); rit++) {
454 ModelAction *prev = *rit;
/* Another thread's failed trylock could have succeeded if reordered */
455 if (!act->same_thread(prev) && prev->is_failed_trylock())
/* (case label elided in this view — presumably ATOMIC_WAIT) */
461 /* linear search: from most recent to oldest */
462 action_list_t *list = obj_map.get(act->get_location());
463 action_list_t::reverse_iterator rit;
464 for (rit = list->rbegin(); rit != list->rend(); rit++) {
465 ModelAction *prev = *rit;
466 if (!act->same_thread(prev) && prev->is_failed_trylock())
/* A notify from another thread also conflicts with this action */
468 if (!act->same_thread(prev) && prev->is_notify())
474 case ATOMIC_NOTIFY_ALL:
475 case ATOMIC_NOTIFY_ONE: {
476 /* linear search: from most recent to oldest */
477 action_list_t *list = obj_map.get(act->get_location());
478 action_list_t::reverse_iterator rit;
479 for (rit = list->rbegin(); rit != list->rend(); rit++) {
480 ModelAction *prev = *rit;
/* Notify conflicts with another thread's wait on the same condvar */
481 if (!act->same_thread(prev) && prev->is_wait())
492 /** This method finds backtracking points where we should try to
493 * reorder the parameter ModelAction against.
495 * @param the ModelAction to find backtracking points for.
497 void ModelExecution::set_backtracking(ModelAction *act)
499 Thread *t = get_thread(act);
/* The most recent conflicting action; no conflict means nothing to do
 * (the early-return guard is elided from this view) */
500 ModelAction *prev = get_last_conflict(act);
/* Backtracking is recorded at the node *before* the conflicting action */
504 Node *node = prev->get_node()->get_parent();
506 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
507 int low_tid, high_tid;
/* If the conflicting thread was enabled at 'node', only it needs to be
 * considered; otherwise consider every thread */
508 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
509 low_tid = id_to_int(act->get_tid());
510 high_tid = low_tid + 1;
513 high_tid = get_num_threads();
516 for (int i = low_tid; i < high_tid; i++) {
517 thread_id_t tid = int_to_id(i);
519 /* Make sure this thread can be enabled here. */
520 if (i >= node->get_num_threads())
523 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
524 /* Don't backtrack into a point where the thread is disabled or sleeping. */
525 if (node->enabled_status(tid) != THREAD_ENABLED)
528 /* Check if this has been explored already */
529 if (node->has_been_explored(tid))
532 /* See if fairness allows */
533 if (params->fairwindow != 0 && !node->has_priority(tid)) {
/* Skip this backtracking point if some enabled thread has priority
 * over it under the fairness window */
535 for (int t = 0; t < node->get_num_threads(); t++) {
536 thread_id_t tother = int_to_id(t);
537 if (node->is_enabled(tother) && node->has_priority(tother)) {
546 /* See if CHESS-like yield fairness allows */
547 if (params->yieldon) {
549 for (int t = 0; t < node->get_num_threads(); t++) {
550 thread_id_t tother = int_to_id(t);
551 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
560 /* Cache the latest backtracking point */
561 set_latest_backtrack(prev);
563 /* If this is a new backtracking point, mark the tree */
564 if (!node->set_backtrack(tid))
566 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
567 id_to_int(prev->get_tid()),
568 id_to_int(t->get_id()));
577 * @brief Cache the a backtracking point as the "most recent", if eligible
579 * Note that this does not prepare the NodeStack for this backtracking
580 * operation, it only caches the action on a per-execution basis
582 * @param act The operation at which we should explore a different next action
583 * (i.e., backtracking point)
584 * @return True, if this action is now the most recent backtracking point;
587 bool ModelExecution::set_latest_backtrack(ModelAction *act)
/* "Most recent" is judged by execution order (ModelAction comparison) */
589 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
590 priv->next_backtrack = act;
597 * Returns last backtracking point. The model checker will explore a different
598 * path for this point in the next execution.
599 * @return The ModelAction at which the next execution should diverge.
601 ModelAction * ModelExecution::get_next_backtrack()
/* Consume the cached point: hand it out once and clear it */
603 ModelAction *next = priv->next_backtrack;
604 priv->next_backtrack = NULL;
609 * Processes a read model action.
610 * @param curr is the read model action to process.
611 * @return True if processing this read updates the mo_graph.
613 bool ModelExecution::process_read(ModelAction *curr)
615 Node *node = curr->get_node()
/* The NodeStack node dictates where this read takes its value from:
 * a past write, an unresolved promise, or a future value */
617 bool updated = false;
618 switch (node->get_read_from_status()) {
619 case READ_FROM_PAST: {
620 const ModelAction *rf = node->get_read_from_past();
/* Stage mo_graph changes so they can be rolled back if this
 * read-from choice is abandoned */
623 mo_graph->startChanges();
625 ASSERT(!is_infeasible());
/* Too-many-reads (recency) check: try the next read-from candidate
 * before flagging the execution */
626 if (!check_recency(curr, rf)) {
627 if (node->increment_read_from()) {
628 mo_graph->rollbackChanges();
631 priv->too_many_reads = true;
635 updated = r_modification_order(curr, rf);
637 mo_graph->commitChanges();
638 mo_check_promises(curr, true);
641 case READ_FROM_PROMISE: {
642 Promise *promise = curr->get_node()->get_read_from_promise();
/* add_reader() failing marks the promise as failed for this execution */
643 if (promise->add_reader(curr))
644 priv->failed_promise = true;
645 curr->set_read_from_promise(promise);
646 mo_graph->startChanges();
647 if (!check_recency(curr, promise))
648 priv->too_many_reads = true;
649 updated = r_modification_order(curr, promise);
650 mo_graph->commitChanges();
653 case READ_FROM_FUTURE: {
654 /* Read from future value */
/* Create a new Promise to be resolved by a later write */
655 struct future_value fv = node->get_future_value();
656 Promise *promise = new Promise(this, curr, fv);
657 curr->set_read_from_promise(promise);
658 promises.push_back(promise);
659 mo_graph->startChanges();
660 updated = r_modification_order(curr, promise);
661 mo_graph->commitChanges();
/* Deliver the read's value to the user-context thread */
667 get_thread(curr)->set_return_value(curr->get_return_value());
673 * Processes a lock, trylock, or unlock model action. @param curr is
674 * the read model action to process.
676 * The try lock operation checks whether the lock is taken. If not,
677 * it falls to the normal lock operation case. If so, it returns
680 * The lock operation has already been checked that it is enabled, so
681 * it just grabs the lock and synchronizes with the previous unlock.
683 * The unlock operation has to re-enable all of the threads that are
684 * waiting on the lock.
686 * @return True if synchronization was updated; false otherwise
688 bool ModelExecution::process_mutex(ModelAction *curr)
690 std::mutex *mutex = curr->get_mutex();
691 struct std::mutex_state *state = NULL;
694 state = mutex->get_state();
696 switch (curr->get_type()) {
697 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently free */
698 bool success = !state->locked;
699 curr->set_try_lock(success);
/* Return value 0 = failure, 1 = success (thrd_mutex convention here) */
701 get_thread(curr)->set_return_value(0);
704 get_thread(curr)->set_return_value(1);
706 //otherwise fall into the lock case
/* Detect use of the mutex before its initialization, using the
 * allocation clock recorded in the mutex state */
708 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
709 assert_bug("Lock access before initialization");
/* Record the owning thread and synchronize with the prior unlock */
710 state->locked = get_thread(curr);
711 ModelAction *unlock = get_last_unlock(curr);
712 //synchronize with the previous unlock statement
713 if (unlock != NULL) {
714 synchronize(unlock, curr);
720 case ATOMIC_UNLOCK: {
721 /* wake up the other threads */
722 for (unsigned int i = 0; i < get_num_threads(); i++) {
723 Thread *t = get_thread(int_to_id(i));
724 Thread *curr_thrd = get_thread(curr);
/* Only wake threads blocked trying to lock THIS mutex */
725 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
729 /* unlock the lock - after checking who was waiting on it */
730 state->locked = NULL;
/* ATOMIC_WAIT shares the unlock path above; everything below is
 * wait-specific */
732 if (!curr->is_wait())
733 break; /* The rest is only for ATOMIC_WAIT */
735 /* Should we go to sleep? (simulate spurious failures) */
/* Node misc value 0 = actually wait; other values model spurious wakeup */
736 if (curr->get_node()->get_misc() == 0) {
737 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
739 scheduler->sleep(get_thread(curr));
743 case ATOMIC_NOTIFY_ALL: {
744 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
745 //activate all the waiting threads
746 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
747 scheduler->wake(get_thread(*rit));
752 case ATOMIC_NOTIFY_ONE: {
753 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
/* The NodeStack's misc counter enumerates which waiter gets woken,
 * so all choices are explored across executions */
754 int wakeupthread = curr->get_node()->get_misc();
755 action_list_t::iterator it = waiters->begin();
756 advance(it, wakeupthread);
757 scheduler->wake(get_thread(*it));
769 * @brief Check if the current pending promises allow a future value to be sent
771 * It is unsafe to pass a future value back if there exists a pending promise Pr
774 * reader --exec-> Pr --exec-> writer
776 * If such Pr exists, we must save the pending future value until Pr is
779 * @param writer The operation which sends the future value. Must be a write.
780 * @param reader The operation which will observe the value. Must be a read.
781 * @return True if the future value can be sent now; false if it must wait.
783 bool ModelExecution::promises_may_allow(const ModelAction *writer,
784 const ModelAction *reader) const
/* Iterate newest-to-oldest; a promise is positioned by its first reader */
786 for (int i = promises.size() - 1; i >= 0; i--) {
787 ModelAction *pr = promises[i]->get_reader(0);
788 //reader is after promise...doesn't cross any promise
791 //writer is after promise, reader before...bad...
799 * @brief Add a future value to a reader
801 * This function performs a few additional checks to ensure that the future
802 * value can be feasibly observed by the reader
804 * @param writer The operation whose value is sent. Must be a write.
805 * @param reader The read operation which may read the future value. Must be a read.
807 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
809 /* Do more ambitious checks now that mo is more complete */
/* Bail out if modification order already forbids reader observing writer */
810 if (!mo_may_allow(writer, reader))
813 Node *node = reader->get_node();
815 /* Find an ancestor thread which exists at the time of the reader */
/* Walk up the parent chain until the thread ID fits in the node's
 * thread count at the reader's point in the execution */
816 Thread *write_thread = get_thread(writer);
817 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
818 write_thread = write_thread->get_parent();
/* Future value = (value, expiration seq-num window, sending thread) */
820 struct future_value fv = {
821 writer->get_write_value(),
822 writer->get_seq_number() + params->maxfuturedelay,
823 write_thread->get_id(),
/* A genuinely new future value creates a backtracking point at the reader */
825 if (node->add_future_value(fv))
826 set_latest_backtrack(reader);
830 * Process a write ModelAction
831 * @param curr The ModelAction to process
832 * @param work The work queue, for adding fixup work
833 * @return True if the mo_graph was updated or promises were resolved
835 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
837 /* Readers to which we may send our future value */
838 ModelVector<ModelAction *> send_fv;
840 const ModelAction *earliest_promise_reader;
841 bool updated_promises = false;
/* Update write modification order; collects future-value candidates */
843 bool updated_mod_order = w_modification_order(curr, &send_fv);
/* This write may resolve at most one outstanding promise */
844 Promise *promise = pop_promise_to_resolve(curr);
847 earliest_promise_reader = promise->get_reader(0);
848 updated_promises = resolve_promise(curr, promise, work);
850 earliest_promise_reader = NULL;
852 for (unsigned int i = 0; i < send_fv.size(); i++) {
853 ModelAction *read = send_fv[i];
855 /* Don't send future values to reads after the Promise we resolve */
856 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
857 /* Check if future value can be sent immediately */
858 if (promises_may_allow(curr, read)) {
859 add_future_value(curr, read);
/* Otherwise defer: queue it until the blocking promises resolve */
861 futurevalues.push_back(PendingFutureValue(curr, read));
866 /* Check the pending future values */
/* Reverse iteration so erase() does not disturb unvisited indices */
867 for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
868 struct PendingFutureValue pfv = futurevalues[i];
869 if (promises_may_allow(pfv.writer, pfv.reader)) {
870 add_future_value(pfv.writer, pfv.reader);
871 futurevalues.erase(futurevalues.begin() + i);
875 mo_graph->commitChanges();
876 mo_check_promises(curr, false);
/* Plain writes return no value to the user context */
878 get_thread(curr)->set_return_value(VALUE_NONE);
879 return updated_mod_order || updated_promises;
883 * Process a fence ModelAction
884 * @param curr The ModelAction to process
885 * @return True if synchronization was updated
887 bool ModelExecution::process_fence(ModelAction *curr)
890 * fence-relaxed: no-op
891 * fence-release: only log the occurence (not in this function), for
892 * use in later synchronization
893 * fence-acquire (this function): search for hypothetical release
895 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
897 bool updated = false;
898 if (curr->is_acquire()) {
899 action_list_t *list = &action_trace;
900 action_list_t::reverse_iterator rit;
901 /* Find X : is_read(X) && X --sb-> curr */
/* Scan the global trace backwards for same-thread reads preceding curr */
902 for (rit = list->rbegin(); rit != list->rend(); rit++) {
903 ModelAction *act = *rit;
/* Only same-thread actions are sequenced-before curr */
906 if (act->get_tid() != curr->get_tid())
908 /* Stop at the beginning of the thread */
909 if (act->is_thread_start())
911 /* Stop once we reach a prior fence-acquire */
912 if (act->is_fence() && act->is_acquire())
916 /* read-acquire will find its own release sequences */
917 if (act->is_acquire())
920 /* Establish hypothetical release sequences */
921 rel_heads_list_t release_heads;
922 get_release_seq_heads(curr, act, &release_heads);
/* Synchronize the fence with every release head found */
923 for (unsigned int i = 0; i < release_heads.size(); i++)
924 synchronize(release_heads[i], curr);
925 if (release_heads.size() != 0)
933 * @brief Process the current action for thread-related activity
935 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
936 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
937 * synchronization, etc. This function is a no-op for non-THREAD actions
938 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
940 * @param curr The current action
941 * @return True if synchronization was updated or a thread completed
943 bool ModelExecution::process_thread_action(ModelAction *curr)
945 bool updated = false;
947 switch (curr->get_type()) {
948 case THREAD_CREATE: {
/* The action's location/value carry the user's thrd_t and start params */
949 thrd_t *thrd = (thrd_t *)curr->get_location();
950 struct thread_params *params = (struct thread_params *)curr->get_value();
951 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
952 curr->set_thread_operand(th);
954 th->set_creation(curr);
955 /* Promises can be satisfied by children */
/* A child inherits eligibility for any promise its parent could satisfy */
956 for (unsigned int i = 0; i < promises.size(); i++) {
957 Promise *promise = promises[i];
958 if (promise->thread_is_available(curr->get_tid()))
959 promise->add_thread(th->get_id());
963 case PTHREAD_CREATE: {
/* pthread variant of THREAD_CREATE; same structure, pthread_params */
964 thrd_t *thrd = (thrd_t *)curr->get_location();
965 struct pthread_params *params = (struct pthread_params *)curr->get_value();
966 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
967 curr->set_thread_operand(th);
969 th->set_creation(curr);
970 /* Promises can be satisfied by children */
971 for (unsigned int i = 0; i < promises.size(); i++) {
972 Promise *promise = promises[i];
973 if (promise->thread_is_available(curr->get_tid()))
974 promise->add_thread(th->get_id());
/* (case label elided in this view — a join variant: synchronize the
 * joiner with the joined thread's last action) */
979 Thread *blocking = curr->get_thread_operand();
980 ModelAction *act = get_last_action(blocking->get_id());
981 synchronize(act, curr);
982 updated = true; /* trigger rel-seq checks */
/* (second join variant — presumably pthread join; same synchronization) */
986 Thread *blocking = curr->get_thread_operand();
987 ModelAction *act = get_last_action(blocking->get_id());
988 synchronize(act, curr);
989 updated = true; /* trigger rel-seq checks */
990 break; // WL: to be add (modified)
993 case THREAD_FINISH: {
994 Thread *th = get_thread(curr);
995 /* Wake up any joining threads */
996 for (unsigned int i = 0; i < get_num_threads(); i++) {
997 Thread *waiting = get_thread(int_to_id(i));
998 if (waiting->waiting_on() == th &&
999 waiting->get_pending()->is_thread_join())
1000 scheduler->wake(waiting);
1003 /* Completed thread can't satisfy promises */
1004 for (unsigned int i = 0; i < promises.size(); i++) {
1005 Promise *promise = promises[i];
1006 if (promise->thread_is_available(th->get_id()))
1007 if (promise->eliminate_thread(th->get_id()))
/* Eliminating the last eligible thread fails the promise */
1008 priv->failed_promise = true;
1010 updated = true; /* trigger rel-seq checks */
1013 case THREAD_START: {
/* Verify promises against the newly-started thread's clock vector */
1014 check_promises(curr->get_tid(), NULL, curr->get_cv());
1025 * @brief Process the current action for release sequence fixup activity
1027 * Performs model-checker release sequence fixups for the current action,
1028 * forcing a single pending release sequence to break (with a given, potential
1029 * "loose" write) or to complete (i.e., synchronize). If a pending release
1030 * sequence forms a complete release sequence, then we must perform the fixup
1031 * synchronization, mo_graph additions, etc.
1033 * @param curr The current action; must be a release sequence fixup action
1034 * @param work_queue The work queue to which to add work items as they are
1037 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The NodeStack node encodes the chosen break-write (NULL = synchronize) */
1039 const ModelAction *write = curr->get_node()->get_relseq_break();
/* Take ownership of the single pending release sequence being fixed up */
1040 struct release_seq *sequence = pending_rel_seqs.back();
1041 pending_rel_seqs.pop_back();
1043 ModelAction *acquire = sequence->acquire;
1044 const ModelAction *rf = sequence->rf;
1045 const ModelAction *release = sequence->release;
/* Invariant: the release and the read-from write are in the same thread */
1049 ASSERT(release->same_thread(rf));
1051 if (write == NULL) {
1053 * @todo Forcing a synchronization requires that we set
1054 * modification order constraints. For instance, we can't allow
1055 * a fixup sequence in which two separate read-acquire
1056 * operations read from the same sequence, where the first one
1057 * synchronizes and the other doesn't. Essentially, we can't
1058 * allow any writes to insert themselves between 'release' and
1062 /* Must synchronize */
1063 if (!synchronize(release, acquire))
1066 /* Propagate the changed clock vector */
1067 propagate_clockvector(acquire, work_queue)
1069 /* Break release sequence with new edges:
1070 * release --mo--> write --mo--> rf */
1071 mo_graph->addEdge(release, write);
1072 mo_graph->addEdge(write, rf);
1075 /* See if we have realized a data race */
1080 * Initialize the current action by performing one or more of the following
1081 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1082 * in the NodeStack, manipulating backtracking sets, allocating and
1083 * initializing clock vectors, and computing the promises to fulfill.
1085 * @param curr The current action, as passed from the user context; may be
1086 * freed/invalidated after the execution of this function, with a different
1087 * action "returned" its place (pass-by-reference)
1088 * @return True if curr is a newly-explored action; false otherwise
1090 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1092 ModelAction *newcurr;
/* RMW/RMWC completes an earlier RMWR: merge instead of exploring anew */
1094 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1095 newcurr = process_rmw(*curr);
1098 if (newcurr->is_rmw())
1099 compute_promises(newcurr);
/* Otherwise: assign a sequence number, then step the NodeStack, which
 * either replays a previously-explored action or creates a new one */
1105 (*curr)->set_seq_number(get_next_seq_num());
1107 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1109 /* First restore type and order in case of RMW operation */
1110 if ((*curr)->is_rmwr())
1111 newcurr->copy_typeandorder(*curr);
1113 ASSERT((*curr)->get_location() == newcurr->get_location());
1114 newcurr->copy_from_new(*curr);
1116 /* Discard duplicate ModelAction; use action from NodeStack */
1119 /* Always compute new clock vector */
1120 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1123 return false; /* Action was explored previously */
1127 /* Always compute new clock vector */
1128 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1130 /* Assign most recent release fence */
1131 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1134 * Perform one-time actions when pushing new ModelAction onto
1137 if (newcurr->is_write())
1138 compute_promises(newcurr);
1139 else if (newcurr->is_relseq_fixup())
1140 compute_relseq_breakwrites(newcurr);
1141 else if (newcurr->is_wait())
/* misc in {0,1}: wait vs. simulated spurious wakeup (see process_mutex) */
1142 newcurr->get_node()->set_misc_max(2);
1143 else if (newcurr->is_notify_one()) {
/* misc enumerates which waiter gets woken by notify_one */
1144 newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1146 return true; /* This was a new ModelAction */
1151 * @brief Establish reads-from relation between two actions
1153 * Perform basic operations involved with establishing a concrete rf relation,
1154 * including setting the ModelAction data and checking for release sequences.
1156 * @param act The action that is reading (must be a read)
1157 * @param rf The action from which we are reading (must be a write)
1159 * @return True if this read established synchronization
1162 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1165 ASSERT(rf->is_write());
1167 act->set_read_from(rf);
/* Acquire reads must synchronize with the heads of any release sequences
 * that the read-from write belongs to */
1168 if (act->is_acquire()) {
1169 rel_heads_list_t release_heads;
1170 get_release_seq_heads(act, act, &release_heads);
1171 int num_heads = release_heads.size();
1172 for (unsigned int i = 0; i < release_heads.size(); i++)
1173 if (!synchronize(release_heads[i], act))
1175 return num_heads > 0;
1181 * @brief Synchronizes two actions
1183 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1184 * This function performs the synchronization as well as providing other hooks
1185 * for other checks along with synchronization.
1187 * @param first The left-hand side of the synchronizes-with relation
1188 * @param second The right-hand side of the synchronizes-with relation
1189 * @return True if the synchronization was successful (i.e., was consistent
1190 * with the execution order); false otherwise
1192 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* Synchronizing backwards in execution order is an error: flag it */
1194 if (*second < *first) {
1195 set_bad_synchronization();
/* Eliminate promise candidates ruled out by the new clock information */
1198 check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1199 return second->synchronize_with(first);
1203 * Check promises and eliminate potentially-satisfying threads when a thread is
1204 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1205 * no longer satisfy a promise generated from that thread.
1207 * @param blocker The thread on which a thread is waiting
1208 * @param waiting The waiting thread
1210 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1212 for (unsigned int i = 0; i < promises.size(); i++) {
1213 Promise *promise = promises[i];
/* Skip promises 'waiting' could never satisfy anyway */
1214 if (!promise->thread_is_available(waiting->get_id()))
/* Only promises whose reader belongs to the blocking thread matter */
1216 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1217 ModelAction *reader = promise->get_reader(j);
1218 if (reader->get_tid() != blocker->get_id())
1220 if (promise->eliminate_thread(waiting->get_id())) {
1221 /* Promise has failed */
1222 priv->failed_promise = true;
1224 /* Only eliminate the 'waiting' thread once */
// check_action_enabled — would this action succeed if scheduled now?
// NOTE(review): sampled view; the lock-state test after fetching `state`, the
// return statements, and closing braces are elided — code kept byte-identical.
1232 * @brief Check whether a model action is enabled.
1234 * Checks whether an operation would be successful (i.e., is a lock already
1235 * locked, or is the joined thread already complete).
1237 * For yield-blocking, yields are never enabled.
1239 * @param curr is the ModelAction to check whether it is enabled.
1240 * @return a bool that indicates whether the action is enabled.
1242 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1243 if (curr->is_lock()) {
1244 std::mutex *lock = curr->get_mutex();
// presumably the elided code tests state->locked — TODO confirm
1245 struct std::mutex_state *state = lock->get_state();
1248 } else if (curr->is_thread_join()) {
1249 Thread *blocking = curr->get_thread_operand();
1250 if (!blocking->is_complete()) {
// Joining an incomplete thread blocks; prune promises accordingly.
1251 thread_blocking_check_promises(blocking, get_thread(curr));
1254 } else if (params->yieldblock && curr->is_yield()) {
// check_current_action — the central per-action model-checking step: processes
// the action, then drains a work queue of follow-up checks (release sequences,
// mo-graph edges), and finally records backtracking points.
// NOTE(review): sampled view with many elided lines (update/update_all
// assignments, `break`s, closing braces); code kept byte-identical.
1262 * This is the heart of the model checker routine. It performs model-checking
1263 * actions corresponding to a given "current action." Among other processes, it
1264 * calculates reads-from relationships, updates synchronization clock vectors,
1265 * forms a memory_order constraints graph, and handles replay/backtrack
1266 * execution when running permutations of previously-observed executions.
1268 * @param curr The current action to process
1269 * @return The ModelAction that is actually executed; may be different than
1272 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// An RMW is modeled in two parts; the second part must not be re-added to lists.
1275 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1276 bool newly_explored = initialize_curr_action(&curr);
1280 wake_up_sleeping_actions(curr);
1282 /* Compute fairness information for CHESS yield algorithm */
1283 if (params->yieldon) {
1284 curr->get_node()->update_yield(scheduler);
1287 /* Add the action to lists before any other model-checking tasks */
1288 if (!second_part_of_rmw)
1289 add_action_to_lists(curr);
1291 /* Build may_read_from set for newly-created actions */
1292 if (newly_explored && curr->is_read())
1293 build_may_read_from(curr);
1295 /* Initialize work_queue with the "current action" work */
1296 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
// Drain follow-up work until quiescent or an assertion fired.
1297 while (!work_queue.empty() && !has_asserted()) {
1298 WorkQueueEntry work = work_queue.front();
1299 work_queue.pop_front();
1301 switch (work.type) {
1302 case WORK_CHECK_CURR_ACTION: {
1303 ModelAction *act = work.action;
1304 bool update = false; /* update this location's release seq's */
1305 bool update_all = false; /* update all release seq's */
// NOTE(review): each process_* result presumably sets update/update_all in
// elided lines — TODO confirm against full source.
1307 if (process_thread_action(curr))
1310 if (act->is_read() && !second_part_of_rmw && process_read(act))
1313 if (act->is_write() && process_write(act, &work_queue))
1316 if (act->is_fence() && process_fence(act))
1319 if (act->is_mutex_op() && process_mutex(act))
1322 if (act->is_relseq_fixup())
1323 process_relseq_fixup(curr, &work_queue);
1326 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1328 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1331 case WORK_CHECK_RELEASE_SEQ:
1332 resolve_release_sequences(work.location, &work_queue);
1334 case WORK_CHECK_MO_EDGES: {
1335 /** @todo Complete verification of work_queue */
1336 ModelAction *act = work.action;
1337 bool updated = false;
1339 if (act->is_read()) {
1340 const ModelAction *rf = act->get_reads_from();
1341 const Promise *promise = act->get_reads_from_promise();
1343 if (r_modification_order(act, rf))
1345 if (act->is_seqcst()) {
1346 ModelAction *last_sc_write = get_last_seq_cst_write(act);
// An SC read must not see a write that happens-before the last SC write.
1347 if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
1351 } else if (promise) {
1352 if (r_modification_order(act, promise))
1356 if (act->is_write()) {
1357 if (w_modification_order(act, NULL))
1360 mo_graph->commitChanges();
1363 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1372 check_curr_backtracking(curr);
1373 set_backtracking(curr);
// check_curr_backtracking — if this node (or its parent) still has unexplored
// choices of any kind, record `curr` as the latest backtracking point.
// NOTE(review): sampled view; braces elided — code kept byte-identical.
1377 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1379 Node *currnode = curr->get_node();
1380 Node *parnode = currnode->get_parent();
// Any non-empty choice set (backtrack, misc, read-from, promise, relseq-break)
// means the exploration must revisit this point.
1382 if ((parnode && !parnode->backtrack_empty()) ||
1383 !currnode->misc_empty() ||
1384 !currnode->read_from_empty() ||
1385 !currnode->promise_empty() ||
1386 !currnode->relseq_break_empty()) {
1387 set_latest_backtrack(curr);
// promises_expired — true if any outstanding promise's expiration sequence
// number has already been passed by the execution.
// NOTE(review): sampled view; the return statements are elided.
1391 bool ModelExecution::promises_expired() const
1393 for (unsigned int i = 0; i < promises.size(); i++) {
1394 Promise *promise = promises[i];
1395 if (promise->get_expiration() < priv->used_sequence_numbers)
// isfeasibleprefix — strongest feasibility test: feasible-ignoring-relseq AND
// no pending (unresolved) release sequences remain.
1402 * This is the strongest feasibility check available.
1403 * @return whether the current trace (partial or complete) must be a prefix of
1406 bool ModelExecution::isfeasibleprefix() const
1408 return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
// print_infeasibility — emits one bracketed tag per infeasibility cause.
// NOTE(review): sampled view; the local `buf`/`ptr` declarations (between 1416
// and 1420) are elided — code kept byte-identical.
1412 * Print disagnostic information about an infeasible execution
1413 * @param prefix A string to prefix the output with; if NULL, then a default
1414 * message prefix will be provided
1416 void ModelExecution::print_infeasibility(const char *prefix) const
1420 if (mo_graph->checkForCycles())
1421 ptr += sprintf(ptr, "[mo cycle]");
1422 if (priv->failed_promise || priv->hard_failed_promise)
1423 ptr += sprintf(ptr, "[failed promise]");
1424 if (priv->too_many_reads)
1425 ptr += sprintf(ptr, "[too many reads]");
1426 if (priv->no_valid_reads)
1427 ptr += sprintf(ptr, "[no valid reads-from]");
1428 if (priv->bad_synchronization)
1429 ptr += sprintf(ptr, "[bad sw ordering]");
1430 if (priv->bad_sc_read)
1431 ptr += sprintf(ptr, "[bad sc read]");
1432 if (promises_expired())
1433 ptr += sprintf(ptr, "[promise expired]");
1434 if (promises.size() != 0)
1435 ptr += sprintf(ptr, "[unresolved promise]");
1437 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
// is_feasible_prefix_ignore_relseq — feasibility modulo pending release
// sequences: not infeasible, no outstanding promises, no soft promise failure.
1441 * Returns whether the current completed trace is feasible, except for pending
1442 * release sequences.
1444 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1446 return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
// is_infeasible — disjunction of the hard infeasibility flags plus an mo-graph
// cycle check. NOTE(review): sampled view; the final disjunct after
// hard_failed_promise (line 1464 of the original) is elided.
1451 * Check if the current partial trace is infeasible. Does not check any
1452 * end-of-execution flags, which might rule out the execution. Thus, this is
1453 * useful only for ruling an execution as infeasible.
1454 * @return whether the current partial trace is infeasible.
1456 bool ModelExecution::is_infeasible() const
1458 return mo_graph->checkForCycles() ||
1459 priv->no_valid_reads ||
1460 priv->too_many_reads ||
1461 priv->bad_synchronization ||
1462 priv->bad_sc_read ||
1463 priv->hard_failed_promise ||
// process_rmw — folds the second half of an RMW into the preceding RMWR and
// adds the read-write mo edge. NOTE(review): sampled view; the `else` between
// the two addRMWEdge calls and the return are elided — code kept byte-identical.
1467 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1468 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1469 ModelAction *lastread = get_last_action(act->get_tid());
1470 lastread->process_rmw(act);
1471 if (act->is_rmw()) {
// Edge source is either the concrete write read from, or its promise.
1472 if (lastread->get_reads_from())
1473 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1475 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1476 mo_graph->commitChanges();
// should_read_instead — liveness helper for check_recency: could the last
// params->maxreads reads in this thread all have read from `other_rf` instead?
// NOTE(review): sampled view; the early `return false`s and the final return
// are elided — code kept byte-identical.
1482 * A helper function for ModelExecution::check_recency, to check if the current
1483 * thread is able to read from a different write/promise for 'params.maxreads'
1484 * number of steps and if that write/promise should become visible (i.e., is
1485 * ordered later in the modification order). This helps model memory liveness.
1487 * @param curr The current action. Must be a read.
1488 * @param rf The write/promise from which we plan to read
1489 * @param other_rf The write/promise from which we may read
1490 * @return True if we were able to read from other_rf for params.maxreads steps
1492 template <typename T, typename U>
1493 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1495 /* Need a different write/promise */
1496 if (other_rf->equals(rf))
1499 /* Only look for "newer" writes/promises */
1500 if (!mo_graph->checkReachable(rf, other_rf))
1503 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1504 action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1505 action_list_t::reverse_iterator rit = list->rbegin();
1506 ASSERT((*rit) == curr);
1507 /* Skip past curr */
1510 /* Does this write/promise work for everyone? */
1511 for (int i = 0; i < params->maxreads; i++, rit++) {
1512 ModelAction *act = *rit;
1513 if (!act->may_read_from(other_rf))
// check_recency — memory-liveness check: fail the execution if a thread keeps
// reading the same stale write/promise while a newer one should become visible.
// NOTE(review): sampled view; early returns, `rit++`, and closing braces
// between numbered lines are elided — code kept byte-identical.
1520 * Checks whether a thread has read from the same write or Promise for too many
1521 * times without seeing the effects of a later write/Promise.
1524 * 1) there must a different write/promise that we could read from,
1525 * 2) we must have read from the same write/promise in excess of maxreads times,
1526 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1527 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1529 * If so, we decide that the execution is no longer feasible.
1531 * @param curr The current action. Must be a read.
1532 * @param rf The ModelAction/Promise from which we might read.
1533 * @return True if the read should succeed; false otherwise
1535 template <typename T>
1536 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
// Feature is off when maxreads == 0.
1538 if (!params->maxreads)
1541 //NOTE: Next check is just optimization, not really necessary....
1542 if (curr->get_node()->get_read_from_past_size() +
1543 curr->get_node()->get_read_from_promise_size() <= 1)
1546 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1547 int tid = id_to_int(curr->get_tid());
1548 ASSERT(tid < (int)thrd_lists->size());
1549 action_list_t *list = &(*thrd_lists)[tid];
1550 action_list_t::reverse_iterator rit = list->rbegin();
1551 ASSERT((*rit) == curr);
1552 /* Skip past curr */
1555 action_list_t::reverse_iterator ritcopy = rit;
1556 /* See if we have enough reads from the same value */
1557 for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1558 if (ritcopy == list->rend())
1560 ModelAction *act = *ritcopy;
1561 if (!act->is_read())
1563 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1565 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1567 if (act->get_node()->get_read_from_past_size() +
1568 act->get_node()->get_read_from_promise_size() <= 1)
// Try every alternative concrete write, then every alternative promise.
1571 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1572 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1573 if (should_read_instead(curr, rf, write))
1574 return false; /* liveness failure */
1576 for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1577 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1578 if (should_read_instead(curr, rf, promise))
1579 return false; /* liveness failure */
// r_modification_order — adds mo-graph edges implied by a read of `rf`,
// including the C++11 SC-fence rules (29.3 statements 4/5/6) and the
// happens-before rule; also orders thread-exclusive promises after `rf`.
// NOTE(review): sampled view; declarations of `added`/`i`, `break`s, and
// closing braces between numbered lines are elided — code kept byte-identical.
1585 * @brief Updates the mo_graph with the constraints imposed from the current
1588 * Basic idea is the following: Go through each other thread and find
1589 * the last action that happened before our read. Two cases:
1591 * -# The action is a write: that write must either occur before
1592 * the write we read from or be the write we read from.
1593 * -# The action is a read: the write that that action read from
1594 * must occur before the write we read from or be the same write.
1596 * @param curr The current action. Must be a read.
1597 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1598 * @return True if modification order edges were added; false otherwise
1600 template <typename rf_type>
1601 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1603 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1606 ASSERT(curr->is_read());
1608 /* Last SC fence in the current thread */
1609 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1610 ModelAction *last_sc_write = NULL;
1611 if (curr->is_seqcst())
1612 last_sc_write = get_last_seq_cst_write(curr);
1614 /* Iterate over all threads */
1615 for (i = 0; i < thrd_lists->size(); i++) {
1616 /* Last SC fence in thread i */
1617 ModelAction *last_sc_fence_thread_local = NULL;
1618 if (int_to_id((int)i) != curr->get_tid())
1619 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1621 /* Last SC fence in thread i, before last SC fence in current thread */
1622 ModelAction *last_sc_fence_thread_before = NULL;
1623 if (last_sc_fence_local)
1624 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1626 /* Iterate over actions in thread, starting from most recent */
1627 action_list_t *list = &(*thrd_lists)[i];
1628 action_list_t::reverse_iterator rit;
1629 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1630 ModelAction *act = *rit;
1635 /* Don't want to add reflexive edges on 'rf' */
1636 if (act->equals(rf)) {
1637 if (act->happens_before(curr))
1643 if (act->is_write()) {
1644 /* C++, Section 29.3 statement 5 */
1645 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1646 *act < *last_sc_fence_thread_local) {
1647 added = mo_graph->addEdge(act, rf) || added;
1650 /* C++, Section 29.3 statement 4 */
1651 else if (act->is_seqcst() && last_sc_fence_local &&
1652 *act < *last_sc_fence_local) {
1653 added = mo_graph->addEdge(act, rf) || added;
1656 /* C++, Section 29.3 statement 6 */
1657 else if (last_sc_fence_thread_before &&
1658 *act < *last_sc_fence_thread_before) {
1659 added = mo_graph->addEdge(act, rf) || added;
1665 * Include at most one act per-thread that "happens
1668 if (act->happens_before(curr)) {
1669 if (act->is_write()) {
1670 added = mo_graph->addEdge(act, rf) || added;
// A prior read orders its own reads-from source (write or promise) before rf.
1672 const ModelAction *prevrf = act->get_reads_from();
1673 const Promise *prevrf_promise = act->get_reads_from_promise();
1675 if (!prevrf->equals(rf))
1676 added = mo_graph->addEdge(prevrf, rf) || added;
1677 } else if (!prevrf_promise->equals(rf)) {
1678 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1687 * All compatible, thread-exclusive promises must be ordered after any
1688 * concrete loads from the same thread
1690 for (unsigned int i = 0; i < promises.size(); i++)
1691 if (promises[i]->is_compatible_exclusive(curr))
1692 added = mo_graph->addEdge(rf, promises[i]) || added;
// w_modification_order — adds mo-graph edges implied by the current write:
// SC-write ordering, SC-fence rule (29.3 stmt. 7), happens-before ordering,
// and stashing of non-synchronizing reads that may later receive this write's
// future value. Also orders thread-exclusive promises after this write.
// NOTE(review): sampled view; `added`/`i` declarations, `break`/`continue`,
// and closing braces between numbered lines are elided — code byte-identical.
1698 * Updates the mo_graph with the constraints imposed from the current write.
1700 * Basic idea is the following: Go through each other thread and find
1701 * the lastest action that happened before our write. Two cases:
1703 * (1) The action is a write => that write must occur before
1706 * (2) The action is a read => the write that that action read from
1707 * must occur before the current write.
1709 * This method also handles two other issues:
1711 * (I) Sequential Consistency: Making sure that if the current write is
1712 * seq_cst, that it occurs after the previous seq_cst write.
1714 * (II) Sending the write back to non-synchronizing reads.
1716 * @param curr The current action. Must be a write.
1717 * @param send_fv A vector for stashing reads to which we may pass our future
1718 * value. If NULL, then don't record any future values.
1719 * @return True if modification order edges were added; false otherwise
1721 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1723 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1726 ASSERT(curr->is_write());
1728 if (curr->is_seqcst()) {
1729 /* We have to at least see the last sequentially consistent write,
1730 so we are initialized. */
1731 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1732 if (last_seq_cst != NULL) {
1733 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1737 /* Last SC fence in the current thread */
1738 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1740 /* Iterate over all threads */
1741 for (i = 0; i < thrd_lists->size(); i++) {
1742 /* Last SC fence in thread i, before last SC fence in current thread */
1743 ModelAction *last_sc_fence_thread_before = NULL;
1744 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1745 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1747 /* Iterate over actions in thread, starting from most recent */
1748 action_list_t *list = &(*thrd_lists)[i];
1749 action_list_t::reverse_iterator rit;
1750 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1751 ModelAction *act = *rit;
1754 * 1) If RMW and it actually read from something, then we
1755 * already have all relevant edges, so just skip to next
1758 * 2) If RMW and it didn't read from anything, we should
1759 * whatever edge we can get to speed up convergence.
1761 * 3) If normal write, we need to look at earlier actions, so
1762 * continue processing list.
1764 if (curr->is_rmw()) {
1765 if (curr->get_reads_from() != NULL)
1773 /* C++, Section 29.3 statement 7 */
1774 if (last_sc_fence_thread_before && act->is_write() &&
1775 *act < *last_sc_fence_thread_before) {
1776 added = mo_graph->addEdge(act, curr) || added;
1781 * Include at most one act per-thread that "happens
1784 if (act->happens_before(curr)) {
1786 * Note: if act is RMW, just add edge:
1788 * The following edge should be handled elsewhere:
1789 * readfrom(act) --mo--> act
1791 if (act->is_write())
1792 added = mo_graph->addEdge(act, curr) || added;
1793 else if (act->is_read()) {
1794 //if previous read accessed a null, just keep going
1795 if (act->get_reads_from() == NULL) {
1796 added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
1798 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1801 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1802 !act->same_thread(curr)) {
1803 /* We have an action that:
1804 (1) did not happen before us
1805 (2) is a read and we are a write
1806 (3) cannot synchronize with us
1807 (4) is in a different thread
1809 that read could potentially read from our write. Note that
1810 these checks are overly conservative at this point, we'll
1811 do more checks before actually removing the
1816 if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
1817 if (!is_infeasible())
1818 send_fv->push_back(act);
1819 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1820 add_future_value(curr, act);
1827 * All compatible, thread-exclusive promises must be ordered after any
1828 * concrete stores to the same thread, or else they can be merged with
1831 for (unsigned int i = 0; i < promises.size(); i++)
1832 if (promises[i]->is_compatible_exclusive(curr))
1833 added = mo_graph->addEdge(curr, promises[i]) || added;
// check_coherence_promise — prunes future values that would be guaranteed to
// violate coherence via an outstanding same-location promise.
// NOTE(review): sampled view; `continue`, the inner `return false`, and the
// final `return true` are elided — code kept byte-identical.
1838 //This procedure uses cohere to prune future values that are
1839 //guaranteed to generate a coherence violation.
1841 //need to see if there is (1) a promise for thread write, (2)
1842 //the promise is sb before write, (3) the promise can only be
1843 //resolved by the thread read, and (4) the promise has same
1844 //location as read/write
1846 bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
1847 thread_id_t write_tid=write->get_tid();
// Iterate promises newest-first (reverse index walk avoids unsigned underflow).
1848 for(unsigned int i = promises.size(); i>0; i--) {
1849 Promise *pr=promises[i-1];
1850 if (!pr->same_location(write))
1852 //the reading thread is the only thread that can resolve the promise
1853 if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
1854 for(unsigned int j=0;j<pr->get_num_readers();j++) {
1855 ModelAction *prreader=pr->get_reader(j);
1856 //the writing thread reads from the promise before the write
1857 if (prreader->get_tid()==write_tid &&
1858 (*prreader)<(*write)) {
1859 if ((*read)>(*prreader)) {
1860 //check that we don't have a read between the read and promise
1861 //from the same thread as read
// Walk up the node-stack parent chain from `read` back to the promise reader.
1863 for(const ModelAction *tmp=read;tmp!=prreader;) {
1864 tmp=tmp->get_node()->get_parent()->get_action();
1865 if (tmp->is_read() && tmp->same_thread(read)) {
// thin_air_constraint_may_allow — rejects reads-from-the-future that would
// close an RMW cycle back onto the reader (out-of-thin-air prevention).
// NOTE(review): sampled view; the early `return true`s and the `return false`
// inside/after the loop are elided — code kept byte-identical.
1882 /** Arbitrary reads from the future are not allowed. Section 29.3
1883 * part 9 places some constraints. This method checks one result of constraint
1884 * constraint. Others require compiler support. */
1885 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1887 if (!writer->is_rmw())
1890 if (!reader->is_rmw())
// Follow the writer's reads-from chain backwards looking for the reader.
1893 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1894 if (search == reader)
1896 if (search->get_tid() == reader->get_tid() &&
1897 search->happens_before(reader))
// mo_may_allow — checks the 29.3/9 constraint: reader must not read from a
// writer that is mo-reachable from a write after (in hb) the reader.
// NOTE(review): sampled view; `i` declaration, `break`/`continue`, and the
// return statements are elided — code kept byte-identical.
1905 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1906 * some constraints. This method checks one the following constraint (others
1907 * require compiler support):
1909 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1910 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1912 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1914 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1916 /* Iterate over all threads */
1917 for (i = 0; i < thrd_lists->size(); i++) {
// Earliest write (or rf-source of a read) per thread that hb-follows reader.
1918 const ModelAction *write_after_read = NULL;
1920 /* Iterate over actions in thread, starting from most recent */
1921 action_list_t *list = &(*thrd_lists)[i];
1922 action_list_t::reverse_iterator rit;
1923 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1924 ModelAction *act = *rit;
1926 /* Don't disallow due to act == reader */
1927 if (!reader->happens_before(act) || reader == act)
1929 else if (act->is_write())
1930 write_after_read = act;
1931 else if (act->is_read() && act->get_reads_from() != NULL)
1932 write_after_read = act->get_reads_from();
1935 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
// release_seq_heads — walks the RMW chain backwards from `rf` collecting
// release heads, then (for a relaxed tail write) tries to prove a contiguous
// release sequence from the thread's last write/fence-release; returns whether
// the answer is certain, stashing uncertainty info in `pending`.
// NOTE(review): sampled view; many `return`/`break`/`continue` statements and
// braces between numbered lines are elided — code kept byte-identical.
1942 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1943 * The ModelAction under consideration is expected to be taking part in
1944 * release/acquire synchronization as an object of the "reads from" relation.
1945 * Note that this can only provide release sequence support for RMW chains
1946 * which do not read from the future, as those actions cannot be traced until
1947 * their "promise" is fulfilled. Similarly, we may not even establish the
1948 * presence of a release sequence with certainty, as some modification order
1949 * constraints may be decided further in the future. Thus, this function
1950 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1951 * and a boolean representing certainty.
1953 * @param rf The action that might be part of a release sequence. Must be a
1955 * @param release_heads A pass-by-reference style return parameter. After
1956 * execution of this function, release_heads will contain the heads of all the
1957 * relevant release sequences, if any exists with certainty
1958 * @param pending A pass-by-reference style return parameter which is only used
1959 * when returning false (i.e., uncertain). Returns most information regarding
1960 * an uncertain release sequence, including any write operations that might
1961 * break the sequence.
1962 * @return true, if the ModelExecution is certain that release_heads is complete;
1965 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1966 rel_heads_list_t *release_heads,
1967 struct release_seq *pending) const
1969 /* Only check for release sequences if there are no cycles */
1970 if (mo_graph->checkForCycles())
// Phase 1: walk back through the RMW chain feeding rf.
1973 for ( ; rf != NULL; rf = rf->get_reads_from()) {
1974 ASSERT(rf->is_write());
1976 if (rf->is_release())
1977 release_heads->push_back(rf);
1978 else if (rf->get_last_fence_release())
1979 release_heads->push_back(rf->get_last_fence_release());
1981 break; /* End of RMW chain */
1983 /** @todo Need to be smarter here... In the linux lock
1984 * example, this will run to the beginning of the program for
1986 /** @todo The way to be smarter here is to keep going until 1
1987 * thread has a release preceded by an acquire and you've seen
1990 /* acq_rel RMW is a sufficient stopping condition */
1991 if (rf->is_acquire() && rf->is_release())
1992 return true; /* complete */
1995 /* read from future: need to settle this later */
1997 return false; /* incomplete */
2000 if (rf->is_release())
2001 return true; /* complete */
2003 /* else relaxed write
2004 * - check for fence-release in the same thread (29.8, stmt. 3)
2005 * - check modification order for contiguous subsequence
2006 * -> rf must be same thread as release */
2008 const ModelAction *fence_release = rf->get_last_fence_release();
2009 /* Synchronize with a fence-release unconditionally; we don't need to
2010 * find any more "contiguous subsequence..." for it */
2012 release_heads->push_back(fence_release);
2014 int tid = id_to_int(rf->get_tid());
2015 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
2016 action_list_t *list = &(*thrd_lists)[tid];
2017 action_list_t::const_reverse_iterator rit;
2019 /* Find rf in the thread list */
2020 rit = std::find(list->rbegin(), list->rend(), rf);
2021 ASSERT(rit != list->rend());
2023 /* Find the last {write,fence}-release */
2024 for (; rit != list->rend(); rit++) {
2025 if (fence_release && *(*rit) < *fence_release)
2027 if ((*rit)->is_release())
2030 if (rit == list->rend()) {
2031 /* No write-release in this thread */
2032 return true; /* complete */
2033 } else if (fence_release && *(*rit) < *fence_release) {
2034 /* The fence-release is more recent (and so, "stronger") than
2035 * the most recent write-release */
2036 return true; /* complete */
2037 } /* else, need to establish contiguous release sequence */
2038 ModelAction *release = *rit;
2040 ASSERT(rf->same_thread(release));
2042 pending->writes.clear();
// Phase 2: per-thread scan for writes that could break the release sequence.
2044 bool certain = true;
2045 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2046 if (id_to_int(rf->get_tid()) == (int)i)
2048 list = &(*thrd_lists)[i];
2050 /* Can we ensure no future writes from this thread may break
2051 * the release seq? */
2052 bool future_ordered = false;
2054 ModelAction *last = get_last_action(int_to_id(i));
2055 Thread *th = get_thread(int_to_id(i));
2056 if ((last && rf->happens_before(last)) ||
2059 future_ordered = true;
2061 ASSERT(!th->is_model_thread() || future_ordered);
2063 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2064 const ModelAction *act = *rit;
2065 /* Reach synchronization -> this thread is complete */
2066 if (act->happens_before(release))
2068 if (rf->happens_before(act)) {
2069 future_ordered = true;
2073 /* Only non-RMW writes can break release sequences */
2074 if (!act->is_write() || act->is_rmw())
2077 /* Check modification order */
2078 if (mo_graph->checkReachable(rf, act)) {
2079 /* rf --mo--> act */
2080 future_ordered = true;
2083 if (mo_graph->checkReachable(act, release))
2084 /* act --mo--> release */
2086 if (mo_graph->checkReachable(release, act) &&
2087 mo_graph->checkReachable(act, rf)) {
2088 /* release --mo-> act --mo--> rf */
2089 return true; /* complete */
2091 /* act may break release sequence */
2092 pending->writes.push_back(act);
2095 if (!future_ordered)
2096 certain = false; /* This thread is uncertain */
// presumably only the certain case pushes `release` and clears pending — TODO confirm
2100 release_heads->push_back(release);
2101 pending->writes.clear();
2103 pending->release = release;
// get_release_seq_heads — wrapper over release_seq_heads(): on an uncertain
// answer the allocated release_seq is parked in pending_rel_seqs for lazy
// re-checking; on a certain answer it is freed immediately.
2110 * An interface for getting the release sequence head(s) with which a
2111 * given ModelAction must synchronize. This function only returns a non-empty
2112 * result when it can locate a release sequence head with certainty. Otherwise,
2113 * it may mark the internal state of the ModelExecution so that it will handle
2114 * the release sequence at a later time, causing @a acquire to update its
2115 * synchronization at some later point in execution.
2117 * @param acquire The 'acquire' action that may synchronize with a release
2119 * @param read The read action that may read from a release sequence; this may
2120 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2121 * when 'acquire' is a fence-acquire)
2122 * @param release_heads A pass-by-reference return parameter. Will be filled
2123 * with the head(s) of the release sequence(s), if they exists with certainty.
2124 * @see ModelExecution::release_seq_heads
2126 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2127 ModelAction *read, rel_heads_list_t *release_heads)
2129 const ModelAction *rf = read->get_reads_from();
// snapshot_calloc keeps the allocation inside snapshotted memory.
2130 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2131 sequence->acquire = acquire;
2132 sequence->read = read;
2134 if (!release_seq_heads(rf, release_heads, sequence)) {
2135 /* add act to 'lazy checking' list */
2136 pending_rel_seqs.push_back(sequence);
2138 snapshot_free(sequence);
// propagate_clockvector — after a lazily-completed release-sequence sync,
// pushes the acquire's updated clock vector forward to every later action it
// now happens-before, queueing mo-edge re-checks for each.
// NOTE(review): sampled view; closing braces are elided — code byte-identical.
2143 * @brief Propagate a modified clock vector to actions later in the execution
2146 * After an acquire operation lazily completes a release-sequence
2147 * synchronization, we must update all clock vectors for operations later than
2148 * the acquire in the execution order.
2150 * @param acquire The ModelAction whose clock vector must be propagated
2151 * @param work The work queue to which we can add work items, if this
2152 * propagation triggers more updates (e.g., to the modification order)
2154 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2156 /* Re-check all pending release sequences */
2157 work->push_back(CheckRelSeqWorkEntry(NULL));
2158 /* Re-check read-acquire for mo_graph edges */
2159 work->push_back(MOEdgeWorkEntry(acquire));
2161 /* propagate synchronization to later actions */
// Walk the global trace backwards until we reach `acquire` itself.
2162 action_list_t::reverse_iterator rit = action_trace.rbegin();
2163 for (; (*rit) != acquire; rit++) {
2164 ModelAction *propagate = *rit;
2165 if (acquire->happens_before(propagate)) {
2166 synchronize(acquire, propagate);
2167 /* Re-check 'propagate' for mo_graph edges */
2168 work->push_back(MOEdgeWorkEntry(propagate));
// resolve_release_sequences — lazy half of release-sequence resolution: retry
// every stashed release_seq (optionally filtered by location), synchronize
// newly certain heads, propagate clocks, and drop completed entries.
// NOTE(review): sampled view; the `complete` declaration, iterator advance on
// the filtered path, `updated` assignments, and the return are elided.
2174 * Attempt to resolve all stashed operations that might synchronize with a
2175 * release sequence for a given location. This implements the "lazy" portion of
2176 * determining whether or not a release sequence was contiguous, since not all
2177 * modification order information is present at the time an action occurs.
2179 * @param location The location/object that should be checked for release
2180 * sequence resolutions. A NULL value means to check all locations.
2181 * @param work_queue The work queue to which to add work items as they are
2183 * @return True if any updates occurred (new synchronization, new mo_graph
2186 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2188 bool updated = false;
2189 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2190 while (it != pending_rel_seqs.end()) {
2191 struct release_seq *pending = *it;
2192 ModelAction *acquire = pending->acquire;
2193 const ModelAction *read = pending->read;
2195 /* Only resolve sequences on the given location, if provided */
2196 if (location && read->get_location() != location) {
2201 const ModelAction *rf = read->get_reads_from();
2202 rel_heads_list_t release_heads;
2204 complete = release_seq_heads(rf, &release_heads, pending);
2205 for (unsigned int i = 0; i < release_heads.size(); i++)
2206 if (!acquire->has_synchronized_with(release_heads[i]))
2207 if (synchronize(release_heads[i], acquire))
2211 /* Propagate the changed clock vector */
2212 propagate_clockvector(acquire, work_queue);
// Completed sequences are erased and their snapshot allocation freed.
2215 it = pending_rel_seqs.erase(it);
2216 snapshot_free(pending);
2222 // If we resolved promises or data races, see if we have realized a data race.
2229 * Performs various bookkeeping operations for the current ModelAction. For
2230 * instance, adds action to the per-object, per-thread action vector and to the
2231 * action trace list of all thread actions.
2233 * @param act is the ModelAction to add.
2235 void ModelExecution::add_action_to_lists(ModelAction *act)
2237 int tid = id_to_int(act->get_tid());
2238 ModelAction *uninit = NULL;
/* If this is the first action on an atomic location, fabricate an UNINIT
 * placeholder write so reads have something to read from. */
2240 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
2241 if (list->empty() && act->is_atomic_var()) {
2242 uninit = get_uninitialized_action(act);
2243 uninit_id = id_to_int(uninit->get_tid());
2244 list->push_front(uninit);
2246 list->push_back(act);
/* Record in the global trace; the UNINIT action is logically first */
2248 action_trace.push_back(act);
2250 action_trace.push_front(uninit);
/* Per-location, per-thread action vector */
2252 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2253 if (tid >= (int)vec->size())
2254 vec->resize(priv->next_thread_id);
2255 (*vec)[tid].push_back(act);
2257 (*vec)[uninit_id].push_front(uninit);
/* Track the most recent action per thread (resized lazily) */
2259 if ((int)thrd_last_action.size() <= tid)
2260 thrd_last_action.resize(get_num_threads());
2261 thrd_last_action[tid] = act;
2263 thrd_last_action[uninit_id] = uninit;
2265 if (act->is_fence() && act->is_release()) {
2266 if ((int)thrd_last_fence_release.size() <= tid)
2267 thrd_last_fence_release.resize(get_num_threads());
2268 thrd_last_fence_release[tid] = act;
/* A wait also acts on its mutex: index it under the mutex location too.
 * NOTE(review): the mutex pointer is smuggled through get_value() —
 * confirm this convention against ModelAction. */
2271 if (act->is_wait()) {
2272 void *mutex_loc = (void *) act->get_value();
2273 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
2275 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2276 if (tid >= (int)vec->size())
2277 vec->resize(priv->next_thread_id);
2278 (*vec)[tid].push_back(act);
2283 * @brief Get the last action performed by a particular Thread
2284 * @param tid The thread ID of the Thread in question
2285 * @return The last action in the thread
2287 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2289 int threadid = id_to_int(tid);
/* Bounds-checked lookup; out-of-range falls through (elided) — presumably NULL */
2290 if (threadid < (int)thrd_last_action.size())
2291 return thrd_last_action[id_to_int(tid)];
2297 * @brief Get the last fence release performed by a particular Thread
2298 * @param tid The thread ID of the Thread in question
2299 * @return The last fence release in the thread, if one exists; NULL otherwise
2301 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2303 int threadid = id_to_int(tid);
/* Bounds-checked lookup; out-of-range falls through (elided) — presumably NULL */
2304 if (threadid < (int)thrd_last_fence_release.size())
2305 return thrd_last_fence_release[id_to_int(tid)];
2311 * Gets the last memory_order_seq_cst write (in the total global sequence)
2312 * performed on a particular object (i.e., memory location), not including the
2314 * @param curr The current ModelAction; also denotes the object location to
2316 * @return The last seq_cst write
2318 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2320 void *location = curr->get_location();
2321 action_list_t *list = obj_map.get(location);
2322 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2323 action_list_t::reverse_iterator rit;
/* First rewind to curr itself in the per-location list... */
2324 for (rit = list->rbegin(); (*rit) != curr; rit++)
2326 rit++; /* Skip past curr */
/* ...then scan backwards for the most recent seq_cst write before it */
2327 for ( ; rit != list->rend(); rit++)
2328 if ((*rit)->is_write() && (*rit)->is_seqcst())
2334 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2335 * performed in a particular thread, prior to a particular fence.
2336 * @param tid The ID of the thread to check
2337 * @param before_fence The fence from which to begin the search; if NULL, then
2338 * search for the most recent fence in the thread.
2339 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2341 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2343 /* All fences should have location FENCE_LOCATION */
2344 action_list_t *list = obj_map.get(FENCE_LOCATION);
2349 action_list_t::reverse_iterator rit = list->rbegin();
/* If a starting fence was given, rewind to it before scanning
 * (the branch on before_fence is elided from this listing). */
2352 for (; rit != list->rend(); rit++)
2353 if (*rit == before_fence)
2356 ASSERT(*rit == before_fence);
/* Scan backwards for the nearest prior seq_cst fence in this thread */
2360 for (; rit != list->rend(); rit++)
2361 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2367 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2368 * location). This function identifies the mutex according to the current
2369 * action, which is presumed to perform on the same mutex.
2370 * @param curr The current ModelAction; also denotes the object location to
2372 * @return The last unlock operation
2374 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2376 void *location = curr->get_location();
2377 action_list_t *list = obj_map.get(location);
2378 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2379 action_list_t::reverse_iterator rit;
/* A wait releases the mutex too, so it also counts as an "unlock" here */
2380 for (rit = list->rbegin(); rit != list->rend(); rit++)
2381 if ((*rit)->is_unlock() || (*rit)->is_wait())
/** @brief Get the action from which a thread's next action inherits state:
 *  the thread's last action, falling back to its THREAD_CREATE action when
 *  the thread has not acted yet (fallback guard elided in this listing). */
2386 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2388 ModelAction *parent = get_last_action(tid);
2390 parent = get_thread(tid)->get_creation();
2395 * Returns the clock vector for a given thread.
2396 * @param tid The thread whose clock vector we want
2397 * @return Desired clock vector
2399 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
/* Delegate to the thread's parent action, which carries the clock vector */
2401 return get_parent_action(tid)->get_cv();
2405 * @brief Find the promise (if any) to resolve for the current action and
2406 * remove it from the pending promise vector
2407 * @param curr The current ModelAction. Should be a write.
2408 * @return The Promise to resolve, if any; otherwise NULL
2410 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
/* The Node records which promise (by index) this write was chosen to
 * satisfy; find it, remove it from the pending vector, and return it. */
2412 for (unsigned int i = 0; i < promises.size(); i++)
2413 if (curr->get_node()->get_promise(i)) {
2414 Promise *ret = promises[i];
2415 promises.erase(promises.begin() + i);
2422 * Resolve a Promise with a current write.
2423 * @param write The ModelAction that is fulfilling Promises
2424 * @param promise The Promise to resolve
2425 * @param work The work queue, for adding new fixup work
2426 * @return True if the Promise was successfully resolved; false otherwise
2428 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
2431 ModelVector<ModelAction *> actions_to_check;
/* Point every reader of this promise at the resolving write */
2433 for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2434 ModelAction *read = promise->get_reader(i);
2435 if (read_from(read, write)) {
2436 /* Propagate the changed clock vector */
2437 propagate_clockvector(read, work);
2439 actions_to_check.push_back(read);
2441 /* Make sure the promise's value matches the write's value */
2442 ASSERT(promise->is_compatible(write) && promise->same_value(write));
/* A cycle while resolving in the mo_graph is an unrecoverable failure */
2443 if (!mo_graph->resolvePromise(promise, write))
2444 priv->hard_failed_promise = true;
2447 * @todo It is possible to end up in an inconsistent state, where a
2448 * "resolved" promise may still be referenced if
2449 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2451 * Note that the inconsistency only matters when dumping mo_graph to
2457 //Check whether reading these writes has made threads unable to
/* ...resolve remaining promises (second half of comment elided) */
2459 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2460 ModelAction *read = actions_to_check[i];
2461 mo_check_promises(read, true);
2468 * Compute the set of promises that could potentially be satisfied by this
2469 * action. Note that the set computation actually appears in the Node, not in
2471 * @param curr The ModelAction that may satisfy promises
2473 void ModelExecution::compute_promises(ModelAction *curr)
2475 for (unsigned int i = 0; i < promises.size(); i++) {
2476 Promise *promise = promises[i];
/* Skip promises whose location/value do not match this write */
2477 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2480 bool satisfy = true;
/* curr cannot satisfy the promise if any of its readers already ordered
 * themselves before (or could synchronize with) curr */
2481 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2482 const ModelAction *act = promise->get_reader(j);
2483 if (act->happens_before(curr) ||
2484 act->could_synchronize_with(curr)) {
/* Record the satisfiable promise index on curr's Node for later choice */
2490 curr->get_node()->set_promise(i);
2494 /** Checks promises in response to change in ClockVector Threads. */
2494 /** Checks promises in response to change in ClockVector Threads. */
2495 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2497 for (unsigned int i = 0; i < promises.size(); i++) {
2498 Promise *promise = promises[i];
2499 if (!promise->thread_is_available(tid))
/* If this merge newly synchronized the thread past a promised read, that
 * thread can no longer fulfill the promise */
2501 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2502 const ModelAction *act = promise->get_reader(j);
2503 if ((!old_cv || !old_cv->synchronized_since(act)) &&
2504 merge_cv->synchronized_since(act)) {
/* eliminate_thread() returns true when no thread remains */
2505 if (promise->eliminate_thread(tid)) {
2506 /* Promise has failed */
2507 priv->failed_promise = true;
/** @brief Re-check all pending promises after a thread became disabled;
 *  mark the execution's snapshot state if any promise can no longer
 *  possibly be fulfilled. */
2515 void ModelExecution::check_promises_thread_disabled()
2517 for (unsigned int i = 0; i < promises.size(); i++) {
2518 Promise *promise = promises[i];
2519 if (promise->has_failed()) {
2520 priv->failed_promise = true;
2527 * @brief Checks promises in response to addition to modification order for
2530 * We test whether threads are still available for satisfying promises after an
2531 * addition to our modification order constraints. Those that are unavailable
2532 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2533 * that promise has failed.
2535 * @param act The ModelAction which updated the modification order
2536 * @param is_read_check Should be true if act is a read and we must check for
2537 * updates to the store from which it read (there is a distinction here for
2538 * RMW's, which are both a load and a store)
2540 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
/* For a read-check, the relevant store is the one act read from;
 * otherwise act itself is the store under consideration */
2542 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2544 for (unsigned int i = 0; i < promises.size(); i++) {
2545 Promise *promise = promises[i];
2547 // Is this promise on the same location?
2548 if (!promise->same_location(write))
/* If the write is mo-ordered after a promised read's position, the
 * promise is unfulfillable: checkPromise() flags the hard failure */
2551 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2552 const ModelAction *pread = promise->get_reader(j);
2553 if (!pread->happens_before(act))
2555 if (mo_graph->checkPromise(write, promise)) {
2556 priv->hard_failed_promise = true;
2562 // Don't do any lookups twice for the same thread
2563 if (!promise->thread_is_available(act->get_tid()))
/* Reachability in the mo_graph also rules this write out */
2566 if (mo_graph->checkReachable(promise, write)) {
2567 if (mo_graph->checkPromise(write, promise)) {
2568 priv->hard_failed_promise = true;
2576 * Compute the set of writes that may break the current pending release
2577 sequence. This information is extracted from previous release sequence
2580 * @param curr The current ModelAction. Must be a release sequence fixup
2583 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
/* Nothing to do when no release sequences are pending */
2585 if (pending_rel_seqs.empty())
/* Offer every write of the most recent pending sequence as a potential
 * "break" choice on curr's Node */
2588 struct release_seq *pending = pending_rel_seqs.back();
2589 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2590 const ModelAction *write = pending->writes[i];
2591 curr->get_node()->add_relseq_break(write);
2594 /* NULL means don't break the sequence; just synchronize */
2595 curr->get_node()->add_relseq_break(NULL);
2599 * Build up an initial set of all past writes that this 'read' action may read
2600 * from, as well as any previously-observed future values that must still be valid.
2602 * @param curr is the current ModelAction that we are exploring; it must be a
2605 void ModelExecution::build_may_read_from(ModelAction *curr)
2607 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2609 ASSERT(curr->is_read());
2611 ModelAction *last_sc_write = NULL;
/* seq_cst reads may not read past the last seq_cst write to this location */
2613 if (curr->is_seqcst())
2614 last_sc_write = get_last_seq_cst_write(curr);
2616 /* Iterate over all threads */
2617 for (i = 0; i < thrd_lists->size(); i++) {
2618 /* Iterate over actions in thread, starting from most recent */
2619 action_list_t *list = &(*thrd_lists)[i];
2620 action_list_t::reverse_iterator rit;
2621 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2622 ModelAction *act = *rit;
2624 /* Only consider 'write' actions */
2625 if (!act->is_write() || act == curr)
2628 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2629 bool allow_read = true;
2631 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2633 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2637 /* Only add feasible reads */
/* Tentatively apply the read's mo constraints, keep the candidate only
 * if it doesn't make the execution infeasible, then roll back */
2638 mo_graph->startChanges();
2639 r_modification_order(curr, act);
2640 if (!is_infeasible())
2641 curr->get_node()->add_read_from_past(act);
2642 mo_graph->rollbackChanges();
2645 /* Include at most one act per-thread that "happens before" curr */
2646 if (act->happens_before(curr))
2651 /* Inherit existing, promised future values */
2652 for (i = 0; i < promises.size(); i++) {
2653 const Promise *promise = promises[i];
2654 const ModelAction *promise_read = promise->get_reader(0);
2655 if (promise_read->same_var(curr)) {
2656 /* Only add feasible future-values */
2657 mo_graph->startChanges();
2658 r_modification_order(curr, promise);
2659 if (!is_infeasible())
2660 curr->get_node()->add_read_from_promise(promise_read);
2661 mo_graph->rollbackChanges();
2665 /* We may find no valid may-read-from only if the execution is doomed */
2666 if (!curr->get_node()->read_from_size()) {
2667 priv->no_valid_reads = true;
2671 if (DBG_ENABLED()) {
2672 model_print("Reached read action:\n");
2674 model_print("Printing read_from_past\n");
2675 curr->get_node()->print_read_from_past();
2676 model_print("End printing read_from_past\n");
/** @brief Decide whether a sleeping thread's read may read from 'write'.
 *  Walks the reads-from chain of RMWs backwards; a release write made while
 *  curr's thread was in the sleep set allows the read. (Return points are
 *  elided from this listing — confirm polarity against full source.) */
2680 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2682 for ( ; write != NULL; write = write->get_reads_from()) {
2683 /* UNINIT actions don't have a Node, and they never sleep */
2684 if (write->is_uninitialized())
2686 Node *prevnode = write->get_node()->get_parent();
2688 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2689 if (write->is_release() && thread_sleep)
/* Only continue chasing the chain through RMWs */
2691 if (!write->is_rmw())
2698 * @brief Get an action representing an uninitialized atomic
2700 * This function may create a new one or try to retrieve one from the NodeStack
2702 * @param curr The current action, which prompts the creation of an UNINIT action
2703 * @return A pointer to the UNINIT ModelAction
2705 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2707 Node *node = curr->get_node();
/* Reuse the cached UNINIT action from the NodeStack if one exists;
 * otherwise create it and cache it on the node */
2708 ModelAction *act = node->get_uninit_action();
2710 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2711 node->set_uninit_action(act);
/* UNINIT actions get an empty clock vector (no predecessors) */
2713 act->create_cv(NULL);
/** @brief Print a formatted table of an action list, plus a rolling hash of
 *  the trace (useful for quickly comparing executions). */
2717 static void print_list(const action_list_t *list)
2719 action_list_t::const_iterator it;
2721 model_print("------------------------------------------------------------------------------------\n");
2722 model_print("# t Action type MO Location Value Rf CV\n");
2723 model_print("------------------------------------------------------------------------------------\n");
2725 unsigned int hash = 0;
2727 for (it = list->begin(); it != list->end(); it++) {
2728 const ModelAction *act = *it;
/* Only executed actions (positive sequence numbers) are printed */
2729 if (act->get_seq_number() > 0)
2731 hash = hash^(hash<<3)^((*it)->hash());
2733 model_print("HASH %u\n", hash);
2734 model_print("------------------------------------------------------------------------------------\n");
2737 #if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the modification-order graph plus rf/sb edges to
 *  '<filename>.dot' (only built under SUPPORT_MOD_ORDER_DUMP).
 *  NOTE(review): sprintf into a fixed local buffer (declaration elided) —
 *  verify the buffer size bounds 'filename', or switch to snprintf. */
2738 void ModelExecution::dumpGraph(char *filename) const
2741 sprintf(buffer, "%s.dot", filename);
2742 FILE *file = fopen(buffer, "w");
2743 fprintf(file, "digraph %s {\n", filename);
2744 mo_graph->dumpNodes(file);
/* Remember the previous action per thread so sb edges can be drawn */
2745 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2747 for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2748 ModelAction *act = *it;
2749 if (act->is_read()) {
2750 mo_graph->dot_print_node(file, act);
/* rf edge from the store (or promise) this read reads from */
2751 if (act->get_reads_from())
2752 mo_graph->dot_print_edge(file,
2753 act->get_reads_from(),
2755 "label=\"rf\", color=red, weight=2");
2757 mo_graph->dot_print_edge(file,
2758 act->get_reads_from_promise(),
2760 "label=\"rf\", color=red");
/* sb edge from the previous action in the same thread */
2762 if (thread_array[act->get_tid()]) {
2763 mo_graph->dot_print_edge(file,
2764 thread_array[id_to_int(act->get_tid())],
2766 "label=\"sb\", color=blue, weight=400");
2769 thread_array[act->get_tid()] = act;
2771 fprintf(file, "}\n");
2772 model_free(thread_array);
2777 /** @brief Prints an execution trace summary. */
2778 void ModelExecution::print_summary() const
/* When mod-order dumping is compiled in, also write per-execution graphs */
2780 #if SUPPORT_MOD_ORDER_DUMP
2781 char buffername[100];
2782 sprintf(buffername, "exec%04u", get_execution_number());
2783 mo_graph->dumpGraphToFile(buffername);
2784 sprintf(buffername, "graph%04u", get_execution_number());
2785 dumpGraph(buffername);
2788 model_print("Execution trace %d:", get_execution_number());
/* Annotate the trace header with feasibility / blocking / bug status */
2789 if (isfeasibleprefix()) {
2790 if (is_yieldblocked())
2791 model_print(" YIELD BLOCKED");
2792 if (scheduler->all_threads_sleeping())
2793 model_print(" SLEEP-SET REDUNDANT");
2794 if (have_bug_reports())
2795 model_print(" DETECTED BUG(S)");
2797 print_infeasibility(" INFEASIBLE");
2800 print_list(&action_trace);
/* List any promises left unresolved at the end of the trace */
2803 if (!promises.empty()) {
2804 model_print("Pending promises:\n");
2805 for (unsigned int i = 0; i < promises.size(); i++) {
2806 model_print(" [P%u] ", i);
2807 promises[i]->print();
2814 * Add a Thread to the system for the first time. Should only be called once
2816 * @param t The Thread to add
2818 void ModelExecution::add_thread(Thread *t)
2820 unsigned int i = id_to_int(t->get_id());
/* Grow the id-indexed map on demand */
2821 if (i >= thread_map.size())
2822 thread_map.resize(i + 1);
/* The internal model thread is never scheduled */
2824 if (!t->is_model_thread())
2825 scheduler->add_thread(t);
2829 * @brief Get a Thread reference by its ID
2830 * @param tid The Thread's ID
2831 * @return A Thread reference
2833 Thread * ModelExecution::get_thread(thread_id_t tid) const
2835 unsigned int i = id_to_int(tid);
/* Bounds-checked; out-of-range falls through (elided) — presumably NULL */
2836 if (i < thread_map.size())
2837 return thread_map[i];
2842 * @brief Get a reference to the Thread in which a ModelAction was executed
2843 * @param act The ModelAction
2844 * @return A Thread reference
2846 Thread * ModelExecution::get_thread(const ModelAction *act) const
/* Convenience overload: resolve via the action's thread id */
2848 return get_thread(act->get_tid());
2852 * @brief Get a Promise's "promise number"
2854 * A "promise number" is an index number that is unique to a promise, valid
2855 * only for a specific snapshot of an execution trace. Promises may come and go
2856 as they are generated and resolved, so an index only retains meaning for the
2859 * @param promise The Promise to check
2860 * @return The promise index, if the promise still is valid; otherwise -1
2862 int ModelExecution::get_promise_number(const Promise *promise) const
/* Linear search is fine: the pending-promise vector stays small */
2864 for (unsigned int i = 0; i < promises.size(); i++)
2865 if (promises[i] == promise)
2872 * @brief Check if a Thread is currently enabled
2873 * @param t The Thread to check
2874 * @return True if the Thread is currently enabled
2876 bool ModelExecution::is_enabled(Thread *t) const
/* Thin delegation: the Scheduler owns enabled/disabled state */
2878 return scheduler->is_enabled(t);
2882 * @brief Check if a Thread is currently enabled
2883 * @param tid The ID of the Thread to check
2884 * @return True if the Thread is currently enabled
2886 bool ModelExecution::is_enabled(thread_id_t tid) const
/* Thin delegation: the Scheduler owns enabled/disabled state */
2888 return scheduler->is_enabled(tid);
2892 * @brief Select the next thread to execute based on the current action
2894 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2895 * actions should be followed by the execution of their child thread. In either
2896 * case, the current action should determine the next thread schedule.
2898 * @param curr The current action
2899 * @return The next thread to run, if the current action will determine this
2900 * selection; otherwise NULL
2902 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2904 /* Do not split atomic RMW */
2905 if (curr->is_rmwr())
2906 return get_thread(curr);
/* Dead code kept for reference: a per-memory-order write policy that was
 * considered and commented out */
2907 if (curr->is_write()) {
2908 // std::memory_order order = curr->get_mo();
2910 // case std::memory_order_relaxed:
2911 // return get_thread(curr);
2912 // case std::memory_order_release:
2913 // return get_thread(curr);
2920 /* Follow CREATE with the created thread */
2921 /* which is not needed, because model.cc takes care of this */
2922 if (curr->get_type() == THREAD_CREATE)
2923 return curr->get_thread_operand();
2924 if (curr->get_type() == PTHREAD_CREATE) {
2925 return curr->get_thread_operand();
2930 /** @return True if the execution has taken too many steps */
2931 bool ModelExecution::too_many_steps() const
/* A bound of 0 means "unbounded"; otherwise compare against the
 * snapshotted sequence-number counter */
2933 return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2937 * Takes the next step in the execution, if possible.
2938 * @param curr The current step to take
2939 * @return Returns the next Thread to run, if any; NULL if this execution
2942 Thread * ModelExecution::take_step(ModelAction *curr)
2944 Thread *curr_thrd = get_thread(curr);
2945 ASSERT(curr_thrd->get_state() == THREAD_READY);
2947 ASSERT(check_action_enabled(curr)); /* May have side effects? */
/* check_current_action() may replace curr (e.g., with a Node-recycled
 * action), so use its return value from here on */
2948 curr = check_current_action(curr);
/* Threads that blocked or finished during this step leave the scheduler */
2951 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2952 scheduler->remove_thread(curr_thrd);
2954 return action_select_next_thread(curr);
2958 * Launch end-of-execution release sequence fixups only when
2959 * the execution is otherwise feasible AND there are:
2961 * (1) pending release sequences
2962 * (2) pending assertions that could be invalidated by a change
2963 * in clock vectors (i.e., data races)
2964 * (3) no pending promises
2966 void ModelExecution::fixup_release_sequences()
2968 while (!pending_rel_seqs.empty() &&
2969 is_feasible_prefix_ignore_relseq() &&
2970 haveUnrealizedRaces()) {
2971 model_print("*** WARNING: release sequence fixup action "
2972 "(%zu pending release seuqence(s)) ***\n",
2973 pending_rel_seqs.size());
2974 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2975 std::memory_order_seq_cst, NULL, VALUE_NONE,