10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
// --- bug_message (fragment) ---
// Wraps a user-reported bug string, formatted as " [BUG] <str>\n", in a
// buffer from the snapshotting allocator so the message survives rollback.
// NOTE(review): this listing is elided -- the enclosing class declaration
// and several member lines are not visible in this span.
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
// "%s" (2 chars) in fmt is replaced by str, so strlen(fmt) + strlen(str)
// leaves exactly enough room for the formatted text plus the NUL terminator.
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
// Release the snapshot-allocated buffer, if any
29 ~bug_message() { if (msg) snapshot_free(msg); }
// Emit the stored message through the model-checker's print routine
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
// Constructor: zero/false-initialize all per-execution state.
// NOTE(review): elided listing -- some initializers and member declarations
// are not visible in this span.
41 model_snapshot_members() :
42 /* First thread created will have id INITIAL_THREAD_ID */
43 next_thread_id(INITIAL_THREAD_ID),
44 used_sequence_numbers(0),
48 failed_promise(false),
49 too_many_reads(false),
50 no_valid_reads(false),
51 bad_synchronization(false),
// Destructor: free every bug_message accumulated during the execution
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
// Next unused thread ID; doubles as the count of threads created so far
61 unsigned int next_thread_id;
// Last sequence number handed out (pre-incremented in get_next_seq_num())
62 modelclock_t used_sequence_numbers;
// Cached "latest" backtracking point for this execution (may be NULL)
63 ModelAction *next_backtrack;
// Bug reports gathered during this execution; snapshot-allocated
64 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
// Builds the ModelChecker: allocates the scheduler, the per-execution maps
// and vectors (snapshot-allocated where they must roll back with the user
// program), the NodeStack, the snapshotted member struct, and the
// modification-order graph; finally creates the special model-checker thread.
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
// Initial size 1 -- presumably a slot for the model thread; TODO confirm
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
// First call to get_next_id(), so this thread gets INITIAL_THREAD_ID (0)
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
// Tears down heap state created in the constructor.
// NOTE(review): elided listing -- deletes for several members (e.g.
// action_trace, obj_map, obj_thrd_map, promises, futurevalues, node_stack,
// mo_graph) are not visible in this span.
104 ModelChecker::~ModelChecker()
// Threads are stored under their integer IDs 0..num_threads-1
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
// The Promise objects themselves are owned by this vector
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
// Look up the action list associated with ptr, lazily creating an empty
// list when none exists yet.
// NOTE(review): elided listing -- the NULL check, the hash->put() insertion
// and the return statement are not visible in this span.
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
// Vector-of-lists variant of get_safe_ptr_action(): look up the per-thread
// action-list vector for ptr, lazily creating it when absent.
// NOTE(review): elided listing -- the NULL check, insertion and return are
// not visible in this span.
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
// Clear per-execution NodeStack bookkeeping before rolling back memory
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
163 * FIXME: if we utilize partial rollback, we will need to free only
164 * those pending actions which were NOT pending before the rollback
167 for (unsigned int i = 0; i < get_num_threads(); i++)
168 delete get_thread(int_to_id(i))->get_pending();
// Roll user-program memory back to snapshot index 0 (the initial state)
170 snapshot_backtrack_before(0);
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
176 return priv->next_thread_id++;
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
182 return priv->next_thread_id;
186 * Must be called from user-thread context (e.g., through the global
187 * thread_current() interface)
189 * @return The currently executing Thread.
191 Thread * ModelChecker::get_current_thread() const
193 return scheduler->get_current_thread();
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
199 return ++priv->used_sequence_numbers;
202 Node * ModelChecker::get_curr_node() const
204 return node_stack->get_head();
// NOTE(review): elided listing -- several original lines (the opening brace,
// the curr != NULL / is_rmwr() guards, some else branches and closing
// braces) are not visible in this span.
208 * @brief Choose the next thread to execute.
210 * This function chooses the next thread that should execute. It can force the
211 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213 * The model-checker may have no preference regarding the next thread (i.e.,
214 * when exploring a new execution ordering), in which case we defer to the
217 * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218 * might guide the choice of next thread (i.e., THREAD_CREATE should be
219 * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220 * @return The next chosen thread to run, if any exist. Or else if no threads
221 * remain to be executed, return NULL.
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
228 /* Do not split atomic actions. */
230 return get_thread(curr);
// Force THREAD_START to immediately follow THREAD_CREATE
231 else if (curr->get_type() == THREAD_CREATE)
232 return curr->get_thread_operand();
236 * Have we completed exploring the preselected path? Then let the
240 return scheduler->select_next_thread();
242 /* Else, we are trying to replay an execution */
243 ModelAction *next = node_stack->get_next()->get_action();
// Track the earliest point at which this execution diverged from replay
245 if (next == diverge) {
246 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247 earliest_diverge = diverge;
249 Node *nextnode = next->get_node();
250 Node *prevnode = nextnode->get_parent();
251 scheduler->update_sleep_set(prevnode);
253 /* Reached divergence point */
// Try this node's alternatives in priority order: misc index, promise set,
// read-from value, release-sequence break. Each retries the same thread
// and discards the now-stale deeper portion of the NodeStack.
254 if (nextnode->increment_misc()) {
255 /* The next node will try to satisfy a different misc_index values. */
256 tid = next->get_tid();
257 node_stack->pop_restofstack(2);
258 } else if (nextnode->increment_promise()) {
259 /* The next node will try to satisfy a different set of promises. */
260 tid = next->get_tid();
261 node_stack->pop_restofstack(2);
262 } else if (nextnode->increment_read_from()) {
263 /* The next node will read from a different value. */
264 tid = next->get_tid();
265 node_stack->pop_restofstack(2);
266 } else if (nextnode->increment_relseq_break()) {
267 /* The next node will try to resolve a release sequence differently */
268 tid = next->get_tid();
269 node_stack->pop_restofstack(2);
// Fall-back: all per-node alternatives are exhausted, so backtrack to a
// different thread recorded in the parent node
272 /* Make a different thread execute for next step */
273 scheduler->add_sleep(get_thread(next->get_tid()));
274 tid = prevnode->get_next_backtrack();
275 /* Make sure the backtracked thread isn't sleeping. */
276 node_stack->pop_restofstack(1);
277 if (diverge == earliest_diverge) {
278 earliest_diverge = prevnode->get_action();
281 /* Start the round robin scheduler from this thread id */
282 scheduler->set_scheduler_thread(tid);
283 /* The correct sleep set is in the parent node. */
286 DEBUG("*** Divergence point ***\n");
// Not yet at the divergence point: keep replaying the recorded thread
290 tid = next->get_tid();
292 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293 ASSERT(tid != THREAD_ID_T_NONE);
294 return thread_map->get(id_to_int(tid));
298 * We need to know what the next actions of all threads in the sleep
299 * set will be. This method computes them and stores the actions at
300 * the corresponding thread object's pending action.
303 void ModelChecker::execute_sleep_set()
305 for (unsigned int i = 0; i < get_num_threads(); i++) {
306 thread_id_t tid = int_to_id(i);
307 Thread *thr = get_thread(tid);
// Flag the pending action of every sleeping thread so later processing
// knows it was computed while the thread was in the sleep set
308 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309 thr->get_pending()->set_sleep_flag();
315 * @brief Should the current action wake up a given thread?
317 * @param curr The current action
318 * @param thread The thread that we might wake up
319 * @return True, if we should wake up the sleeping thread; false otherwise
// NOTE(review): elided listing -- the early exit for partial RMW and the
// "return true"/"return false" lines of each case are not visible here.
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
323 const ModelAction *asleep = thread->get_pending();
324 /* Don't allow partial RMW to wake anyone up */
327 /* Synchronizing actions may have been backtracked */
328 if (asleep->could_synchronize_with(curr))
330 /* All acquire/release fences and fence-acquire/store-release */
331 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
333 /* Fence-release + store can awake load-acquire on the same location */
334 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
// Only wake if a fence-release in curr's thread is ordered after the
// sleeping thread's last action
335 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Scan every thread and remove from the scheduler's sleep set those whose
// pending action could be affected by curr (see should_wake_up()).
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
344 for (unsigned int i = 0; i < get_num_threads(); i++) {
345 Thread *thr = get_thread(int_to_id(i));
346 if (scheduler->is_sleep_set(thr)) {
347 if (should_wake_up(curr, thr))
348 /* Remove this thread from sleep set */
349 scheduler->remove_sleep(thr);
354 /** @brief Alert the model-checker that an incorrectly-ordered
355 * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
358 priv->bad_synchronization = true;
362 * Check whether the current trace has triggered an assertion which should halt
365 * @return True, if the execution should be aborted; false otherwise
367 bool ModelChecker::has_asserted() const
369 return priv->asserted;
373 * Trigger a trace assertion which should cause this execution to be halted.
374 * This can be due to a detected bug or due to an infeasibility that should
377 void ModelChecker::set_assert()
379 priv->asserted = true;
383 * Check if we are in a deadlock. Should only be called at the end of an
384 * execution, although it should not give false positives in the middle of an
385 * execution (there should be some ENABLED thread).
387 * @return True if program is in a deadlock; false otherwise
// NOTE(review): elided listing -- an "if some thread is enabled, return
// false" style early exit appears to be missing from this span; confirm
// against the full source.
389 bool ModelChecker::is_deadlocked() const
391 bool blocking_threads = false;
392 for (unsigned int i = 0; i < get_num_threads(); i++) {
393 thread_id_t tid = int_to_id(i);
396 Thread *t = get_thread(tid);
// A user thread with a pending action is blocked waiting to run
397 if (!t->is_model_thread() && t->get_pending())
398 blocking_threads = true;
400 return blocking_threads;
404 * Check if this is a complete execution. That is, have all thread completed
405 * execution (rather than exiting because sleep sets have forced a redundant
408 * @return True if the execution is complete.
410 bool ModelChecker::is_complete_execution() const
// Any still-enabled thread means the execution did not run to completion.
// NOTE(review): elided listing -- the per-thread "return false" and the
// final "return true" lines are not visible in this span.
412 for (unsigned int i = 0; i < get_num_threads(); i++)
413 if (is_enabled(int_to_id(i)))
419 * @brief Assert a bug in the executing program.
421 * Use this function to assert any sort of bug in the user program. If the
422 * current trace is feasible (actually, a prefix of some feasible execution),
423 * then this execution will be aborted, printing the appropriate message. If
424 * the current trace is not yet feasible, the error message will be stashed and
425 * printed if the execution ever becomes feasible.
427 * @param msg Descriptive message for the bug (do not include newline char)
428 * @return True if bug is immediately-feasible
430 bool ModelChecker::assert_bug(const char *msg)
// Stash the message; it is printed now or if the trace becomes feasible
432 priv->bugs.push_back(new bug_message(msg));
// NOTE(review): elided listing -- the body of this feasible branch and the
// function's return statements are not visible in this span.
434 if (isfeasibleprefix()) {
442 * @brief Assert a bug in the executing program, asserted by a user thread
443 * @see ModelChecker::assert_bug
444 * @param msg Descriptive message for the bug (do not include newline char)
// NOTE(review): elided listing -- the assert_bug(msg) call and the guard
// around switch_to_master() are not visible in this span.
446 void ModelChecker::assert_user_bug(const char *msg)
448 /* If feasible bug, bail out now */
// Yield from user-thread context back to the model-checker (master)
450 switch_to_master(NULL);
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
456 return priv->bugs.size() != 0;
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
// NOTE(review): elided listing -- an argument line of the model_print call
// (the bug count itself) and the closing braces are not visible here.
460 void ModelChecker::print_bugs() const
462 if (have_bug_reports()) {
// Header pluralizes "bug" based on the count
463 model_print("Bug report: %zu bug%s detected\n",
465 priv->bugs.size() > 1 ? "s" : "");
466 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467 priv->bugs[i]->print();
472 * @brief Record end-of-execution stats
474 * Must be run when exiting an execution. Records various stats.
475 * @see struct execution_stats
477 void ModelChecker::record_stats()
// Classify the finished execution into exactly one bucket, checked in
// priority order: infeasible > buggy > complete > redundant
480 if (!isfeasibleprefix())
481 stats.num_infeasible++;
482 else if (have_bug_reports())
483 stats.num_buggy_executions++;
484 else if (is_complete_execution())
485 stats.num_complete++;
487 stats.num_redundant++;
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
493 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494 model_print("Number of redundant executions: %d\n", stats.num_redundant);
495 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497 model_print("Total executions: %d\n", stats.num_total);
// Node count is cumulative, tracked by the NodeStack
498 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
502 * @brief End-of-execution print
503 * @param printbugs Should any existing bugs be printed?
// NOTE(review): elided listing -- the else keyword before the "(Not set)"
// print and the guarded print_bugs() call are not visible in this span.
505 void ModelChecker::print_execution(bool printbugs) const
507 print_program_output();
// Verbose mode additionally reports where this execution first diverged
509 if (DBG_ENABLED() || params.verbose) {
510 model_print("Earliest divergence point since last feasible execution:\n");
511 if (earliest_diverge)
512 earliest_diverge->print();
514 model_print("(Not set)\n");
520 /* Don't print invalid bugs */
529 * Queries the model-checker for more executions to explore and, if one
530 * exists, resets the model-checker state to execute a new execution.
532 * @return If there are more executions to explore, return true. Otherwise,
535 bool ModelChecker::next_execution()
// NOTE(review): elided listing -- record_stats(), the deadlock guard around
// assert_bug, and the function's return statements are not visible here.
538 /* Is this execution a feasible execution that's worth bug-checking? */
539 bool complete = isfeasibleprefix() && (is_complete_execution() ||
542 /* End-of-execution bug checks */
545 assert_bug("Deadlock detected");
// Print this execution when verbose, or when it is complete and buggy
553 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
554 print_execution(complete);
556 clear_program_output();
559 earliest_diverge = NULL;
// No further backtracking points means exploration is finished
561 if ((diverge = get_next_backtrack()) == NULL)
565 model_print("Next execution will diverge at:\n");
569 reset_to_initial_state();
574 * @brief Find the last fence-related backtracking conflict for a ModelAction
576 * This function performs the search for the most recent conflicting action
577 * against which we should perform backtracking, as affected by fence
578 * operations. This includes pairs of potentially-synchronizing actions which
579 * occur due to fence-acquire or fence-release, and hence should be explored in
580 * the opposite execution order.
582 * @param act The current action
583 * @return The most recent action which conflicts with act due to fences
// NOTE(review): elided listing -- early returns, the NULL check on
// last_release, and some loop break/continue lines are not visible here.
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
587 /* Only perform release/acquire fence backtracking for stores */
588 if (!act->is_write())
591 /* Find a fence-release (or, act is a release) */
592 ModelAction *last_release;
593 if (act->is_release())
596 last_release = get_last_fence_release(act->get_tid());
600 /* Skip past the release */
// Walk the global trace backwards to position the iterator at last_release
601 action_list_t *list = action_trace;
602 action_list_t::reverse_iterator rit;
603 for (rit = list->rbegin(); rit != list->rend(); rit++)
604 if (*rit == last_release)
606 ASSERT(rit != list->rend());
611 * load --sb-> fence-acquire */
// Per-thread records, indexed by thread ID: the most recent acquire-fence
// found, and the most recent load of act's variable preceding it
612 std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
613 std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
614 bool found_acquire_fences = false;
615 for ( ; rit != list->rend(); rit++) {
616 ModelAction *prev = *rit;
// Actions in act's own thread cannot be backtracking partners
617 if (act->same_thread(prev))
620 int tid = id_to_int(prev->get_tid());
622 if (prev->is_read() && act->same_var(prev)) {
623 if (prev->is_acquire()) {
624 /* Found most recent load-acquire, don't need
625 * to search for more fences */
626 if (!found_acquire_fences)
629 prior_loads[tid] = prev;
// Record only the most recent (first-encountered) acquire-fence per thread
632 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633 found_acquire_fences = true;
634 acquire_fences[tid] = prev;
// Choose the latest acquire-fence that also has a prior load recorded
638 ModelAction *latest_backtrack = NULL;
639 for (unsigned int i = 0; i < acquire_fences.size(); i++)
640 if (acquire_fences[i] && prior_loads[i])
641 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642 latest_backtrack = acquire_fences[i];
643 return latest_backtrack;
647 * @brief Find the last backtracking conflict for a ModelAction
649 * This function performs the search for the most recent conflicting action
650 * against which we should perform backtracking. This primary includes pairs of
651 * synchronizing actions which should be explored in the opposite execution
654 * @param act The current action
655 * @return The most recent action which conflicts with act
// NOTE(review): elided listing -- several case labels (e.g. for the
// read/write case at the top), return statements, the default case and
// closing braces are not visible in this span.
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
659 switch (act->get_type()) {
660 /* case ATOMIC_FENCE: fences don't directly cause backtracking */
664 ModelAction *ret = NULL;
666 /* linear search: from most recent to oldest */
667 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668 action_list_t::reverse_iterator rit;
669 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670 ModelAction *prev = *rit;
671 if (prev->could_synchronize_with(act)) {
// Also consider fence-induced conflicts for this action
677 ModelAction *ret2 = get_last_fence_conflict(act);
687 case ATOMIC_TRYLOCK: {
688 /* linear search: from most recent to oldest */
689 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690 action_list_t::reverse_iterator rit;
691 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692 ModelAction *prev = *rit;
693 if (act->is_conflicting_lock(prev))
698 case ATOMIC_UNLOCK: {
699 /* linear search: from most recent to oldest */
700 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701 action_list_t::reverse_iterator rit;
702 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703 ModelAction *prev = *rit;
// An unlock conflicts with a failed trylock in another thread
704 if (!act->same_thread(prev) && prev->is_failed_trylock())
710 /* linear search: from most recent to oldest */
711 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712 action_list_t::reverse_iterator rit;
713 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714 ModelAction *prev = *rit;
715 if (!act->same_thread(prev) && prev->is_failed_trylock())
717 if (!act->same_thread(prev) && prev->is_notify())
723 case ATOMIC_NOTIFY_ALL:
724 case ATOMIC_NOTIFY_ONE: {
725 /* linear search: from most recent to oldest */
726 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727 action_list_t::reverse_iterator rit;
728 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729 ModelAction *prev = *rit;
// A notify conflicts with a wait in another thread
730 if (!act->same_thread(prev) && prev->is_wait())
741 /** This method finds backtracking points where we should try to
742 * reorder the parameter ModelAction against.
744 * @param the ModelAction to find backtracking points for.
// NOTE(review): elided listing -- the guard for a NULL conflict, several
// continue/break statements and closing braces are not visible in this span.
746 void ModelChecker::set_backtracking(ModelAction *act)
748 Thread *t = get_thread(act);
749 ModelAction *prev = get_last_conflict(act);
753 Node *node = prev->get_node()->get_parent();
// If act's own thread is enabled at the conflict point, only that one
// thread needs a backtracking mark; otherwise consider every thread
755 int low_tid, high_tid;
756 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
757 low_tid = id_to_int(act->get_tid());
758 high_tid = low_tid + 1;
761 high_tid = get_num_threads();
764 for (int i = low_tid; i < high_tid; i++) {
765 thread_id_t tid = int_to_id(i);
767 /* Make sure this thread can be enabled here. */
768 if (i >= node->get_num_threads())
771 /* Don't backtrack into a point where the thread is disabled or sleeping. */
772 if (node->enabled_status(tid) != THREAD_ENABLED)
775 /* Check if this has been explored already */
776 if (node->has_been_explored(tid))
779 /* See if fairness allows */
// Under a fairness window, a thread without priority is skipped when
// some other enabled thread does have priority
780 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
782 for (int t = 0; t < node->get_num_threads(); t++) {
783 thread_id_t tother = int_to_id(t);
784 if (node->is_enabled(tother) && node->has_priority(tother)) {
792 /* Cache the latest backtracking point */
793 set_latest_backtrack(prev);
795 /* If this is a new backtracking point, mark the tree */
796 if (!node->set_backtrack(tid))
798 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
799 id_to_int(prev->get_tid()),
800 id_to_int(t->get_id()));
809 * @brief Cache a backtracking point as the "most recent", if eligible
811 * Note that this does not prepare the NodeStack for this backtracking
812 * operation, it only caches the action on a per-execution basis
814 * @param act The operation at which we should explore a different next action
815 * (i.e., backtracking point)
816 * @return True, if this action is now the most recent backtracking point;
// NOTE(review): elided listing -- the return statements are not visible.
819 bool ModelChecker::set_latest_backtrack(ModelAction *act)
// Replace the cached point only if act is later in the execution order
821 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
822 priv->next_backtrack = act;
829 * Returns last backtracking point. The model checker will explore a different
830 * path for this point in the next execution.
831 * @return The ModelAction at which the next execution should diverge.
833 ModelAction * ModelChecker::get_next_backtrack()
// Hand out the cached point and clear it (consume-once semantics).
// NOTE(review): the "return next;" line is elided from this span.
835 ModelAction *next = priv->next_backtrack;
836 priv->next_backtrack = NULL;
841 * Processes a read model action.
842 * @param curr is the read model action to process.
843 * @return True if processing this read updates the mo_graph.
// NOTE(review): elided listing -- the read-from assignment for the
// READ_FROM_PAST case, break statements, the default case and the final
// return are not visible in this span.
845 bool ModelChecker::process_read(ModelAction *curr)
847 Node *node = curr->get_node();
848 uint64_t value = VALUE_NONE;
849 bool updated = false;
// Dispatch on where this read obtains its value: a past write, an
// existing promise, or a speculative future value
851 switch (node->get_read_from_status()) {
852 case READ_FROM_PAST: {
853 const ModelAction *rf = node->get_read_from_past();
856 mo_graph->startChanges();
857 value = rf->get_value();
858 check_recency(curr, rf);
859 bool r_status = r_modification_order(curr, rf);
// If this choice made the execution infeasible and another read-from
// candidate exists, roll back the speculative mo_graph changes
861 if (is_infeasible() && node->increment_read_from()) {
862 mo_graph->rollbackChanges();
863 priv->too_many_reads = false;
868 mo_graph->commitChanges();
869 mo_check_promises(curr, true);
874 case READ_FROM_PROMISE: {
875 const Promise *promise = curr->get_node()->get_read_from_promise();
876 value = promise->get_value();
877 curr->set_read_from_promise(promise);
878 mo_graph->startChanges();
879 updated = r_modification_order(curr, promise);
880 mo_graph->commitChanges();
883 case READ_FROM_FUTURE: {
884 /* Read from future value */
// Create a new Promise which some later write must eventually satisfy
885 struct future_value fv = node->get_future_value();
886 Promise *promise = new Promise(curr, fv);
888 curr->set_read_from_promise(promise);
889 promises->push_back(promise);
890 mo_graph->startChanges();
891 updated = r_modification_order(curr, promise);
892 mo_graph->commitChanges();
// The value read becomes the thread's return value for this action
898 get_thread(curr)->set_return_value(value);
904 * Processes a lock, trylock, or unlock model action. @param curr is
905 * the read model action to process.
907 * The try lock operation checks whether the lock is taken. If not,
908 * it falls to the normal lock operation case. If so, it returns
911 * The lock operation has already been checked that it is enabled, so
912 * it just grabs the lock and synchronizes with the previous unlock.
914 * The unlock operation has to re-enable all of the threads that are
915 * waiting on the lock.
917 * @return True if synchronization was updated; false otherwise
// NOTE(review): elided listing -- several case labels (e.g. the lock and
// wait cases), break/return statements and closing braces are not visible
// in this span.
919 bool ModelChecker::process_mutex(ModelAction *curr)
921 std::mutex *mutex = NULL;
922 struct std::mutex_state *state = NULL;
// For wait operations the mutex pointer travels in the action's value field
924 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
925 mutex = (std::mutex *)curr->get_location();
926 state = mutex->get_state();
927 } else if (curr->is_wait()) {
928 mutex = (std::mutex *)curr->get_value();
929 state = mutex->get_state();
932 switch (curr->get_type()) {
933 case ATOMIC_TRYLOCK: {
934 bool success = !state->islocked;
935 curr->set_try_lock(success);
// A failed trylock reports 0 to the caller; success reports 1 and then
// falls through to the lock handling below
937 get_thread(curr)->set_return_value(0);
940 get_thread(curr)->set_return_value(1);
942 //otherwise fall into the lock case
// Detect use of a lock before its initialization by comparing the
// acquiring thread's clock against the mutex allocation point
944 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
945 assert_bug("Lock access before initialization");
946 state->islocked = true;
947 ModelAction *unlock = get_last_unlock(curr);
948 //synchronize with the previous unlock statement
949 if (unlock != NULL) {
950 curr->synchronize_with(unlock);
955 case ATOMIC_UNLOCK: {
957 state->islocked = false;
958 //wake up the other threads
959 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
960 //activate all the waiting threads
961 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
962 scheduler->wake(get_thread(*rit));
// (wait handling) release the mutex and wake its lock-waiters, then
// decide whether this wait actually blocks
969 state->islocked = false;
970 //wake up the other threads
971 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
972 //activate all the waiting threads
973 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
974 scheduler->wake(get_thread(*rit));
977 //check whether we should go to sleep or not...simulate spurious failures
978 if (curr->get_node()->get_misc() == 0) {
979 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
981 scheduler->sleep(get_thread(curr));
985 case ATOMIC_NOTIFY_ALL: {
986 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
987 //activate all the waiting threads
988 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
989 scheduler->wake(get_thread(*rit));
994 case ATOMIC_NOTIFY_ONE: {
995 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
// The node's misc index selects which waiter to wake, so every choice
// gets explored across executions
996 int wakeupthread = curr->get_node()->get_misc();
997 action_list_t::iterator it = waiters->begin();
998 advance(it, wakeupthread);
999 scheduler->wake(get_thread(*it));
// Record a potential "future value" that reader might observe from writer,
// after checking that the modification order could permit it.
// NOTE(review): elided listing -- a field of the future_value initializer
// and the closing braces are not visible in this span.
1010 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1012 /* Do more ambitious checks now that mo is more complete */
1013 if (mo_may_allow(writer, reader)) {
1014 Node *node = reader->get_node();
1016 /* Find an ancestor thread which exists at the time of the reader */
1017 Thread *write_thread = get_thread(writer);
1018 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1019 write_thread = write_thread->get_parent();
1021 struct future_value fv = {
1022 writer->get_value(),
// Expiration: the writer's sequence number plus the configured delay cap
1023 writer->get_seq_number() + params.maxfuturedelay,
1024 write_thread->get_id(),
// A newly-recorded future value constitutes a fresh backtracking point
1026 if (node->add_future_value(fv))
1027 set_latest_backtrack(reader);
1032 * Process a write ModelAction
1033 * @param curr The ModelAction to process
1034 * @return True if the mo_graph was updated or promises were resolved
1036 bool ModelChecker::process_write(ModelAction *curr)
1038 bool updated_mod_order = w_modification_order(curr);
1039 bool updated_promises = resolve_promises(curr);
// Once no promises remain outstanding, flush the queued pending future
// values to their reader nodes
1041 if (promises->size() == 0) {
1042 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1043 struct PendingFutureValue pfv = (*futurevalues)[i];
1044 add_future_value(pfv.writer, pfv.act);
1046 futurevalues->clear();
1049 mo_graph->commitChanges();
1050 mo_check_promises(curr, false);
// Writes have no meaningful return value for the executing thread
1052 get_thread(curr)->set_return_value(VALUE_NONE);
1053 return updated_mod_order || updated_promises;
1057 * Process a fence ModelAction
1058 * @param curr The ModelAction to process
1059 * @return True if synchronization was updated
1061 bool ModelChecker::process_fence(ModelAction *curr)
1064 * fence-relaxed: no-op
1065 * fence-release: only log the occurence (not in this function), for
1066 * use in later synchronization
1067 * fence-acquire (this function): search for hypothetical release
// NOTE(review): elided listing -- continue/break lines for several guards
// and the final return are not visible in this span.
1070 bool updated = false;
1071 if (curr->is_acquire()) {
1072 action_list_t *list = action_trace;
1073 action_list_t::reverse_iterator rit;
1074 /* Find X : is_read(X) && X --sb-> curr */
1075 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1076 ModelAction *act = *rit;
// Only same-thread (sequenced-before) actions are relevant
1079 if (act->get_tid() != curr->get_tid())
1081 /* Stop at the beginning of the thread */
1082 if (act->is_thread_start())
1084 /* Stop once we reach a prior fence-acquire */
1085 if (act->is_fence() && act->is_acquire())
1087 if (!act->is_read())
1089 /* read-acquire will find its own release sequences */
1090 if (act->is_acquire())
1093 /* Establish hypothetical release sequences */
1094 rel_heads_list_t release_heads;
1095 get_release_seq_heads(curr, act, &release_heads);
// Failed synchronization here indicates incorrectly-ordered sync
1096 for (unsigned int i = 0; i < release_heads.size(); i++)
1097 if (!curr->synchronize_with(release_heads[i]))
1098 set_bad_synchronization();
1099 if (release_heads.size() != 0)
1107 * @brief Process the current action for thread-related activity
1109 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
1110 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1111 * synchronization, etc. This function is a no-op for non-THREAD actions
1112 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1114 * @param curr The current action
1115 * @return True if synchronization was updated or a thread completed
// NOTE(review): elided listing -- some case labels (e.g. THREAD_JOIN),
// break statements, the default case and the final return are not visible
// in this span.
1117 bool ModelChecker::process_thread_action(ModelAction *curr)
1119 bool updated = false;
1121 switch (curr->get_type()) {
1122 case THREAD_CREATE: {
// The new thread's entry function and argument travel in the action value
1123 thrd_t *thrd = (thrd_t *)curr->get_location();
1124 struct thread_params *params = (struct thread_params *)curr->get_value();
1125 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1127 th->set_creation(curr);
1128 /* Promises can be satisfied by children */
1129 for (unsigned int i = 0; i < promises->size(); i++) {
1130 Promise *promise = (*promises)[i];
1131 if (promise->thread_is_available(curr->get_tid()))
1132 promise->add_thread(th->get_id());
// (join handling) synchronize with the joined thread's last action
1137 Thread *blocking = curr->get_thread_operand();
1138 ModelAction *act = get_last_action(blocking->get_id());
1139 curr->synchronize_with(act);
1140 updated = true; /* trigger rel-seq checks */
1143 case THREAD_FINISH: {
1144 Thread *th = get_thread(curr);
// Wake every thread that blocked joining on this one
1145 while (!th->wait_list_empty()) {
1146 ModelAction *act = th->pop_wait_list();
1147 scheduler->wake(get_thread(act));
1150 /* Completed thread can't satisfy promises */
1151 for (unsigned int i = 0; i < promises->size(); i++) {
1152 Promise *promise = (*promises)[i];
1153 if (promise->thread_is_available(th->get_id()))
1154 if (promise->eliminate_thread(th->get_id()))
// Removing the last candidate thread makes the promise unsatisfiable
1155 priv->failed_promise = true;
1157 updated = true; /* trigger rel-seq checks */
1160 case THREAD_START: {
1161 check_promises(curr->get_tid(), NULL, curr->get_cv());
1172 * @brief Process the current action for release sequence fixup activity
1174 * Performs model-checker release sequence fixups for the current action,
1175 * forcing a single pending release sequence to break (with a given, potential
1176 * "loose" write) or to complete (i.e., synchronize). If a pending release
1177 * sequence forms a complete release sequence, then we must perform the fixup
1178 * synchronization, mo_graph additions, etc.
1180 * @param curr The current action; must be a release sequence fixup action
1181 * @param work_queue The work queue to which to add work items as they are
// NOTE(review): elided listing -- some ASSERT lines, the else branch
// structure and the body of the trailing race check are not visible here.
1184 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The Node records which write (if any) should break the sequence;
// NULL means "force the sequence to complete"
1186 const ModelAction *write = curr->get_node()->get_relseq_break();
1187 struct release_seq *sequence = pending_rel_seqs->back();
1188 pending_rel_seqs->pop_back();
1190 ModelAction *acquire = sequence->acquire;
1191 const ModelAction *rf = sequence->rf;
1192 const ModelAction *release = sequence->release;
1196 ASSERT(release->same_thread(rf));
1198 if (write == NULL) {
1200 * @todo Forcing a synchronization requires that we set
1201 * modification order constraints. For instance, we can't allow
1202 * a fixup sequence in which two separate read-acquire
1203 * operations read from the same sequence, where the first one
1204 * synchronizes and the other doesn't. Essentially, we can't
1205 * allow any writes to insert themselves between 'release' and
1209 /* Must synchronize */
1210 if (!acquire->synchronize_with(release)) {
1211 set_bad_synchronization();
1214 /* Re-check all pending release sequences */
1215 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1216 /* Re-check act for mo_graph edges */
1217 work_queue->push_back(MOEdgeWorkEntry(acquire));
1219 /* propagate synchronization to later actions */
// Walk the trace backwards from its end down to the acquire, forwarding
// the new synchronization to every happens-after action
1220 action_list_t::reverse_iterator rit = action_trace->rbegin();
1221 for (; (*rit) != acquire; rit++) {
1222 ModelAction *propagate = *rit;
1223 if (acquire->happens_before(propagate)) {
1224 propagate->synchronize_with(acquire);
1225 /* Re-check 'propagate' for mo_graph edges */
1226 work_queue->push_back(MOEdgeWorkEntry(propagate));
1230 /* Break release sequence with new edges:
1231 * release --mo--> write --mo--> rf */
// Breaking case: make 'write' intervene in modification order
1232 mo_graph->addEdge(release, write);
1233 mo_graph->addEdge(write, rf);
1236 /* See if we have realized a data race */
1236 /* See if we have realized a data race */
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelChecker::initialize_curr_action(ModelAction **curr)
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second/third part of an RMW: merge into the earlier RMWR
		 * rather than exploring a fresh action */
		newcurr = process_rmw(*curr);

		/* A completed RMW is also a store; it may generate promises */
		if (newcurr->is_rmw())
			compute_promises(newcurr);

	/* Non-RMW-continuation path: stamp a fresh sequence number and step
	 * the NodeStack (explore or replay) */
	(*curr)->set_seq_number(get_next_seq_num());

	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	/* NOTE(review): the branch distinguishing "replayed existing action"
	 * from "new action" is partially elided below */

	/* First restore type and order in case of RMW operation */
	if ((*curr)->is_rmwr())
		newcurr->copy_typeandorder(*curr);

	/* The replayed action must be for the same memory location */
	ASSERT((*curr)->get_location() == newcurr->get_location());
	newcurr->copy_from_new(*curr);

	/* Discard duplicate ModelAction; use action from NodeStack */

	/* Always compute new clock vector */
	newcurr->create_cv(get_parent_action(newcurr->get_tid()));

	return false; /* Action was explored previously */

	/* New-action path: */
	/* Always compute new clock vector */
	newcurr->create_cv(get_parent_action(newcurr->get_tid()));

	/* Assign most recent release fence */
	newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

	/*
	 * Perform one-time actions when pushing new ModelAction onto
	 * the NodeStack
	 */
	if (newcurr->is_write())
		compute_promises(newcurr);
	else if (newcurr->is_relseq_fixup())
		compute_relseq_breakwrites(newcurr);
	else if (newcurr->is_wait())
		/* wait() has two outcomes to explore (misc choice of 2) */
		newcurr->get_node()->set_misc_max(2);
	else if (newcurr->is_notify_one()) {
		/* notify_one picks one waiter non-deterministically: explore
		 * one branch per thread currently waiting on this condvar */
		newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());

	return true; /* This was a new ModelAction */
/**
 * @brief Establish reads-from relation between two actions
 *
 * Perform basic operations involved with establishing a concrete rf relation,
 * including setting the ModelAction data and checking for release sequences.
 *
 * @param act The action that is reading (must be a read)
 * @param rf The action from which we are reading (must be a write)
 *
 * @return True if this read established synchronization
 */
bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
	act->set_read_from(rf);
	if (rf != NULL && act->is_acquire()) {
		/* An acquire read may synchronize with the head(s) of the
		 * release sequence(s) ending in the write it reads from */
		rel_heads_list_t release_heads;
		get_release_seq_heads(act, act, &release_heads);
		int num_heads = release_heads.size();
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!act->synchronize_with(release_heads[i])) {
				/* Synchronizing would violate ordering; mark
				 * the execution infeasible */
				set_bad_synchronization();
		/* Synchronization occurred iff at least one head was found */
		return num_heads > 0;
/**
 * Check promises and eliminate potentially-satisfying threads when a thread is
 * blocked (e.g., join, lock). A thread which is waiting on another thread can
 * no longer satisfy a promise generated from that thread.
 *
 * @param blocker The thread on which a thread is waiting
 * @param waiting The waiting thread
 */
void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		ModelAction *reader = promise->get_action();
		/* Only promises whose reader belongs to the blocking thread
		 * are affected by this wait */
		if (reader->get_tid() != blocker->get_id())
		/* Skip promises that already exclude the waiting thread */
		if (!promise->thread_is_available(waiting->get_id()))
		if (promise->eliminate_thread(waiting->get_id())) {
			/* Promise has failed */
			priv->failed_promise = true;
/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether a lock or join operation would be successful (i.e., is the
 * lock already locked, or is the joined thread already complete). If not, put
 * the action in a waiter list.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelChecker::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		/* The lock object lives at the action's location */
		std::mutex *lock = (std::mutex *)curr->get_location();
		struct std::mutex_state *state = lock->get_state();
		if (state->islocked) {
			//Stick the action in the appropriate waiting queue
			get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
	} else if (curr->get_type() == THREAD_JOIN) {
		/* For joins, the "location" is the Thread being joined */
		Thread *blocking = (Thread *)curr->get_location();
		if (!blocking->is_complete()) {
			/* Target not finished: park this join on its wait
			 * list, to be woken at THREAD_FINISH */
			blocking->push_wait_list(curr);
			/* The joiner can no longer satisfy promises generated
			 * by the thread it waits on */
			thread_blocking_check_promises(blocking, get_thread(curr));
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The ModelAction that is actually executed; may be different than
 * curr; may be NULL, if the current action is not enabled to run
 */
ModelAction * ModelChecker::check_current_action(ModelAction *curr)
	/* RMWC/RMW actions continue an earlier RMWR; they are not re-added
	 * to the action lists */
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();

	if (!check_action_enabled(curr)) {
		/* Make the execution look like we chose to run this action
		 * much later, when a lock/join can succeed */
		get_thread(curr)->set_pending(curr);
		scheduler->sleep(get_thread(curr));

	bool newly_explored = initialize_curr_action(&curr);

	wake_up_sleeping_actions(curr);

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the work queue; processing one item may enqueue more.
	 * Bail out early if the model checker has asserted a bug. */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			/* Dispatch on action category; each process_* returns
			 * whether release sequences need re-checking
			 * (NOTE(review): the "update = true" arms are elided) */
			if (process_thread_action(curr))
			if (act->is_read() && !second_part_of_rmw && process_read(act))
			if (act->is_write() && process_write(act))
			if (act->is_fence() && process_fence(act))
			if (act->is_mutex_op() && process_mutex(act))
			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);

			/* update_all -> re-check every pending release seq;
			 * update -> only those at this location */
			work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				/* Add mo edges implied by this read, whether it
				 * read from a concrete write or a promise */
				if (r_modification_order(act, rf))
				} else if (promise) {
					if (r_modification_order(act, promise))

			if (act->is_write()) {
				if (w_modification_order(act))
			mo_graph->commitChanges();

			/* New mo edges can complete pending release seqs */
			work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));

	/* Record backtracking points discovered while processing curr */
	check_curr_backtracking(curr);
	set_backtracking(curr);
/**
 * @brief Mark the current action as a backtracking point if any untried
 * alternative choice remains at its node (parent backtrack choice, misc
 * choice, alternate reads-from, promise resolution, or release-seq break).
 */
void ModelChecker::check_curr_backtracking(ModelAction *curr)
	Node *currnode = curr->get_node();
	Node *parnode = currnode->get_parent();

	if ((parnode && !parnode->backtrack_empty()) ||
			!currnode->misc_empty() ||
			!currnode->read_from_empty() ||
			!currnode->promise_empty() ||
			!currnode->relseq_break_empty()) {
		set_latest_backtrack(curr);
/**
 * @brief Check whether any outstanding promise has expired
 * @return True if some promise's expiration precedes the sequence numbers
 * already consumed (it can no longer be satisfied in time)
 */
bool ModelChecker::promises_expired() const
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		if (promise->get_expiration() < priv->used_sequence_numbers)
/**
 * This is the strongest feasibility check available.
 * @return whether the current trace (partial or complete) must be a prefix of
 * a feasible trace
 */
bool ModelChecker::isfeasibleprefix() const
	/* Feasible iff no release sequences remain unresolved and all other
	 * (non-relseq) feasibility conditions hold */
	return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
/**
 * Print diagnostic information about an infeasible execution
 * @param prefix A string to prefix the output with; if NULL, then a default
 * message prefix will be provided
 */
void ModelChecker::print_infeasibility(const char *prefix) const
	/* Append one "[reason]" tag per violated feasibility condition
	 * (NOTE(review): buf/ptr declarations are in elided lines) */
	if (mo_graph->checkForCycles())
		ptr += sprintf(ptr, "[mo cycle]");
	if (priv->failed_promise)
		ptr += sprintf(ptr, "[failed promise]");
	if (priv->too_many_reads)
		ptr += sprintf(ptr, "[too many reads]");
	if (priv->no_valid_reads)
		ptr += sprintf(ptr, "[no valid reads-from]");
	if (priv->bad_synchronization)
		ptr += sprintf(ptr, "[bad sw ordering]");
	if (promises_expired())
		ptr += sprintf(ptr, "[promise expired]");
	if (promises->size() != 0)
		ptr += sprintf(ptr, "[unresolved promise]");

	model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
/**
 * Returns whether the current completed trace is feasible, except for pending
 * release sequences.
 */
bool ModelChecker::is_feasible_prefix_ignore_relseq() const
	/* Feasible and no promise left unresolved */
	return !is_infeasible() && promises->size() == 0;
/**
 * Check if the current partial trace is infeasible. Does not check any
 * end-of-execution flags, which might rule out the execution. Thus, this is
 * useful only for ruling an execution as infeasible.
 * @return whether the current partial trace is infeasible.
 */
bool ModelChecker::is_infeasible() const
	/* Infeasible if any single feasibility flag is tripped or the
	 * modification order graph has a cycle */
	return mo_graph->checkForCycles() ||
			priv->no_valid_reads ||
			priv->failed_promise ||
			priv->too_many_reads ||
			priv->bad_synchronization ||
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelChecker::process_rmw(ModelAction *act) {
	/* The matching RMWR is this thread's most recent action */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	if (act->is_rmw()) {
		/* RMW atomicity: the store half must immediately follow what
		 * it read from (concrete write or promise) in mo */
		if (lastread->get_reads_from())
			mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
		/* else: it read from a promise */
			mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
		mo_graph->commitChanges();
/**
 * Checks whether a thread has read from the same write for too many times
 * without seeing the effects of a later write.
 *
 * Basic idea:
 * 1) there must be a different write that we could read from that would satisfy the modification order,
 * 2) we must have read from the same value in excess of maxreads times, and
 * 3) that other write must have been in the reads_from set for maxreads times.
 *
 * If so, we decide that the execution is no longer feasible.
 */
void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
	/* maxreads == 0 disables the check entirely */
	if (params.maxreads != 0) {
		/* Need at least two read-from candidates for an alternative */
		if (curr->get_node()->get_read_from_past_size() <= 1)
		//Must make sure that execution is currently feasible... We could
		//accidentally clear by rolling back
		if (is_infeasible())
		std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
		int tid = id_to_int(curr->get_tid());

		/* Skip checks if this thread has no actions on this location */
		if ((int)thrd_lists->size() <= tid)
		action_list_t *list = &(*thrd_lists)[tid];

		action_list_t::reverse_iterator rit = list->rbegin();
		/* Skip past curr */
		for (; (*rit) != curr; rit++)
		/* go past curr now */

		/* Remember this position so we can re-walk the same reads */
		action_list_t::reverse_iterator ritcopy = rit;
		//See if we have enough reads from the same value
		for (; count < params.maxreads; rit++, count++) {
			/* Fewer than maxreads prior actions: cannot trigger */
			if (rit == list->rend())
			ModelAction *act = *rit;
			/* Only a contiguous run of reads counts */
			if (!act->is_read())
			/* All must read from the same write rf */
			if (act->get_reads_from() != rf)
			/* Each must have had an alternative to read from */
			if (act->get_node()->get_read_from_past_size() <= 1)
		/* Consider each alternative write curr could read from */
		for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
			const ModelAction *write = curr->get_node()->get_read_from_past(i);

			/* Need a different write */

			/* Test to see whether this is a feasible write to read from */
			/** NOTE: all members of read-from set should be
			 *  feasible, so we no longer check it here **/

			bool feasiblewrite = true;
			//now we need to see if this write works for everyone

			/* Re-walk the run of reads; 'write' must have been an
			 * option for every one of them */
			for (int loop = count; loop > 0; loop--, rit++) {
				ModelAction *act = *rit;
				bool foundvalue = false;
				for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
					if (act->get_node()->get_read_from_past(j) == write) {
						feasiblewrite = false;
			if (feasiblewrite) {
				/* Condition met: rule this execution out */
				priv->too_many_reads = true;
/**
 * Updates the mo_graph with the constraints imposed from the current read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read. Two cases:
 *
 * (1) The action is a write => that write must either occur before
 * the write we read from or be the write we read from.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
template <typename rf_type>
bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());

	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Writes other than rf may be SC/fence-ordered before rf */
			if (act->is_write() && !act->equals(rf) && act != curr) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr. Don't consider reflexively.
			 */
			if (act->happens_before(curr) && act != curr) {
				if (act->is_write()) {
					/* Case (1): prior write --mo--> rf */
					if (!act->equals(rf)) {
						added = mo_graph->addEdge(act, rf) || added;
					/* Case (2): what the prior read saw must
					 * be mo-before rf */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (!prevrf->equals(rf))
						added = mo_graph->addEdge(prevrf, rf) || added;
				} else if (!prevrf_promise->equals(rf)) {
					added = mo_graph->addEdge(prevrf_promise, rf) || added;

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, (*promises)[i]) || added;
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelChecker::w_modification_order(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());

	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
		   so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 * thread.
			 *
			 * 2) If RMW and it didn't read from anything, we should
			 * add whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from() != NULL)

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
					added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
					!act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write. Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   promise.
				 */
				if (thin_air_constraint_may_allow(curr, act)) {
					if (!is_infeasible())
						futurevalues->push_back(PendingFutureValue(curr, act));
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * them
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, (*promises)[i]) || added;
/** Arbitrary reads from the future are not allowed. Section 29.3
 * part 9 places some constraints. This method checks one result of the
 * constraint. Others require compiler support.
 * @param writer The prospective write
 * @param reader The prospective read
 * @return False only when writer and reader are RMWs and the writer's
 * reads-from chain reaches the reader via happens-before in the same
 * thread (which would make the future read circular); true otherwise */
bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
	/* Constraint only applies when both sides are RMWs */
	if (!writer->is_rmw())
	if (!reader->is_rmw())

	/* Walk the writer's reads-from chain looking for the reader */
	for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
		if (search == reader)
		/* Chain crossed into the reader's thread past the reader:
		 * stop searching */
		if (search->get_tid() == reader->get_tid() &&
				search->happens_before(reader))
/**
 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks the following constraint (others
 * require compiler support):
 *
 *   If X --hb-> Y --mo-> Z, then X should not read from Z.
 *
 * @param writer The prospective write (Z above)
 * @param reader The prospective read (X above)
 * @return False if the constraint is violated; true if it may be allowed
 */
bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Earliest write (or write read by a read) that the reader
		 * happens-before, per thread -- the "Y" candidate */
		const ModelAction *write_after_read = NULL;

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Don't disallow due to act == reader */
			if (!reader->happens_before(act) || reader == act)
			else if (act->is_write())
				write_after_read = act;
			else if (act->is_read() && act->get_reads_from() != NULL)
				write_after_read = act->get_reads_from();

		/* reader --hb-> write_after_read --mo-> writer would make the
		 * read-from-writer illegal */
		if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelChecker is certain that release_heads is complete;
 * false otherwise
 */
bool ModelChecker::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())

	/* Walk backwards along the RMW chain that rf belongs to */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		/* Non-RMW write: stop walking the chain */
			break; /* End of RMW chain */

		/** @todo Need to be smarter here... In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 * both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */

		/* read from future: need to settle this later */
		return false; /* incomplete */

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		/* Stop if we pass behind the fence-release; it dominates */
		if (fence_release && *(*rit) < *fence_release)
		if ((*rit)->is_release())

	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that might break the
	 * contiguous subsequence between 'release' and 'rf' */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		/* The sequence's own thread cannot break it */
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
				/* NOTE(review): additional disjuncts elided */
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
					mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			/* act may break release sequence */
			pending->writes.push_back(act);

		if (!future_ordered)
			certain = false; /* This thread is uncertain */

	/* Certain: the release head is confirmed; no pending state needed */
	release_heads->push_back(release);
	pending->writes.clear();
	/* Uncertain: stash enough state to re-check this sequence later */
	pending->release = release;
/**
 * An interface for getting the release sequence head(s) with which a
 * given ModelAction must synchronize. This function only returns a non-empty
 * result when it can locate a release sequence head with certainty. Otherwise,
 * it may mark the internal state of the ModelChecker so that it will handle
 * the release sequence at a later time, causing @a acquire to update its
 * synchronization at some later point in execution.
 *
 * @param acquire The 'acquire' action that may synchronize with a release
 * sequence
 * @param read The read action that may read from a release sequence; this may
 * be the same as acquire, or else an earlier action in the same thread (i.e.,
 * when 'acquire' is a fence-acquire)
 * @param release_heads A pass-by-reference return parameter. Will be filled
 * with the head(s) of the release sequence(s), if they exists with certainty.
 * @see ModelChecker::release_seq_heads
 */
void ModelChecker::get_release_seq_heads(ModelAction *acquire,
		ModelAction *read, rel_heads_list_t *release_heads)
	const ModelAction *rf = read->get_reads_from();
	/* Snapshot-allocated so the pending record survives rollback */
	struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
	sequence->acquire = acquire;
	sequence->read = read;

	if (!release_seq_heads(rf, release_heads, sequence)) {
		/* Uncertain: add act to 'lazy checking' list, to be resolved
		 * by resolve_release_sequences() later */
		pending_rel_seqs->push_back(sequence);
	/* else: answer was certain; the record is not needed */
		snapshot_free(sequence);
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
	bool updated = false;
	std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {

		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		/* Re-run the certainty check with the (possibly richer)
		 * modification order information now available */
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!acquire->has_synchronized_with(release_heads[i])) {
				if (acquire->synchronize_with(release_heads[i]))
				/* else: synchronization would violate
				 * ordering; mark execution infeasible */
					set_bad_synchronization();

		/* Only reached if new synchronization occurred (elided
		 * guard) */
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check read-acquire for mo_graph edges */
		if (acquire->is_read())
			work_queue->push_back(MOEdgeWorkEntry(acquire));

		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				propagate->synchronize_with(acquire);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));

		/* Sequence fully resolved: remove it from the pending list
		 * and release its snapshot-allocated record */
			it = pending_rel_seqs->erase(it);
			snapshot_free(pending);

	// If we resolved promises or data races, see if we have realized a data race.
2271 * Performs various bookkeeping operations for the current ModelAction. For
2272 * instance, adds action to the per-object, per-thread action vector and to the
2273 * action trace list of all thread actions.
2275 * @param act is the ModelAction to add.
/*
 * Bookkeeping for a new ModelAction: record it in the global trace, the
 * per-object list, the per-object/per-thread vector, and the per-thread
 * "last action" / "last fence-release" caches. The first action on an atomic
 * variable also inserts a synthetic ATOMIC_UNINIT action ahead of it.
 * NOTE(review): some interior lines (e.g., the 'uninit_id' declaration and
 * the conditionals guarding the uninit insertions) are elided in this listing.
 */
2277 void ModelChecker::add_action_to_lists(ModelAction *act)
2279 int tid = id_to_int(act->get_tid());
2280 ModelAction *uninit = NULL;
2282 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
/* First action on an atomic var: materialize its "uninitialized" write */
2283 if (list->empty() && act->is_atomic_var()) {
2284 uninit = new_uninitialized_action(act->get_location());
2285 uninit_id = id_to_int(uninit->get_tid());
2286 list->push_back(uninit);
2288 list->push_back(act);
2290 action_trace->push_back(act);
/* The uninit write goes to the very front of the global trace */
2292 action_trace->push_front(uninit);
2294 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
/* Grow the per-thread vector lazily as new threads appear */
2295 if (tid >= (int)vec->size())
2296 vec->resize(priv->next_thread_id);
2297 (*vec)[tid].push_back(act);
2299 (*vec)[uninit_id].push_front(uninit);
2301 if ((int)thrd_last_action->size() <= tid)
2302 thrd_last_action->resize(get_num_threads());
2303 (*thrd_last_action)[tid] = act;
2305 (*thrd_last_action)[uninit_id] = uninit;
/* Cache the most recent release fence per thread for fence synchronization */
2307 if (act->is_fence() && act->is_release()) {
2308 if ((int)thrd_last_fence_release->size() <= tid)
2309 thrd_last_fence_release->resize(get_num_threads());
2310 (*thrd_last_fence_release)[tid] = act;
/* A wait also acts on its mutex object; get_value() holds the mutex address */
2313 if (act->is_wait()) {
2314 void *mutex_loc = (void *) act->get_value();
2315 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2317 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2318 if (tid >= (int)vec->size())
2319 vec->resize(priv->next_thread_id);
2320 (*vec)[tid].push_back(act);
2325 * @brief Get the last action performed by a particular Thread
2326 * @param tid The thread ID of the Thread in question
2327 * @return The last action in the thread
/*
 * Return the most recent action recorded for thread 'tid', looked up in the
 * thrd_last_action cache. The out-of-range fallback (presumably returning
 * NULL) is elided from this listing.
 */
2329 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2331 int threadid = id_to_int(tid);
/* Bounds check: the cache only covers threads that have acted */
2332 if (threadid < (int)thrd_last_action->size())
2333 return (*thrd_last_action)[id_to_int(tid)];
2339 * @brief Get the last fence release performed by a particular Thread
2340 * @param tid The thread ID of the Thread in question
2341 * @return The last fence release in the thread, if one exists; NULL otherwise
/*
 * Return the most recent release fence recorded for thread 'tid' from the
 * thrd_last_fence_release cache. The out-of-range fallback (presumably NULL)
 * is elided from this listing.
 */
2343 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2345 int threadid = id_to_int(tid);
2346 if (threadid < (int)thrd_last_fence_release->size())
2347 return (*thrd_last_fence_release)[id_to_int(tid)];
2353 * Gets the last memory_order_seq_cst write (in the total global sequence)
2354 * performed on a particular object (i.e., memory location), not including the
2356 * @param curr The current ModelAction; also denotes the object location to
2358 * @return The last seq_cst write
/*
 * Scan the per-object action list backwards (most recent first) for the last
 * seq_cst write to curr's location, excluding curr itself. The return of the
 * found element and the not-found fallback are elided from this listing.
 */
2360 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2362 void *location = curr->get_location();
2363 action_list_t *list = get_safe_ptr_action(obj_map, location);
2364 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2365 action_list_t::reverse_iterator rit;
2366 for (rit = list->rbegin(); rit != list->rend(); rit++)
2367 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2373 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2374 * performed in a particular thread, prior to a particular fence.
2375 * @param tid The ID of the thread to check
2376 * @param before_fence The fence from which to begin the search; if NULL, then
2377 * search for the most recent fence in the thread.
2378 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
/*
 * Find the last seq_cst fence in thread 'tid' that precedes 'before_fence'
 * in the global order (or the most recent one, if before_fence is NULL).
 * Implementation: position the reverse iterator at before_fence first, then
 * continue scanning backwards for a matching fence. The guards around the
 * first loop and both returns are elided from this listing.
 */
2380 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2382 /* All fences should have NULL location */
2383 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2384 action_list_t::reverse_iterator rit = list->rbegin();
/* Phase 1: advance the iterator until it points at before_fence */
2387 for (; rit != list->rend(); rit++)
2388 if (*rit == before_fence)
2391 ASSERT(*rit == before_fence);
/* Phase 2: keep scanning backwards for this thread's prior seq_cst fence */
2395 for (; rit != list->rend(); rit++)
2396 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2402 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2403 * location). This function identifies the mutex according to the current
2404 * action, which is presumed to perform on the same mutex.
2405 * @param curr The current ModelAction; also denotes the object location to
2407 * @return The last unlock operation
/*
 * Scan the mutex object's action list backwards for the most recent unlock
 * (a wait counts, since it releases the mutex). The return statements are
 * elided from this listing.
 */
2409 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2411 void *location = curr->get_location();
2412 action_list_t *list = get_safe_ptr_action(obj_map, location);
2413 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2414 action_list_t::reverse_iterator rit;
2415 for (rit = list->rbegin(); rit != list->rend(); rit++)
2416 if ((*rit)->is_unlock() || (*rit)->is_wait())
/*
 * Return the "parent" action for thread 'tid': its last recorded action, or
 * (when it has none yet — the guarding condition is elided here) the
 * THREAD_CREATE action that spawned it.
 */
2421 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2423 ModelAction *parent = get_last_action(tid);
/* Fallback: the thread's creation action serves as its parent */
2425 parent = get_thread(tid)->get_creation();
2430 * Returns the clock vector for a given thread.
2431 * @param tid The thread whose clock vector we want
2432 * @return Desired clock vector
2434 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2436 return get_parent_action(tid)->get_cv();
2440 * Resolve a set of Promises with a current write. The set is provided in the
2441 * Node corresponding to @a write.
2442 * @param write The ModelAction that is fulfilling Promises
2443 * @return True if promises were resolved; false otherwise
/*
 * Resolve the set of Promises selected in write's Node with this write:
 * point each promised read at 'write', update the mo_graph, and drop the
 * promise from the global list. Afterwards, flag a failed promise if any
 * promise the graph says *must* be resolved wasn't, and re-check the
 * affected reads. Returns whether any promise was resolved.
 * NOTE(review): several interior lines (else/promise_index++ path, the
 * std::find() continuation, the delete of resolved promises) are elided.
 */
2445 bool ModelChecker::resolve_promises(ModelAction *write)
2447 bool haveResolved = false;
2448 std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2449 promise_list_t mustResolve, resolved;
/* 'i' indexes the Node's promise bitmask; 'promise_index' tracks the shrinking list */
2451 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2452 Promise *promise = (*promises)[promise_index];
2453 if (write->get_node()->get_promise(i)) {
2454 ModelAction *read = promise->get_action();
2455 read_from(read, write);
2456 //Make sure the promise's value matches the write's value
2457 ASSERT(promise->is_compatible(write));
/* mustResolve collects promises the mo_graph requires us to resolve too */
2458 mo_graph->resolvePromise(promise, write, &mustResolve);
2460 resolved.push_back(promise);
2461 promises->erase(promises->begin() + promise_index);
2462 actions_to_check.push_back(read);
2464 haveResolved = true;
/* Any must-resolve promise not in 'resolved' means this execution failed */
2469 for (unsigned int i = 0; i < mustResolve.size(); i++) {
2470 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2472 priv->failed_promise = true;
2474 for (unsigned int i = 0; i < resolved.size(); i++)
2476 //Check whether reading these writes has made threads unable to
2479 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2480 ModelAction *read = actions_to_check[i];
/* true: 'read' is a read, so check the store it reads from */
2481 mo_check_promises(read, true);
2484 return haveResolved;
2488 * Compute the set of promises that could potentially be satisfied by this
2489 * action. Note that the set computation actually appears in the Node, not in
2491 * @param curr The ModelAction that may satisfy promises
/*
 * Mark, in curr's Node, every existing Promise that this action could
 * potentially satisfy: the promised read must not happen-before or be able
 * to synchronize with curr, and the promise's location/value must match.
 */
2493 void ModelChecker::compute_promises(ModelAction *curr)
2495 for (unsigned int i = 0; i < promises->size(); i++) {
2496 Promise *promise = (*promises)[i];
2497 const ModelAction *act = promise->get_action();
2498 ASSERT(act->is_read());
2499 if (!act->happens_before(curr) &&
2500 !act->could_synchronize_with(curr) &&
2501 promise->is_compatible(curr) &&
2502 promise->get_value() == curr->get_value()) {
/* Record eligibility; RMW-ness of the promised read is tracked too */
2503 curr->get_node()->set_promise(i, act->is_rmw());
2508 /** Checks promises in response to change in ClockVector Threads. */
/*
 * Re-check all promises after thread 'tid' gained new clock-vector
 * knowledge: if the merge newly synchronizes 'tid' with a promised read,
 * that thread can no longer satisfy the promise and is eliminated; a promise
 * with no remaining candidate threads marks the execution failed.
 */
2509 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2511 for (unsigned int i = 0; i < promises->size(); i++) {
2512 Promise *promise = (*promises)[i];
2513 const ModelAction *act = promise->get_action();
/* Trigger only on a *newly* established synchronization (old didn't, merged does) */
2514 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2515 merge_cv->synchronized_since(act)) {
2516 if (promise->eliminate_thread(tid)) {
2517 //Promise has failed
2518 priv->failed_promise = true;
/* Scan all promises after a thread is disabled; any promise that has no
 * remaining satisfier (per Promise::has_failed) dooms this execution. */
2525 void ModelChecker::check_promises_thread_disabled()
2527 for (unsigned int i = 0; i < promises->size(); i++) {
2528 Promise *promise = (*promises)[i];
2529 if (promise->has_failed()) {
2530 priv->failed_promise = true;
2537 * @brief Checks promises in response to addition to modification order for
2540 * We test whether threads are still available for satisfying promises after an
2541 * addition to our modification order constraints. Those that are unavailable
2542 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2543 * that promise has failed.
2545 * @param act The ModelAction which updated the modification order
2546 * @param is_read_check Should be true if act is a read and we must check for
2547 * updates to the store from which it read (there is a distinction here for
2548 * RMW's, which are both a load and a store)
/*
 * After a modification-order update involving 'act', re-check every promise
 * on the same variable: if the relevant write is now mo-constrained such
 * that a promise can no longer be satisfied, mark the execution failed.
 * For reads (is_read_check), the store being checked is act's reads-from.
 * NOTE(review): the continue/break lines between these checks are elided.
 */
2550 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2552 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2554 for (unsigned int i = 0; i < promises->size(); i++) {
2555 Promise *promise = (*promises)[i];
2556 const ModelAction *pread = promise->get_action();
2558 // Is this promise on the same location?
2559 if (!pread->same_var(write))
/* promised read hb act: the write is ordered after the read's window */
2562 if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
2563 priv->failed_promise = true;
2567 // Don't do any lookups twice for the same thread
2568 if (!promise->thread_is_available(act->get_tid()))
2571 if (mo_graph->checkReachable(promise, write)) {
2572 if (mo_graph->checkPromise(write, promise)) {
2573 priv->failed_promise = true;
2581 * Compute the set of writes that may break the current pending release
2582 * sequence. This information is extracted from previous release sequence
2585 * @param curr The current ModelAction. Must be a release sequence fixup
/*
 * For a release-sequence fixup action, record in its Node the set of writes
 * (taken from the most recent pending release sequence) that could break the
 * sequence, plus a NULL entry meaning "don't break; just synchronize".
 * The early-return for an empty pending list is elided from this listing.
 */
2588 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2590 if (pending_rel_seqs->empty())
2593 struct release_seq *pending = pending_rel_seqs->back();
2594 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2595 const ModelAction *write = pending->writes[i];
2596 curr->get_node()->add_relseq_break(write);
2599 /* NULL means don't break the sequence; just synchronize */
2600 curr->get_node()->add_relseq_break(NULL);
2604 * Build up an initial set of all past writes that this 'read' action may read
2605 * from, as well as any previously-observed future values that must still be valid.
2607 * @param curr is the current ModelAction that we are exploring; it must be a
/*
 * Populate curr's may-read-from set: every past write (per thread, newest
 * first) that curr could feasibly read from, plus reads promised by existing
 * Promises on the same variable. Feasibility is tested by tentatively adding
 * the mo edges and rolling them back. An empty result dooms the execution.
 * NOTE(review): interior lines (the 'i' declaration, continue/break paths,
 * the allow_read usage, an early return) are elided from this listing.
 */
2610 void ModelChecker::build_may_read_from(ModelAction *curr)
2612 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2614 ASSERT(curr->is_read());
2616 ModelAction *last_sc_write = NULL;
/* For a seq_cst read, the last seq_cst write to this location constrains the set */
2618 if (curr->is_seqcst())
2619 last_sc_write = get_last_seq_cst_write(curr);
2621 /* Iterate over all threads */
2622 for (i = 0; i < thrd_lists->size(); i++) {
2623 /* Iterate over actions in thread, starting from most recent */
2624 action_list_t *list = &(*thrd_lists)[i];
2625 action_list_t::reverse_iterator rit;
2626 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2627 ModelAction *act = *rit;
2629 /* Only consider 'write' actions */
2630 if (!act->is_write() || act == curr)
2633 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2634 bool allow_read = true;
2636 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
/* Sleeping threads may only read from certain writes (see sleep_can_read_from) */
2638 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2642 /* Only add feasible reads */
2643 mo_graph->startChanges();
/* Tentatively apply the read's mo constraints, test, then roll back */
2644 r_modification_order(curr, act);
2645 if (!is_infeasible())
2646 curr->get_node()->add_read_from_past(act);
2647 mo_graph->rollbackChanges();
2650 /* Include at most one act per-thread that "happens before" curr */
2651 if (act->happens_before(curr))
2656 /* Inherit existing, promised future values */
2657 for (i = 0; i < promises->size(); i++) {
2658 const Promise *promise = (*promises)[i];
2659 const ModelAction *promise_read = promise->get_action();
2660 if (promise_read->same_var(curr)) {
2661 /* Only add feasible future-values */
2662 mo_graph->startChanges();
2663 r_modification_order(curr, promise);
2664 if (!is_infeasible())
2665 curr->get_node()->add_read_from_promise(promise_read);
2666 mo_graph->rollbackChanges();
2670 /* We may find no valid may-read-from only if the execution is doomed */
2671 if (!curr->get_node()->read_from_size()) {
2672 priv->no_valid_reads = true;
2676 if (DBG_ENABLED()) {
2677 model_print("Reached read action:\n");
2679 model_print("Printing read_from_past\n");
2680 curr->get_node()->print_read_from_past();
2681 model_print("End printing read_from_past\n");
/*
 * Determine whether a read issued by a sleeping thread may read from 'write'.
 * Walks the RMW reads-from chain backwards; the return statements for each
 * case are elided from this listing, but the checks are: uninit writes,
 * release writes while the thread was in the sleep set, and the end of an
 * RMW chain.
 */
2685 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
/* Follow the chain of RMWs back through their reads-from writes */
2687 for ( ; write != NULL; write = write->get_reads_from()) {
2688 /* UNINIT actions don't have a Node, and they never sleep */
2689 if (write->is_uninitialized())
2691 Node *prevnode = write->get_node()->get_parent();
2693 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2694 if (write->is_release() && thread_sleep)
2696 if (!write->is_rmw())
2703 * @brief Create a new action representing an uninitialized atomic
2704 * @param location The memory location of the atomic object
2705 * @return A pointer to a new ModelAction
/*
 * Construct a synthetic ATOMIC_UNINIT action (value 0, relaxed, owned by the
 * model thread) representing an atomic object's pre-initialization state.
 * Uses snapshot_malloc + placement new so the action lives on the snapshotting
 * heap rather than the regular ModelAction allocator. The return is elided.
 */
2707 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2709 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
/* Placement new: construct in the snapshot-allocated storage */
2710 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2711 act->create_cv(NULL);
/*
 * Print every action in 'list' between separator rules, accumulating a
 * simple XOR/shift hash of the actions, printed at the end so distinct
 * traces can be told apart at a glance. (Per-action print call is elided.)
 */
2715 static void print_list(action_list_t *list)
2717 action_list_t::iterator it;
2719 model_print("---------------------------------------------------------------------\n");
2721 unsigned int hash = 0;
2723 for (it = list->begin(); it != list->end(); it++) {
/* Mix each action's hash into the running trace hash */
2725 hash = hash^(hash<<3)^((*it)->hash());
2727 model_print("HASH %u\n", hash);
2728 model_print("---------------------------------------------------------------------\n");
2731 #if SUPPORT_MOD_ORDER_DUMP
/*
 * Dump the current execution as a Graphviz .dot file named "<filename>.dot":
 * mo_graph nodes, reads-from edges (red), and per-thread sequenced-before
 * edges (blue). thread_array tracks each thread's previous action to draw
 * the sb edge.
 * NOTE(review): fopen's result is used unchecked here, and the 'buffer'
 * declaration and fclose() are not visible in this excerpt — confirm they
 * exist in the full file.
 */
2732 void ModelChecker::dumpGraph(char *filename) const
2735 sprintf(buffer, "%s.dot", filename);
2736 FILE *file = fopen(buffer, "w");
2737 fprintf(file, "digraph %s {\n", filename);
2738 mo_graph->dumpNodes(file);
2739 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2741 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2742 ModelAction *action = *it;
2743 if (action->is_read()) {
2744 fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2745 if (action->get_reads_from() != NULL)
2746 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
/* Draw sequenced-before edge from this thread's previous action */
2748 if (thread_array[action->get_tid()] != NULL) {
2749 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2752 thread_array[action->get_tid()] = action;
2754 fprintf(file, "}\n");
2755 model_free(thread_array);
2760 /** @brief Prints an execution trace summary. */
/*
 * Print a summary of the just-completed execution: optionally dump the
 * modification-order graphs (numbered by execution count), then the
 * feasibility status and the full action trace.
 */
2761 void ModelChecker::print_summary() const
2763 #if SUPPORT_MOD_ORDER_DUMP
2764 char buffername[100];
2765 sprintf(buffername, "exec%04u", stats.num_total);
2766 mo_graph->dumpGraphToFile(buffername);
2767 sprintf(buffername, "graph%04u", stats.num_total);
2768 dumpGraph(buffername);
/* #endif and the feasible-branch body are elided from this listing */
2771 model_print("Execution %d:", stats.num_total);
2772 if (isfeasibleprefix())
2775 print_infeasibility(" INFEASIBLE");
2776 print_list(action_trace);
2781 * Add a Thread to the system for the first time. Should only be called once
2783 * @param t The Thread to add
2785 void ModelChecker::add_thread(Thread *t)
2787 thread_map->put(id_to_int(t->get_id()), t);
2788 scheduler->add_thread(t);
2792 * Removes a thread from the scheduler.
2793 * @param t The thread to remove.
2795 void ModelChecker::remove_thread(Thread *t)
2797 scheduler->remove_thread(t);
2801 * @brief Get a Thread reference by its ID
2802 * @param tid The Thread's ID
2803 * @return A Thread reference
2805 Thread * ModelChecker::get_thread(thread_id_t tid) const
2807 return thread_map->get(id_to_int(tid));
2811 * @brief Get a reference to the Thread in which a ModelAction was executed
2812 * @param act The ModelAction
2813 * @return A Thread reference
2815 Thread * ModelChecker::get_thread(const ModelAction *act) const
2817 return get_thread(act->get_tid());
2821 * @brief Check if a Thread is currently enabled
2822 * @param t The Thread to check
2823 * @return True if the Thread is currently enabled
2825 bool ModelChecker::is_enabled(Thread *t) const
2827 return scheduler->is_enabled(t);
2831 * @brief Check if a Thread is currently enabled
2832 * @param tid The ID of the Thread to check
2833 * @return True if the Thread is currently enabled
2835 bool ModelChecker::is_enabled(thread_id_t tid) const
2837 return scheduler->is_enabled(tid);
2841 * Switch from a model-checker context to a user-thread context. This is the
2842 * complement of ModelChecker::switch_to_master and must be called from the
2843 * model-checker context
2845 * @param thread The user-thread to switch to
2847 void ModelChecker::switch_from_master(Thread *thread)
2849 scheduler->set_current_thread(thread);
2850 Thread::swap(&system_context, thread);
2854 * Switch from a user-context to the "master thread" context (a.k.a. system
2855 * context). This switch is made with the intention of exploring a particular
2856 * model-checking action (described by a ModelAction object). Must be called
2857 * from a user-thread context.
2859 * @param act The current action that will be explored. May be NULL only if
2860 * trace is exiting via an assertion (see ModelChecker::set_assert and
2861 * ModelChecker::has_asserted).
2862 * @return Return the value returned by the current action
/*
 * Switch from a user-thread context back to the model-checker context,
 * stashing the action to explore as the thread's pending action. On resume,
 * returns the value the model checker computed for this action.
 */
2864 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2867 Thread *old = thread_current();
/* A thread must not already have a stashed action */
2868 ASSERT(!old->get_pending());
2869 old->set_pending(act);
2870 if (Thread::swap(old, &system_context) < 0) {
2871 perror("swap threads");
/* Execution resumes here once the model checker schedules us again */
2874 return old->get_return_value();
2878 * Takes the next step in the execution, if possible.
2879 * @param curr The current step to take
2880 * @return Returns the next Thread to run, if any; NULL if this execution
/*
 * Execute one step of the current thread's pending action, then pick the
 * next thread to run (NULL ends the execution). Stops stepping when the
 * execution is infeasible, a bug has been found on a feasible prefix, or
 * the step bound is exceeded; the bodies of those branches are elided from
 * this listing.
 */
2883 Thread * ModelChecker::take_step(ModelAction *curr)
2885 Thread *curr_thrd = get_thread(curr);
2886 ASSERT(curr_thrd->get_state() == THREAD_READY);
/* check_current_action may replace curr (e.g., with an expanded action) */
2888 curr = check_current_action(curr);
2890 /* Infeasible -> don't take any more steps */
2891 if (is_infeasible())
2893 else if (isfeasibleprefix() && have_bug_reports()) {
/* Optional step bound from the command-line parameters */
2898 if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2901 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2902 scheduler->remove_thread(curr_thrd);
2904 Thread *next_thrd = get_next_thread(curr);
2906 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2907 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2912 /** Wrapper to run the user's main function, with appropriate arguments */
/* Entry point for the initial user thread: forwards the model's saved
 * argc/argv to the program's user_main(). The unused void* parameter matches
 * the thread start-routine signature. */
2913 void user_main_wrapper(void *)
2915 user_main(model->params.argc, model->params.argv);
2918 /** @brief Run ModelChecker for the user program */
2919 void ModelChecker::run()
2923 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2928 * Stash next pending action(s) for thread(s). There
2929 * should only need to stash one thread's action--the
2930 * thread which just took a step--plus the first step
2931 * for any newly-created thread
2933 for (unsigned int i = 0; i < get_num_threads(); i++) {
2934 thread_id_t tid = int_to_id(i);
2935 Thread *thr = get_thread(tid);
2936 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2937 switch_from_master(thr);
2941 /* Catch assertions from prior take_step or from
2942 * between-ModelAction bugs (e.g., data races) */
2946 /* Consume the next action for a Thread */
2947 ModelAction *curr = t->get_pending();
2948 t->set_pending(NULL);
2949 t = take_step(curr);
2950 } while (t && !t->is_model_thread());
2953 * Launch end-of-execution release sequence fixups only when
2954 * the execution is otherwise feasible AND there are:
2956 * (1) pending release sequences
2957 * (2) pending assertions that could be invalidated by a change
2958 * in clock vectors (i.e., data races)
2959 * (3) no pending promises
2961 while (!pending_rel_seqs->empty() &&
2962 is_feasible_prefix_ignore_relseq() &&
2963 !unrealizedraces.empty()) {
2964 model_print("*** WARNING: release sequence fixup action "
2965 "(%zu pending release seuqence(s)) ***\n",
2966 pending_rel_seqs->size());
2967 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2968 std::memory_order_seq_cst, NULL, VALUE_NONE,
2972 } while (next_execution());
2974 model_print("******* Model-checking complete: *******\n");